From a5f70a8da50524984c63a3b1588f7a23971da9fe Mon Sep 17 00:00:00 2001 From: Daniel Sullivan Date: Tue, 16 Dec 2014 16:08:39 -0500 Subject: [PATCH 001/293] Remove call to multiprocessing. Fixes issue #422. --- pymode/rope.py | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/pymode/rope.py b/pymode/rope.py index 01e153e4..159900bf 100644 --- a/pymode/rope.py +++ b/pymode/rope.py @@ -409,12 +409,7 @@ def _update_cache(importer, modules=None): importer.generate_modules_cache(modules) importer.project.sync() - sys.stdout, stdout_ = StringIO(), sys.stdout - sys.stderr, stderr_ = StringIO(), sys.stderr - process = multiprocessing.Process(target=_update_cache, args=( - self.importer, modules)) - process.start() - sys.stdout, sys.stderr = stdout_, stderr_ + _update_cache(self.importer, modules) class ProgressHandler(object): From e2521aa6b7d9ee8594b3b49a442e3f569b45cae1 Mon Sep 17 00:00:00 2001 From: Tyler Fenby Date: Sun, 21 Dec 2014 17:37:43 -0500 Subject: [PATCH 002/293] Upgrade included rope libs to latest releases rope: 0.10.2 rope_py3k: 0.9.4-1 --- pymode/libs2/rope/__init__.py | 2 +- pymode/libs2/rope/base/arguments.py | 2 + pymode/libs2/rope/base/builtins.py | 67 ++++-- pymode/libs2/rope/base/change.py | 12 +- pymode/libs2/rope/base/codeanalyze.py | 16 +- pymode/libs2/rope/base/default_config.py | 12 +- pymode/libs2/rope/base/evaluate.py | 16 +- pymode/libs2/rope/base/fscommands.py | 3 +- pymode/libs2/rope/base/libutils.py | 67 +++++- pymode/libs2/rope/base/oi/doa.py | 11 +- pymode/libs2/rope/base/oi/runmod.py | 19 +- pymode/libs2/rope/base/oi/soa.py | 5 +- pymode/libs2/rope/base/oi/soi.py | 11 + pymode/libs2/rope/base/oi/transform.py | 12 +- pymode/libs2/rope/base/prefs.py | 2 +- pymode/libs2/rope/base/project.py | 118 +++++++++- pymode/libs2/rope/base/pycore.py | 134 +++-------- pymode/libs2/rope/base/pynames.py | 14 +- pymode/libs2/rope/base/pyobjectsdef.py | 41 ++-- pymode/libs2/rope/base/pyscopes.py | 7 +- 
pymode/libs2/rope/base/resourceobserver.py | 3 +- pymode/libs2/rope/base/resources.py | 56 ++++- pymode/libs2/rope/base/stdmods.py | 10 +- pymode/libs2/rope/base/taskhandle.py | 2 - pymode/libs2/rope/base/utils.py | 5 + pymode/libs2/rope/base/worder.py | 21 +- pymode/libs2/rope/contrib/autoimport.py | 29 ++- pymode/libs2/rope/contrib/codeassist.py | 119 +++++++--- pymode/libs2/rope/contrib/finderrors.py | 2 +- pymode/libs2/rope/contrib/findit.py | 24 +- pymode/libs2/rope/contrib/fixmodnames.py | 4 +- pymode/libs2/rope/contrib/fixsyntax.py | 28 ++- pymode/libs2/rope/contrib/generate.py | 43 ++-- pymode/libs2/rope/refactor/__init__.py | 4 +- .../libs2/rope/refactor/change_signature.py | 50 ++-- .../libs2/rope/refactor/encapsulate_field.py | 39 ++-- pymode/libs2/rope/refactor/extract.py | 39 ++-- pymode/libs2/rope/refactor/functionutils.py | 22 +- .../rope/refactor/importutils/__init__.py | 76 ++++--- .../rope/refactor/importutils/actions.py | 92 ++++---- .../rope/refactor/importutils/importinfo.py | 16 +- .../refactor/importutils/module_imports.py | 109 ++++++--- pymode/libs2/rope/refactor/inline.py | 160 +++++++------ .../libs2/rope/refactor/introduce_factory.py | 30 +-- .../rope/refactor/introduce_parameter.py | 9 +- pymode/libs2/rope/refactor/localtofield.py | 11 +- pymode/libs2/rope/refactor/method_object.py | 21 +- pymode/libs2/rope/refactor/move.py | 213 +++++++++++++----- pymode/libs2/rope/refactor/multiproject.py | 8 +- pymode/libs2/rope/refactor/occurrences.py | 104 ++++++--- pymode/libs2/rope/refactor/patchedast.py | 37 ++- pymode/libs2/rope/refactor/rename.py | 40 ++-- pymode/libs2/rope/refactor/restructure.py | 24 +- pymode/libs2/rope/refactor/similarfinder.py | 34 +-- pymode/libs2/rope/refactor/sourceutils.py | 9 +- pymode/libs2/rope/refactor/suites.py | 1 + pymode/libs2/rope/refactor/topackage.py | 8 +- pymode/libs2/rope/refactor/usefunction.py | 15 +- pymode/libs2/rope/refactor/wildcards.py | 6 +- pymode/libs3/rope/__init__.py | 2 +- 
pymode/libs3/rope/refactor/patchedast.py | 26 +-- pymode/libs3/rope/refactor/suites.py | 9 - 62 files changed, 1331 insertions(+), 800 deletions(-) diff --git a/pymode/libs2/rope/__init__.py b/pymode/libs2/rope/__init__.py index 19466380..c8e11f68 100644 --- a/pymode/libs2/rope/__init__.py +++ b/pymode/libs2/rope/__init__.py @@ -1,7 +1,7 @@ """rope, a python refactoring library""" INFO = __doc__ -VERSION = '0.9.4' +VERSION = '0.10.2' COPYRIGHT = """\ Copyright (C) 2006-2012 Ali Gholami Rudi Copyright (C) 2009-2012 Anton Gritsay diff --git a/pymode/libs2/rope/base/arguments.py b/pymode/libs2/rope/base/arguments.py index 342e2ae5..7ba43640 100644 --- a/pymode/libs2/rope/base/arguments.py +++ b/pymode/libs2/rope/base/arguments.py @@ -72,6 +72,8 @@ def get_pynames(self, parameters): def get_instance_pyname(self): return self.pynames[0] + + class MixedArguments(object): def __init__(self, pyname, arguments, scope): diff --git a/pymode/libs2/rope/base/builtins.py b/pymode/libs2/rope/base/builtins.py index 78e7afb0..5bb84859 100644 --- a/pymode/libs2/rope/base/builtins.py +++ b/pymode/libs2/rope/base/builtins.py @@ -149,8 +149,10 @@ def _get_builtin(*args): return cls._generated[args] return _get_builtin + def _create_builtin_getter(cls): type_getter = _create_builtin_type_getter(cls) + def _get_builtin(*args): return pyobjects.PyObject(type_getter(*args)) return _get_builtin @@ -233,7 +235,7 @@ def __call__(self, name, returned=None, function=None, except AttributeError: if check_existence: raise - builtin=None + builtin = None self.attributes[name] = BuiltinName( BuiltinFunction(returned=returned, function=function, argnames=argnames, builtin=builtin)) @@ -252,7 +254,8 @@ def __init__(self, holding=None): collector('__new__', function=self._new_list) # Adding methods - collector('append', function=self._list_add, argnames=['self', 'value']) + collector('append', function=self._list_add, + argnames=['self', 'value']) collector('__setitem__', function=self._list_add, 
argnames=['self', 'index', 'value']) collector('insert', function=self._list_add, @@ -306,7 +309,6 @@ class Dict(BuiltinClass): def __init__(self, keys=None, values=None): self.keys = keys self.values = values - item = get_tuple(self.keys, self.values) collector = _AttributeCollector(dict) collector('__new__', function=self._new_dict) collector('__setitem__', function=self._dict_add) @@ -327,7 +329,8 @@ def do_create(holding=None): if holding is None: return get_dict() type = holding.get_type() - if isinstance(type, Tuple) and len(type.get_holding_objects()) == 2: + if isinstance(type, Tuple) and \ + len(type.get_holding_objects()) == 2: return get_dict(*type.get_holding_objects()) return _create_builtin(args, do_create) @@ -384,7 +387,7 @@ def _self_set(self, context): if new_dict and isinstance(new_dict.get_object().get_type(), Dict): args = arguments.ObjectArguments([new_dict]) items = new_dict.get_object()['popitem'].\ - get_object().get_returned_object(args) + get_object().get_returned_object(args) context.save_per_name(items) else: holding = _infer_sequence_for_pyname(new_dict) @@ -405,7 +408,8 @@ def __init__(self, *objects): first = objects[0] attributes = { '__getitem__': BuiltinName(BuiltinFunction(first)), - '__getslice__': BuiltinName(BuiltinFunction(pyobjects.PyObject(self))), + '__getslice__': + BuiltinName(BuiltinFunction(pyobjects.PyObject(self))), '__new__': BuiltinName(BuiltinFunction(function=self._new_tuple)), '__iter__': BuiltinName(BuiltinFunction(get_iterator(first)))} super(Tuple, self).__init__(tuple, attributes) @@ -485,8 +489,9 @@ def __init__(self): self_methods = ['__getitem__', '__getslice__', 'capitalize', 'center', 'decode', 'encode', 'expandtabs', 'join', 'ljust', - 'lower', 'lstrip', 'replace', 'rjust', 'rstrip', 'strip', - 'swapcase', 'title', 'translate', 'upper', 'zfill'] + 'lower', 'lstrip', 'replace', 'rjust', 'rstrip', + 'strip', 'swapcase', 'title', 'translate', 'upper', + 'zfill'] for method in self_methods: 
collector(method, self_object) @@ -514,6 +519,7 @@ def get_object(self): def get_definition_location(self): return (None, None) + class Iterator(pyobjects.AbstractClass): def __init__(self, holding=None): @@ -539,7 +545,8 @@ def __init__(self, holding=None): self.holding = holding self.attributes = { 'next': BuiltinName(BuiltinFunction(self.holding)), - '__iter__': BuiltinName(BuiltinFunction(get_iterator(self.holding))), + '__iter__': BuiltinName(BuiltinFunction( + get_iterator(self.holding))), 'close': BuiltinName(BuiltinFunction()), 'send': BuiltinName(BuiltinFunction()), 'throw': BuiltinName(BuiltinFunction())} @@ -556,10 +563,10 @@ def get_returned_object(self, args): class File(BuiltinClass): def __init__(self): - self_object = pyobjects.PyObject(self) str_object = get_str() str_list = get_list(get_str()) attributes = {} + def add(name, returned=None, function=None): builtin = getattr(file, name, None) attributes[name] = BuiltinName( @@ -587,7 +594,8 @@ def __init__(self, fget=None, fset=None, fdel=None, fdoc=None): 'fget': BuiltinName(BuiltinFunction()), 'fset': BuiltinName(pynames.UnboundName()), 'fdel': BuiltinName(pynames.UnboundName()), - '__new__': BuiltinName(BuiltinFunction(function=_property_function))} + '__new__': BuiltinName( + BuiltinFunction(function=_property_function))} super(Property, self).__init__(property, attributes) def get_property_object(self, args): @@ -631,7 +639,7 @@ def get_attributes(self): return {} def get_name(self): - return 'lambda' + return 'lambda' def get_param_names(self, special_args=True): result = [node.id for node in self.arguments.args @@ -671,7 +679,7 @@ def _infer_sequence_for_pyname(pyname): iter = obj.get_returned_object(args) if iter is not None and 'next' in iter: holding = iter['next'].get_object().\ - get_returned_object(args) + get_returned_object(args) return holding @@ -690,12 +698,15 @@ def _create_builtin(args, creator): def _range_function(args): return get_list() + def _reversed_function(args): return 
_create_builtin(args, get_iterator) + def _sorted_function(args): return _create_builtin(args, get_list) + def _super_function(args): passed_class, passed_self = args.get_arguments(['type', 'self']) if passed_self is None: @@ -709,6 +720,7 @@ def _super_function(args): return pyobjects.PyObject(supers[0]) return passed_self + def _zip_function(args): args = args.get_pynames(['sequence']) objects = [] @@ -721,6 +733,7 @@ def _zip_function(args): tuple = get_tuple(*objects) return get_list(tuple) + def _enumerate_function(args): passed = args.get_pynames(['sequence'])[0] if passed is None: @@ -730,6 +743,7 @@ def _enumerate_function(args): tuple = get_tuple(None, holding) return get_iterator(tuple) + def _iter_function(args): passed = args.get_pynames(['sequence'])[0] if passed is None: @@ -738,6 +752,7 @@ def _iter_function(args): holding = _infer_sequence_for_pyname(passed) return get_iterator(holding) + def _input_function(args): return get_str() @@ -751,17 +766,25 @@ def _input_function(args): 'file': BuiltinName(get_file_type()), 'open': BuiltinName(get_file_type()), 'unicode': BuiltinName(get_str_type()), - 'range': BuiltinName(BuiltinFunction(function=_range_function, builtin=range)), - 'reversed': BuiltinName(BuiltinFunction(function=_reversed_function, builtin=reversed)), - 'sorted': BuiltinName(BuiltinFunction(function=_sorted_function, builtin=sorted)), - 'super': BuiltinName(BuiltinFunction(function=_super_function, builtin=super)), - 'property': BuiltinName(BuiltinFunction(function=_property_function, builtin=property)), + 'range': BuiltinName(BuiltinFunction(function=_range_function, + builtin=range)), + 'reversed': BuiltinName(BuiltinFunction(function=_reversed_function, + builtin=reversed)), + 'sorted': BuiltinName(BuiltinFunction(function=_sorted_function, + builtin=sorted)), + 'super': BuiltinName(BuiltinFunction(function=_super_function, + builtin=super)), + 'property': BuiltinName(BuiltinFunction(function=_property_function, + builtin=property)), 
'zip': BuiltinName(BuiltinFunction(function=_zip_function, builtin=zip)), - 'enumerate': BuiltinName(BuiltinFunction(function=_enumerate_function, builtin=enumerate)), + 'enumerate': BuiltinName(BuiltinFunction(function=_enumerate_function, + builtin=enumerate)), 'object': BuiltinName(BuiltinObject()), 'type': BuiltinName(BuiltinType()), - 'iter': BuiltinName(BuiltinFunction(function=_iter_function, builtin=iter)), - 'raw_input': BuiltinName(BuiltinFunction(function=_input_function, builtin=raw_input)), - } + 'iter': BuiltinName(BuiltinFunction(function=_iter_function, + builtin=iter)), + 'raw_input': BuiltinName(BuiltinFunction(function=_input_function, + builtin=raw_input)), +} builtins = BuiltinModule('__builtin__', initial=_initial_builtins) diff --git a/pymode/libs2/rope/base/change.py b/pymode/libs2/rope/base/change.py index 8d19aac1..e9764484 100644 --- a/pymode/libs2/rope/base/change.py +++ b/pymode/libs2/rope/base/change.py @@ -2,7 +2,6 @@ import difflib import os import time -import warnings import rope.base.fscommands from rope.base import taskhandle, exceptions, utils @@ -17,13 +16,13 @@ class Change(object): def do(self, job_set=None): """Perform the change - + .. note:: Do use this directly. Use `Project.do()` instead. """ def undo(self, job_set=None): """Perform the change - + .. note:: Do use this directly. Use `History.undo()` instead. 
""" @@ -97,7 +96,8 @@ def __str__(self): date = datetime.datetime.fromtimestamp(self.time) if date.date() == datetime.date.today(): string_date = 'today' - elif date.date() == (datetime.date.today() - datetime.timedelta(1)): + elif date.date() == (datetime.date.today() - + datetime.timedelta(1)): string_date = 'yesterday' elif date.year == datetime.date.today().year: string_date = date.strftime('%b %d') @@ -257,7 +257,8 @@ class CreateFolder(CreateResource): """ def __init__(self, parent, name): - resource = parent.project.get_folder(self._get_child_path(parent, name)) + resource = parent.project.get_folder( + self._get_child_path(parent, name)) super(CreateFolder, self).__init__(resource) @@ -309,6 +310,7 @@ def count_changes(change): return result return 1 + def create_job_set(task_handle, change): return task_handle.create_jobset(str(change), count_changes(change)) diff --git a/pymode/libs2/rope/base/codeanalyze.py b/pymode/libs2/rope/base/codeanalyze.py index 3d2a2a45..87061912 100644 --- a/pymode/libs2/rope/base/codeanalyze.py +++ b/pymode/libs2/rope/base/codeanalyze.py @@ -18,6 +18,7 @@ def add_change(self, start, end, new_text=None): def get_changed(self): if not self.changes: return None + def compare_changes(change1, change2): return cmp(change1[:2], change2[:2]) self.changes.sort(compare_changes) @@ -131,6 +132,7 @@ def __call__(self): return result _main_chars = re.compile(r'[\'|"|#|\\|\[|\]|\{|\}|\(|\)]') + def _analyze_line(self, line): char = None for match in self._main_chars.finditer(line): @@ -142,8 +144,8 @@ def _analyze_line(self, line): if char * 3 == line[i:i + 3]: self.in_string = char * 3 elif self.in_string == line[i:i + len(self.in_string)] and \ - not (i > 0 and line[i - 1] == '\\' and - not (i > 1 and line[i - 2] == '\\')): + not (i > 0 and line[i - 1] == '\\' and + not (i > 1 and line[i - 2] == '\\')): self.in_string = '' if self.in_string: continue @@ -158,6 +160,7 @@ def _analyze_line(self, line): else: self.continuation = False + def 
custom_generator(lines): return _CustomGenerator(lines)() @@ -189,7 +192,6 @@ def generate_regions(self, start_line=1, end_line=None): # XXX: `block_start` should be at a better position! block_start = 1 readline = LinesToReadline(self.lines, block_start) - shifted = start_line - block_start + 1 try: for start, end in self._logical_lines(readline): real_start = start + block_start - 1 @@ -199,7 +201,7 @@ def generate_regions(self, start_line=1, end_line=None): real_end = end + block_start - 1 if real_start >= start_line: yield (real_start, real_end) - except tokenize.TokenError, e: + except tokenize.TokenError: pass def _block_logical_line(self, block_start, line_number): @@ -254,6 +256,7 @@ def __init__(self, lines, generate=custom_generator): self._generate = generate _starts = None + @property def starts(self): if self._starts is None: @@ -261,6 +264,7 @@ def starts(self): return self._starts _ends = None + @property def ends(self): if self._ends is None: @@ -326,6 +330,7 @@ def get_block_start(lines, lineno, maximum_indents=80): _block_start_pattern = None + def get_block_start_patterns(): global _block_start_pattern if not _block_start_pattern: @@ -350,9 +355,10 @@ def count_line_indents(line): def get_string_pattern(): start = r'(\b[uU]?[rR]?)?' 
longstr = r'%s"""(\\.|"(?!"")|\\\n|[^"\\])*"""' % start - shortstr = r'%s"(\\.|[^"\\\n])*"' % start + shortstr = r'%s"(\\.|\\\n|[^"\\])*"' % start return '|'.join([longstr, longstr.replace('"', "'"), shortstr, shortstr.replace('"', "'")]) + def get_comment_pattern(): return r'#[^\n]*' diff --git a/pymode/libs2/rope/base/default_config.py b/pymode/libs2/rope/base/default_config.py index ffebcd4f..0ee9937d 100644 --- a/pymode/libs2/rope/base/default_config.py +++ b/pymode/libs2/rope/base/default_config.py @@ -14,7 +14,7 @@ def set_prefs(prefs): # 'build/*.o': matches 'build/lib.o' but not 'build/sub/lib.o' # 'build//*.o': matches 'build/lib.o' and 'build/sub/lib.o' prefs['ignored_resources'] = ['*.pyc', '*~', '.ropeproject', - '.hg', '.svn', '_svn', '.git'] + '.hg', '.svn', '_svn', '.git', '.tox'] # Specifies which files should be considered python files. It is # useful when you have scripts inside your project. Only files @@ -79,6 +79,16 @@ def set_prefs(prefs): # appear in the importing namespace. prefs['ignore_bad_imports'] = False + # If `True`, rope will transform a comma list of imports into + # multiple separate import statements when organizing + # imports. + prefs['split_imports'] = False + + # If `True`, rope will sort imports alphabetically by module name + # instead of alphabetically by import statement, with from imports + # after normal imports. 
+ prefs['sort_imports_alphabetically'] = False + def project_opened(project): """This function is called after opening the project""" diff --git a/pymode/libs2/rope/base/evaluate.py b/pymode/libs2/rope/base/evaluate.py index 6736b2a9..faf09407 100644 --- a/pymode/libs2/rope/base/evaluate.py +++ b/pymode/libs2/rope/base/evaluate.py @@ -6,6 +6,7 @@ BadIdentifierError = exceptions.BadIdentifierError + def eval_location(pymodule, offset): """Find the pyname at the offset""" return eval_location2(pymodule, offset)[1] @@ -40,7 +41,8 @@ def eval_str2(holding_scope, name): # parenthesizing for handling cases like 'a_var.\nattr' node = ast.parse('(%s)' % name) except SyntaxError: - raise BadIdentifierError('Not a resolvable python identifier selected.') + raise BadIdentifierError( + 'Not a resolvable python identifier selected.') return eval_node2(holding_scope, node) @@ -81,7 +83,8 @@ def get_primary_and_pyname_at(self, offset): keyword_name = self.worder.get_word_at(offset) pyobject = self.get_enclosing_function(offset) if isinstance(pyobject, pyobjects.PyFunction): - return (None, pyobject.get_parameters().get(keyword_name, None)) + return (None, + pyobject.get_parameters().get(keyword_name, None)) # class body if self._is_defined_in_class_body(holding_scope, offset, lineno): class_scope = holding_scope @@ -93,7 +96,8 @@ def get_primary_and_pyname_at(self, offset): except rope.base.exceptions.AttributeNotFoundError: return (None, None) # function header - if self._is_function_name_in_function_header(holding_scope, offset, lineno): + if self._is_function_name_in_function_header(holding_scope, + offset, lineno): name = self.worder.get_primary_at(offset).strip() return (None, holding_scope.parent[name]) # from statement module @@ -118,7 +122,7 @@ def get_enclosing_function(self, offset): if isinstance(pyobject, pyobjects.AbstractFunction): return pyobject elif isinstance(pyobject, pyobjects.AbstractClass) and \ - '__init__' in pyobject: + '__init__' in pyobject: return 
pyobject['__init__'].get_object() elif '__call__' in pyobject: return pyobject['__call__'].get_object() @@ -157,6 +161,7 @@ def _Call(self, node): primary, pyobject = self._get_primary_and_object_for_node(node.func) if pyobject is None: return + def _get_returned(pyobject): args = arguments.create_arguments(primary, pyobject, node, self.scope) @@ -295,7 +300,8 @@ def _call_function(self, node, function_name, other_args=None): return if function_name in pyobject: called = pyobject[function_name].get_object() - if not called or not isinstance(called, pyobjects.AbstractFunction): + if not called or \ + not isinstance(called, pyobjects.AbstractFunction): return args = [node] if other_args: diff --git a/pymode/libs2/rope/base/fscommands.py b/pymode/libs2/rope/base/fscommands.py index 3bc22044..daf118a0 100644 --- a/pymode/libs2/rope/base/fscommands.py +++ b/pymode/libs2/rope/base/fscommands.py @@ -199,12 +199,14 @@ def unicode_to_file_data(contents, encoding=None): except UnicodeEncodeError: return contents.encode('utf-8') + def file_data_to_unicode(data, encoding=None): result = _decode_data(data, encoding) if '\r' in result: result = result.replace('\r\n', '\n').replace('\r', '\n') return result + def _decode_data(data, encoding): if isinstance(data, unicode): return data @@ -227,7 +229,6 @@ def read_file_coding(path): file = open(path, 'b') count = 0 result = [] - buffsize = 10 while True: current = file.read(10) if not current: diff --git a/pymode/libs2/rope/base/libutils.py b/pymode/libs2/rope/base/libutils.py index cb9381e3..4037f183 100644 --- a/pymode/libs2/rope/base/libutils.py +++ b/pymode/libs2/rope/base/libutils.py @@ -3,6 +3,8 @@ import rope.base.project import rope.base.pycore +from rope.base import pyobjectsdef +from rope.base import utils from rope.base import taskhandle @@ -17,7 +19,7 @@ def path_to_resource(project, path, type=None): `Project.get_file()`, and `Project.get_folder()` methods. 
""" - project_path = relative(project.address, path) + project_path = path_relative_to_project_root(project, path) if project_path is None: project_path = rope.base.project._realpath(path) project = rope.base.project.get_no_project() @@ -29,13 +31,19 @@ def path_to_resource(project, path, type=None): return project.get_folder(project_path) return None + +def path_relative_to_project_root(project, path): + return relative(project.address, path) + +@utils.deprecated() def relative(root, path): root = rope.base.project._realpath(root).replace(os.path.sep, '/') path = rope.base.project._realpath(path).replace(os.path.sep, '/') if path == root: - return '' + return '' if path.startswith(root + '/'): - return path[len(root) + 1:] + return path[len(root) + 1:] + def report_change(project, path, old_content): """Report that the contents of file at `path` was changed @@ -52,14 +60,63 @@ def report_change(project, path, old_content): rope.base.pycore.perform_soa_on_changed_scopes(project, resource, old_content) + +def analyze_module(project, resource): + """Perform static object analysis on a python file in the project + + Note that this might be really time consuming. + """ + project.pycore.analyze_module(resource) + + def analyze_modules(project, task_handle=taskhandle.NullTaskHandle()): """Perform static object analysis on all python files in the project Note that this might be really time consuming. """ - resources = project.pycore.get_python_files() + resources = project.get_python_files() job_set = task_handle.create_jobset('Analyzing Modules', len(resources)) for resource in resources: job_set.started_job(resource.path) - project.pycore.analyze_module(resource) + analyze_module(project, resource) job_set.finished_job() + + +def get_string_module(project, code, resource=None, force_errors=False): + """Returns a `PyObject` object for the given code + + If `force_errors` is `True`, `exceptions.ModuleSyntaxError` is + raised if module has syntax errors. 
This overrides + ``ignore_syntax_errors`` project config. + + """ + return pyobjectsdef.PyModule(project.pycore, code, resource, + force_errors=force_errors) + + +def get_string_scope(project, code, resource=None): + """Returns a `Scope` object for the given code""" + return get_string_module(project, code, resource).get_scope() + + +def is_python_file(project, resource): + return project.pycore.is_python_file(resource) + + +def modname(resource): + if resource.is_folder(): + module_name = resource.name + source_folder = resource.parent + elif resource.name == '__init__.py': + module_name = resource.parent.name + source_folder = resource.parent.parent + else: + module_name = resource.name[:-3] + source_folder = resource.parent + + while source_folder != source_folder.parent and \ + source_folder.has_child('__init__.py'): + module_name = source_folder.name + '.' + module_name + source_folder = source_folder.parent + + return module_name diff --git a/pymode/libs2/rope/base/oi/doa.py b/pymode/libs2/rope/base/oi/doa.py index 12f50553..1b2a00fc 100644 --- a/pymode/libs2/rope/base/oi/doa.py +++ b/pymode/libs2/rope/base/oi/doa.py @@ -25,11 +25,11 @@ def run(self): """Execute the process""" env = dict(os.environ) file_path = self.file.real_path - path_folders = self.pycore.get_source_folders() + \ - self.pycore.get_python_path_folders() + path_folders = self.pycore.project.get_source_folders() + \ + self.pycore.project.get_python_path_folders() env['PYTHONPATH'] = os.pathsep.join(folder.real_path for folder in path_folders) - runmod_path = self.pycore.find_module('rope.base.oi.runmod').real_path + runmod_path = self.pycore.project.find_module('rope.base.oi.runmod').real_path self.receiver = None self._init_data_receiving() send_info = '-' @@ -56,7 +56,8 @@ def _init_data_receiving(self): self.receiver = _SocketReceiver() else: self.receiver = _FIFOReceiver() - self.receiving_thread = threading.Thread(target=self._receive_information) + self.receiving_thread = 
threading.Thread( + target=self._receive_information) self.receiving_thread.setDaemon(True) self.receiving_thread.start() @@ -114,7 +115,7 @@ def __init__(self): try: self.server_socket.bind(('', self.data_port)) break - except socket.error, e: + except socket.error: self.data_port += 1 self.server_socket.listen(1) diff --git a/pymode/libs2/rope/base/oi/runmod.py b/pymode/libs2/rope/base/oi/runmod.py index 8170623c..e332d7e6 100644 --- a/pymode/libs2/rope/base/oi/runmod.py +++ b/pymode/libs2/rope/base/oi/runmod.py @@ -40,9 +40,9 @@ def send_data(self, data): def close(self): self.my_file.close() - def _cached(func): cache = {} + def newfunc(self, arg): if arg in cache: return cache[arg] @@ -76,7 +76,8 @@ def on_function_call(self, frame, event, arg): code = frame.f_code for argname in code.co_varnames[:code.co_argcount]: try: - args.append(self._object_to_persisted_form(frame.f_locals[argname])) + args.append(self._object_to_persisted_form( + frame.f_locals[argname])) except (TypeError, AttributeError): args.append(('unknown',)) try: @@ -94,17 +95,19 @@ def on_function_call(self, frame, event, arg): def _is_an_interesting_call(self, frame): #if frame.f_code.co_name in ['?', '']: # return False - #return not frame.f_back or not self._is_code_inside_project(frame.f_back.f_code) + #return not frame.f_back or + # not self._is_code_inside_project(frame.f_back.f_code) if not self._is_code_inside_project(frame.f_code) and \ - (not frame.f_back or not self._is_code_inside_project(frame.f_back.f_code)): + (not frame.f_back or + not self._is_code_inside_project(frame.f_back.f_code)): return False return True def _is_code_inside_project(self, code): source = self._path(code.co_filename) return source is not None and os.path.exists(source) and \ - _realpath(source).startswith(self.project_root) + _realpath(source).startswith(self.project_root) @_cached def _get_persisted_code(self, object_): @@ -128,7 +131,8 @@ def _get_persisted_builtin(self, object_): holding = None if 
len(object_) > 0: holding = object_[0] - return ('builtin', 'list', self._object_to_persisted_form(holding)) + return ('builtin', 'list', + self._object_to_persisted_form(holding)) if isinstance(object_, dict): keys = None values = None @@ -152,7 +156,8 @@ def _get_persisted_builtin(self, object_): for o in object_: holding = o break - return ('builtin', 'set', self._object_to_persisted_form(holding)) + return ('builtin', 'set', + self._object_to_persisted_form(holding)) return ('unknown',) def _object_to_persisted_form(self, object_): diff --git a/pymode/libs2/rope/base/oi/soa.py b/pymode/libs2/rope/base/oi/soa.py index 38cd5c9d..a34b970e 100644 --- a/pymode/libs2/rope/base/oi/soa.py +++ b/pymode/libs2/rope/base/oi/soa.py @@ -26,9 +26,11 @@ def _analyze_node(pycore, pydefined, should_analyze, new_followed_calls = max(0, followed_calls - 1) return_true = lambda pydefined: True return_false = lambda pydefined: False + def _follow(pyfunction): _analyze_node(pycore, pyfunction, return_true, return_false, new_followed_calls) + if not followed_calls: _follow = None visitor = SOAVisitor(pycore, pydefined, _follow) @@ -113,7 +115,8 @@ def _Assign(self, node): args_pynames.append(evaluate.eval_node(self.scope, subscript.slice.value)) value = rope.base.oi.soi._infer_assignment( - rope.base.pynames.AssignmentValue(node.value, levels), self.pymodule) + rope.base.pynames.AssignmentValue(node.value, levels), + self.pymodule) args_pynames.append(rope.base.pynames.UnboundName(value)) if instance is not None and value is not None: pyobject = instance.get_object() diff --git a/pymode/libs2/rope/base/oi/soi.py b/pymode/libs2/rope/base/oi/soi.py index bf40af90..5a11b5ef 100644 --- a/pymode/libs2/rope/base/oi/soi.py +++ b/pymode/libs2/rope/base/oi/soi.py @@ -30,6 +30,7 @@ def infer_returned_object(pyfunction, args): return result return object_info.get_returned(pyfunction, args) + @_ignore_inferred def infer_parameter_objects(pyfunction): """Infer the `PyObject`\s of parameters of 
this `PyFunction`""" @@ -40,6 +41,7 @@ def infer_parameter_objects(pyfunction): _handle_first_parameter(pyfunction, result) return result + def _handle_first_parameter(pyobject, parameters): kind = pyobject.get_kind() if parameters is None or kind not in ['method', 'classmethod']: @@ -53,6 +55,7 @@ def _handle_first_parameter(pyobject, parameters): if kind == 'classmethod': parameters[0] = pyobject.parent + @_ignore_inferred def infer_assigned_object(pyname): if not pyname.assignments: @@ -62,6 +65,7 @@ def infer_assigned_object(pyname): if result is not None: return result + def get_passed_objects(pyfunction, parameter_index): object_info = pyfunction.pycore.object_info result = object_info.get_passed_objects(pyfunction, @@ -72,6 +76,7 @@ def get_passed_objects(pyfunction, parameter_index): result.append(statically_inferred[parameter_index]) return result + def _infer_returned(pyobject, args): if args: # HACK: Setting parameter objects manually @@ -99,12 +104,14 @@ def _infer_returned(pyobject, args): except rope.base.pyobjects.IsBeingInferredError: pass + def _parameter_objects(pyobject): params = pyobject.get_param_names(special_args=False) return [rope.base.pyobjects.get_unknown()] * len(params) # handling `rope.base.pynames.AssignmentValue` + @_ignore_inferred def _infer_assignment(assignment, pymodule): result = _follow_pyname(assignment, pymodule) @@ -116,6 +123,7 @@ def _infer_assignment(assignment, pymodule): return None return _follow_levels(assignment, pyobject) + def _follow_levels(assignment, pyobject): for index in assignment.levels: if isinstance(pyobject.get_type(), rope.base.builtins.Tuple): @@ -132,6 +140,7 @@ def _follow_levels(assignment, pyobject): break return pyobject + @_ignore_inferred def _follow_pyname(assignment, pymodule, lineno=None): assign_node = assignment.ast_node @@ -149,6 +158,7 @@ def _follow_pyname(assignment, pymodule, lineno=None): arguments.ObjectArguments([arg])) return pyname, result + @_ignore_inferred def 
_follow_evaluations(assignment, pyname, pyobject): new_pyname = pyname @@ -181,6 +191,7 @@ def _get_lineno_for_node(assign_node): return assign_node.lineno return 1 + def _get_attribute(pyobject, name): if pyobject is not None and name in pyobject: return pyobject[name] diff --git a/pymode/libs2/rope/base/oi/transform.py b/pymode/libs2/rope/base/oi/transform.py index 5a9d600e..aa29c373 100644 --- a/pymode/libs2/rope/base/oi/transform.py +++ b/pymode/libs2/rope/base/oi/transform.py @@ -120,7 +120,6 @@ def transform(self, textual): return None def builtin_to_pyobject(self, textual): - name = textual[1] method = getattr(self, 'builtin_%s_to_pyobject' % textual[1], None) if method is not None: return method(textual) @@ -203,7 +202,7 @@ def instance_to_pyobject(self, textual): def _get_pymodule(self, path): resource = self.path_to_resource(path) if resource is not None: - return self.project.pycore.resource_to_pyobject(resource) + return self.project.get_pymodule(resource) def path_to_resource(self, path): try: @@ -221,7 +220,7 @@ def path_to_resource(self, path): class DOITextualToPyObject(TextualToPyObject): """For transforming textual form to `PyObject` - + The textual form DOI uses is different from rope's standard textual form. The reason is that we cannot find the needed information by analyzing live objects. 
This class can be @@ -253,7 +252,8 @@ def _class_to_pyobject(self, textual): isinstance(suspected, rope.base.pyobjects.PyClass): return suspected else: - lineno = self._find_occurrence(name, pymodule.get_resource().read()) + lineno = self._find_occurrence(name, + pymodule.get_resource().read()) if lineno is not None: inner_scope = module_scope.get_inner_scope_for_line(lineno) return inner_scope.pyobject @@ -278,8 +278,8 @@ def _find_occurrence(self, name, source): def path_to_resource(self, path): import rope.base.libutils - root = self.project.address - relpath = rope.base.libutils.relative(root, path) + relpath = rope.base.libutils.path_relative_to_project_root( + self.project, path) if relpath is not None: path = relpath return super(DOITextualToPyObject, self).path_to_resource(path) diff --git a/pymode/libs2/rope/base/prefs.py b/pymode/libs2/rope/base/prefs.py index 674a58ec..2ab45dac 100644 --- a/pymode/libs2/rope/base/prefs.py +++ b/pymode/libs2/rope/base/prefs.py @@ -27,7 +27,7 @@ def get(self, key, default=None): def add_callback(self, key, callback): """Add `key` preference with `callback` function - + Whenever `key` is set the callback is called with the given `value` as parameter. 
diff --git a/pymode/libs2/rope/base/project.py b/pymode/libs2/rope/base/project.py index 97d2dd3e..23597f8c 100644 --- a/pymode/libs2/rope/base/project.py +++ b/pymode/libs2/rope/base/project.py @@ -6,8 +6,9 @@ import rope.base.fscommands from rope.base import exceptions, taskhandle, prefs, history, pycore, utils -from rope.base.resourceobserver import * +import rope.base.resourceobserver as resourceobserver from rope.base.resources import File, Folder, _ResourceMatcher +from rope.base.exceptions import ModuleNotFoundError class _Project(object): @@ -17,6 +18,7 @@ def __init__(self, fscommands): self.fscommands = fscommands self.prefs = prefs.Prefs() self.data_files = _DataFiles(self) + self._custom_source_folders = [] def get_resource(self, resource_name): """Get a resource in a project. @@ -41,6 +43,40 @@ def get_resource(self, resource_name): raise exceptions.ResourceNotFoundError('Unknown resource ' + resource_name) + def get_module(self, name, folder=None): + """Returns a `PyObject` if the module was found.""" + # check if this is a builtin module + pymod = self.pycore.builtin_module(name) + if pymod is not None: + return pymod + module = self.find_module(name, folder) + if module is None: + raise ModuleNotFoundError('Module %s not found' % name) + return self.pycore.resource_to_pyobject(module) + + def get_python_path_folders(self): + result = [] + for src in self.prefs.get('python_path', []) + sys.path: + try: + src_folder = get_no_project().get_resource(src) + result.append(src_folder) + except exceptions.ResourceNotFoundError: + pass + return result + + # INFO: It was decided not to cache source folders, since: + # - Does not take much time when the root folder contains + # packages, that is most of the time + # - We need a separate resource observer; `self.observer` + # does not get notified about module and folder creations + def get_source_folders(self): + """Returns project source folders""" + if self.root is None: + return [] + result = 
list(self._custom_source_folders) + result.extend(self.pycore._find_source_folders(self.root)) + return result + def validate(self, folder): """Validate files and folders contained in this folder @@ -71,6 +107,9 @@ def do(self, changes, task_handle=taskhandle.NullTaskHandle()): """ self.history.do(changes, task_handle=task_handle) + def get_pymodule(self, resource, force_errors=False): + return self.pycore.resource_to_pyobject(resource, force_errors) + def get_pycore(self): return self.pycore @@ -82,12 +121,45 @@ def get_folder(self, path): """Get the folder with `path` (it may not exist)""" return Folder(self, path) - def is_ignored(self, resource): - return False - def get_prefs(self): return self.prefs + def get_relative_module(self, name, folder, level): + module = self.find_relative_module(name, folder, level) + if module is None: + raise ModuleNotFoundError('Module %s not found' % name) + return self.pycore.resource_to_pyobject(module) + + def find_module(self, modname, folder=None): + """Returns a resource corresponding to the given module + + returns None if it can not be found + """ + for src in self.get_source_folders(): + module = _find_module_in_folder(src, modname) + if module is not None: + return module + for src in self.get_python_path_folders(): + module = _find_module_in_folder(src, modname) + if module is not None: + return module + if folder is not None: + module = _find_module_in_folder(folder, modname) + if module is not None: + return module + return None + + def find_relative_module(self, modname, folder, level): + for i in range(level - 1): + folder = folder.parent + if modname == '': + return folder + else: + return _find_module_in_folder(folder, modname) + + def is_ignored(self, resource): + return False + def _get_resource_path(self, name): pass @@ -144,10 +216,22 @@ def __init__(self, projectroot, fscommands=None, if ropefolder is not None: self.prefs['ignored_resources'] = [ropefolder] self._init_prefs(prefs) + 
self._init_source_folders() + + @utils.deprecated('Delete once deprecated functions are gone') + def _init_source_folders(self): + for path in self.prefs.get('source_folders', []): + folder = self.get_resource(path) + self._custom_source_folders.append(folder) def get_files(self): return self.file_list.get_files() + def get_python_files(self): + """Returns all python files available in the project""" + return [resource for resource in self.get_files() + if self.pycore.is_python_file(resource)] + def _get_resource_path(self, name): return os.path.join(self._address, *name.split('/')) @@ -244,6 +328,9 @@ def get_resource(self, name): def get_files(self): return [] + def get_python_files(self): + return [] + _no_project = None @@ -258,7 +345,7 @@ class _FileListCacher(object): def __init__(self, project): self.project = project self.files = None - rawobserver = ResourceObserver( + rawobserver = resourceobserver.ResourceObserver( self._changed, self._invalid, self._invalid, self._invalid, self._invalid) self.project.add_observer(rawobserver) @@ -334,7 +421,7 @@ def write(self): def _can_compress(self): try: - import gzip + import gzip # noqa return True except ImportError: return False @@ -371,5 +458,24 @@ def _realpath(path): if sys.platform == 'cygwin': if path[1:3] == ':\\': return path + elif path[1:3] == ':/': + path = "/cygdrive/" + path[0] + path[2:] return os.path.abspath(os.path.expanduser(path)) return os.path.realpath(os.path.abspath(os.path.expanduser(path))) + + +def _find_module_in_folder(folder, modname): + module = folder + packages = modname.split('.') + for pkg in packages[:-1]: + if module.is_folder() and module.has_child(pkg): + module = module.get_child(pkg) + else: + return None + if module.is_folder(): + if module.has_child(packages[-1]) and \ + module.get_child(packages[-1]).is_folder(): + return module.get_child(packages[-1]) + elif module.has_child(packages[-1] + '.py') and \ + not module.get_child(packages[-1] + '.py').is_folder(): + return 
module.get_child(packages[-1] + '.py') diff --git a/pymode/libs2/rope/base/pycore.py b/pymode/libs2/rope/base/pycore.py index 32056a0f..c4c1195a 100644 --- a/pymode/libs2/rope/base/pycore.py +++ b/pymode/libs2/rope/base/pycore.py @@ -3,15 +3,19 @@ import sys import warnings +import rope.base.libutils +import rope.base.resourceobserver +import rope.base.resources import rope.base.oi.doa import rope.base.oi.objectinfo import rope.base.oi.soa -from rope.base import ast, exceptions, taskhandle, utils, stdmods -from rope.base.exceptions import ModuleNotFoundError -from rope.base.pyobjectsdef import PyModule, PyPackage, PyClass -import rope.base.resources -import rope.base.resourceobserver from rope.base import builtins +from rope.base import exceptions +from rope.base import stdmods +from rope.base import taskhandle +from rope.base import utils +from rope.base.exceptions import ModuleNotFoundError +from rope.base.pyobjectsdef import PyModule, PyPackage class PyCore(object): @@ -25,7 +29,6 @@ def __init__(self, project): self.object_info = rope.base.oi.objectinfo.ObjectInfoManager(project) self._init_python_files() self._init_automatic_soa() - self._init_source_folders() def _init_python_files(self): self.python_matcher = None @@ -38,15 +41,10 @@ def _init_resource_observer(self): callback = self._invalidate_resource_cache observer = rope.base.resourceobserver.ResourceObserver( changed=callback, moved=callback, removed=callback) - self.observer = rope.base.resourceobserver.FilteredResourceObserver(observer) + self.observer = \ + rope.base.resourceobserver.FilteredResourceObserver(observer) self.project.add_observer(self.observer) - def _init_source_folders(self): - self._custom_source_folders = [] - for path in self.project.prefs.get('source_folders', []): - folder = self.project.get_resource(path) - self._custom_source_folders.append(folder) - def _init_automatic_soa(self): if not self.automatic_soa: return @@ -62,7 +60,7 @@ def automatic_soa(self): def 
_file_changed_for_soa(self, resource, new_resource=None): old_contents = self.project.history.\ - contents_before_current_change(resource) + contents_before_current_change(resource) if old_contents is not None: perform_soa_on_changed_scopes(self.project, resource, old_contents) @@ -73,16 +71,10 @@ def is_python_file(self, resource): return resource.name.endswith('.py') return self.python_matcher.does_match(resource) + @utils.deprecated('Use `project.get_module` instead') def get_module(self, name, folder=None): """Returns a `PyObject` if the module was found.""" - # check if this is a builtin module - pymod = self._builtin_module(name) - if pymod is not None: - return pymod - module = self.find_module(name, folder) - if module is None: - raise ModuleNotFoundError('Module %s not found' % name) - return self.resource_to_pyobject(module) + return self.project.get_module(name, folder) def _builtin_submodules(self, modname): result = {} @@ -90,18 +82,17 @@ def _builtin_submodules(self, modname): if extension.startswith(modname + '.'): name = extension[len(modname) + 1:] if '.' 
not in name: - result[name] = self._builtin_module(extension) + result[name] = self.builtin_module(extension) return result - def _builtin_module(self, name): + def builtin_module(self, name): return self.extension_cache.get_pymodule(name) + @utils.deprecated('Use `project.get_relative_module` instead') def get_relative_module(self, name, folder, level): - module = self.find_relative_module(name, folder, level) - if module is None: - raise ModuleNotFoundError('Module %s not found' % name) - return self.resource_to_pyobject(module) + return self.project.get_relative_module(name, folder, level) + @utils.deprecated('Use `libutils.get_string_module` instead') def get_string_module(self, code, resource=None, force_errors=False): """Returns a `PyObject` object for the given code @@ -112,92 +103,48 @@ def get_string_module(self, code, resource=None, force_errors=False): """ return PyModule(self, code, resource, force_errors=force_errors) + @utils.deprecated('Use `libutils.get_string_scope` instead') def get_string_scope(self, code, resource=None): """Returns a `Scope` object for the given code""" - return self.get_string_module(code, resource).get_scope() + return rope.base.libutils.get_string_scope(code, resource) def _invalidate_resource_cache(self, resource, new_resource=None): for observer in self.cache_observers: observer(resource) - def _find_module_in_folder(self, folder, modname): - module = folder - packages = modname.split('.') - for pkg in packages[:-1]: - if module.is_folder() and module.has_child(pkg): - module = module.get_child(pkg) - else: - return None - if module.is_folder(): - if module.has_child(packages[-1]) and \ - module.get_child(packages[-1]).is_folder(): - return module.get_child(packages[-1]) - elif module.has_child(packages[-1] + '.py') and \ - not module.get_child(packages[-1] + '.py').is_folder(): - return module.get_child(packages[-1] + '.py') - + @utils.deprecated('Use `project.get_python_path_folders` instead') def 
get_python_path_folders(self): - import rope.base.project - result = [] - for src in self.project.prefs.get('python_path', []) + sys.path: - try: - src_folder = rope.base.project.get_no_project().get_resource(src) - result.append(src_folder) - except rope.base.exceptions.ResourceNotFoundError: - pass - return result + return self.project.get_python_path_folders() + @utils.deprecated('Use `project.find_module` instead') def find_module(self, modname, folder=None): """Returns a resource corresponding to the given module returns None if it can not be found """ - return self._find_module(modname, folder) + return self.project.find_module(modname, folder) + @utils.deprecated('Use `project.find_relative_module` instead') def find_relative_module(self, modname, folder, level): - for i in range(level - 1): - folder = folder.parent - if modname == '': - return folder - else: - return self._find_module_in_folder(folder, modname) - - def _find_module(self, modname, folder=None): - """Return `modname` module resource""" - for src in self.get_source_folders(): - module = self._find_module_in_folder(src, modname) - if module is not None: - return module - for src in self.get_python_path_folders(): - module = self._find_module_in_folder(src, modname) - if module is not None: - return module - if folder is not None: - module = self._find_module_in_folder(folder, modname) - if module is not None: - return module - return None + return self.project.find_relative_module(modname, folder, level) # INFO: It was decided not to cache source folders, since: # - Does not take much time when the root folder contains # packages, that is most of the time # - We need a separate resource observer; `self.observer` # does not get notified about module and folder creations + @utils.deprecated('Use `project.get_source_folders` instead') def get_source_folders(self): """Returns project source folders""" - if self.project.root is None: - return [] - result = list(self._custom_source_folders) - 
result.extend(self._find_source_folders(self.project.root)) - return result + return self.project.get_source_folders() def resource_to_pyobject(self, resource, force_errors=False): return self.module_cache.get_pymodule(resource, force_errors) + @utils.deprecated('Use `project.get_python_files` instead') def get_python_files(self): """Returns all python files available in the project""" - return [resource for resource in self.project.get_files() - if self.is_python_file(resource)] + return self.project.get_python_files() def _is_package(self, folder): if folder.has_child('__init__.py') and \ @@ -270,22 +217,9 @@ def get_classes(self, task_handle=taskhandle.NullTaskHandle()): def __str__(self): return str(self.module_cache) + str(self.object_info) + @utils.deprecated('Use `libutils.modname` instead') def modname(self, resource): - if resource.is_folder(): - module_name = resource.name - source_folder = resource.parent - elif resource.name == '__init__.py': - module_name = resource.parent.name - source_folder = resource.parent.parent - else: - module_name = resource.name[:-3] - source_folder = resource.parent - - while source_folder != source_folder.parent and \ - source_folder.has_child('__init__.py'): - module_name = source_folder.name + '.' 
+ module_name - source_folder = source_folder.parent - return module_name + return rope.base.libutils.modname(resource) @property @utils.cacheit @@ -355,9 +289,11 @@ def perform_soa_on_changed_scopes(project, resource, old_contents): new_contents = resource.read() # detecting changes in new_contents relative to old_contents detector = _TextChangeDetector(new_contents, old_contents) + def search_subscopes(pydefined): scope = pydefined.get_scope() return detector.is_changed(scope.get_start(), scope.get_end()) + def should_analyze(pydefined): scope = pydefined.get_scope() start = scope.get_start() diff --git a/pymode/libs2/rope/base/pynames.py b/pymode/libs2/rope/base/pynames.py index 79bba156..5d489814 100644 --- a/pymode/libs2/rope/base/pynames.py +++ b/pymode/libs2/rope/base/pynames.py @@ -57,7 +57,7 @@ def __init__(self, ast_node, levels=None, evaluation='', """ self.ast_node = ast_node - if levels == None: + if levels is None: self.levels = [] else: self.levels = levels @@ -112,15 +112,16 @@ def _get_pymodule(self): if self.pymodule.get() is None: pycore = self.importing_module.pycore if self.resource is not None: - self.pymodule.set(pycore.resource_to_pyobject(self.resource)) + self.pymodule.set(pycore.project.get_pymodule(self.resource)) elif self.module_name is not None: try: if self.level == 0: - pymodule = pycore.get_module(self.module_name, - self._current_folder()) + pymodule = pycore.project.get_module( + self.module_name, self._current_folder()) else: - pymodule = pycore.get_relative_module( - self.module_name, self._current_folder(), self.level) + pymodule = pycore.project.get_relative_module( + self.module_name, self._current_folder(), + self.level) self.pymodule.set(pymodule) except exceptions.ModuleNotFoundError: pass @@ -172,6 +173,7 @@ def _circular_inference(): raise rope.base.pyobjects.IsBeingInferredError( 'Circular Object Inference') + class _Inferred(object): def __init__(self, get_inferred, concluded=None): diff --git 
a/pymode/libs2/rope/base/pyobjectsdef.py b/pymode/libs2/rope/base/pyobjectsdef.py index 50b24360..a738b4de 100644 --- a/pymode/libs2/rope/base/pyobjectsdef.py +++ b/pymode/libs2/rope/base/pyobjectsdef.py @@ -3,16 +3,17 @@ import rope.base.builtins import rope.base.oi.soi import rope.base.pyscopes +import rope.base.libutils from rope.base import (pynamesdef as pynames, exceptions, ast, astutils, pyobjects, fscommands, arguments, utils) -from rope.base.pyobjects import * class PyFunction(pyobjects.PyFunction): def __init__(self, pycore, ast_node, parent): - AbstractFunction.__init__(self) - PyDefinedObject.__init__(self, pycore, ast_node, parent) + rope.base.pyobjects.AbstractFunction.__init__(self) + rope.base.pyobjects.PyDefinedObject.__init__( + self, pycore, ast_node, parent) self.arguments = self.ast_node.args self.parameter_pyobjects = pynames._Inferred( self._infer_parameters, self.get_module()._get_concluded_data()) @@ -109,8 +110,9 @@ class PyClass(pyobjects.PyClass): def __init__(self, pycore, ast_node, parent): self.visitor_class = _ClassVisitor - AbstractClass.__init__(self) - PyDefinedObject.__init__(self, pycore, ast_node, parent) + rope.base.pyobjects.AbstractClass.__init__(self) + rope.base.pyobjects.PyDefinedObject.__init__( + self, pycore, ast_node, parent) self.parent = parent self._superclasses = self.get_module()._get_concluded_data() @@ -134,8 +136,9 @@ def _get_bases(self): base = rope.base.evaluate.eval_node(self.parent.get_scope(), base_name) if base is not None and \ - base.get_object().get_type() == get_base_type('Type'): - result.append(base.get_object()) + base.get_object().get_type() == \ + rope.base.pyobjects.get_base_type('Type'): + result.append(base.get_object()) return result def _create_scope(self): @@ -213,7 +216,7 @@ def __init__(self, pycore, resource=None, force_errors=False): self.resource = resource init_dot_py = self._get_init_dot_py() if init_dot_py is not None: - ast_node = pycore.resource_to_pyobject( + ast_node = 
pycore.project.get_pymodule( init_dot_py, force_errors=force_errors).get_ast() else: ast_node = ast.parse('\n') @@ -221,7 +224,7 @@ def __init__(self, pycore, resource=None, force_errors=False): def _create_structural_attributes(self): result = {} - modname = self.pycore.modname(self.resource) + modname = rope.base.libutils.modname(self.resource) extension_submodules = self.pycore._builtin_submodules(modname) for name, module in extension_submodules.iteritems(): result[name] = rope.base.builtins.BuiltinName(module) @@ -235,7 +238,7 @@ def _create_concluded_attributes(self): result = {} init_dot_py = self._get_init_dot_py() if init_dot_py: - init_object = self.pycore.resource_to_pyobject(init_dot_py) + init_object = self.pycore.project.get_pymodule(init_dot_py) result.update(init_object.get_attributes()) return result @@ -245,13 +248,14 @@ def _get_child_resources(self): if child.is_folder(): result[child.name] = child elif child.name.endswith('.py') and \ - child.name != '__init__.py': + child.name != '__init__.py': name = child.name[:-3] result[name] = child return result def _get_init_dot_py(self): - if self.resource is not None and self.resource.has_child('__init__.py'): + if self.resource is not None and \ + self.resource.has_child('__init__.py'): return self.resource.get_child('__init__.py') else: return None @@ -262,7 +266,7 @@ def _create_scope(self): def get_module(self): init_dot_py = self._get_init_dot_py() if init_dot_py: - return self.pycore.resource_to_pyobject(init_dot_py) + return self.pycore.project.get_pymodule(init_dot_py) return self @@ -329,7 +333,9 @@ def _FunctionDef(self, node): if isinstance(decorator, ast.Name) and decorator.id == 'property': if isinstance(self, _ClassVisitor): type_ = rope.base.builtins.Property(pyfunction) - arg = pynames.UnboundName(PyObject(self.owner_object)) + arg = pynames.UnboundName( + rope.base.pyobjects.PyObject(self.owner_object)) + def _eval(type_=type_, arg=arg): return type_.get_property_object( 
arguments.ObjectArguments([arg])) @@ -347,7 +353,7 @@ def _AugAssign(self, node): pass def _For(self, node): - names = self._update_evaluated(node.target, node.iter, + names = self._update_evaluated(node.target, node.iter, # noqa '.__iter__().next()') for child in node.body + node.orelse: ast.walk(child, self) @@ -362,7 +368,7 @@ def _assigned(self, name, assignment): self.names[name] = pyname def _update_evaluated(self, targets, assigned, - evaluation= '', eval_type=False): + evaluation='', eval_type=False): result = {} names = astutils.get_name_levels(targets) for name, levels in names: @@ -430,7 +436,8 @@ def _ImportFrom(self, node): def _is_ignored_import(self, imported_module): if not self.pycore.project.prefs.get('ignore_bad_imports', False): return False - return not isinstance(imported_module.get_object(), AbstractModule) + return not isinstance(imported_module.get_object(), + rope.base.pyobjects.AbstractModule) def _Global(self, node): module = self.get_module() diff --git a/pymode/libs2/rope/base/pyscopes.py b/pymode/libs2/rope/base/pyscopes.py index a00381b7..0bed19a9 100644 --- a/pymode/libs2/rope/base/pyscopes.py +++ b/pymode/libs2/rope/base/pyscopes.py @@ -230,8 +230,8 @@ def get_holding_scope(self, module_scope, lineno, line_indents=None): current_scope = module_scope new_scope = current_scope while new_scope is not None and \ - (new_scope.get_kind() == 'Module' or - self._get_scope_indents(new_scope) <= line_indents): + (new_scope.get_kind() == 'Module' or + self._get_scope_indents(new_scope) <= line_indents): current_scope = new_scope if current_scope.get_start() == lineno and \ current_scope.get_kind() != 'Module': @@ -268,7 +268,7 @@ def find_scope_end(self, scope): else: body_indents = self._get_body_indents(scope) for l in self.logical_lines.generate_starts( - min(end + 1, self.lines.length()), self.lines.length() + 1): + min(end + 1, self.lines.length()), self.lines.length() + 1): if not self._is_empty_line(l): if self.get_indents(l) < 
body_indents: return end @@ -288,6 +288,7 @@ def code(self): def logical_lines(self): return self.pymodule.logical_lines + class TemporaryScope(Scope): """Currently used for list comprehensions and generator expressions diff --git a/pymode/libs2/rope/base/resourceobserver.py b/pymode/libs2/rope/base/resourceobserver.py index 6d1accbc..7c0937d5 100644 --- a/pymode/libs2/rope/base/resourceobserver.py +++ b/pymode/libs2/rope/base/resourceobserver.py @@ -231,7 +231,8 @@ def _search_resource_changes(self, resource): def _is_changed(self, resource): if self.resources[resource] is None: return False - return self.resources[resource] != self.timekeeper.get_indicator(resource) + return self.resources[resource] != \ + self.timekeeper.get_indicator(resource) def _calculate_new_resource(self, main, new_main, resource): if new_main is None: diff --git a/pymode/libs2/rope/base/resources.py b/pymode/libs2/rope/base/resources.py index 46beadb0..aac755f0 100644 --- a/pymode/libs2/rope/base/resources.py +++ b/pymode/libs2/rope/base/resources.py @@ -1,9 +1,37 @@ +"""Files and folders in a project are represented as resource objects. + +Files and folders are access through `Resource` objects. `Resource` has +two subclasses: `File` and `Folder`. What we care about is that +refactorings and `rope.base.change.Change`s use resources. + +There are two options to create a `Resource` for a path in a project. +Note that in these examples `path` is the path to a file or folder +relative to the project's root. A project's root folder is represented +by an empty string. + + 1) Use the `rope.base.Project.get_resource()` method. E.g.: + + myresource = myproject.get_resource(path) + + + 2) Use the `rope.base.libutils` module. `libutils` has a function + named `path_to_resource()`. 
It takes a project and a path: + + from rope.base import libutils + + myresource = libutils.path_to_resource(myproject, path) + +Once we have a `Resource`, we can retrieve information from it, like +getting the path relative to the project's root (via `path`), reading +from and writing to the resource, moving the resource, etc. +""" + import os import re -import rope.base.change -import rope.base.fscommands +from rope.base import change from rope.base import exceptions +from rope.base import fscommands class Resource(object): @@ -15,12 +43,12 @@ def __init__(self, project, path): def move(self, new_location): """Move resource to `new_location`""" - self._perform_change(rope.base.change.MoveResource(self, new_location), + self._perform_change(change.MoveResource(self, new_location), 'Moving <%s> to <%s>' % (self.path, new_location)) def remove(self): """Remove resource from the project""" - self._perform_change(rope.base.change.RemoveResource(self), + self._perform_change(change.RemoveResource(self), 'Removing <%s>' % self.path) def is_folder(self): @@ -66,7 +94,7 @@ def __hash__(self): return hash(self.path) def _perform_change(self, change_, description): - changes = rope.base.change.ChangeSet(description) + changes = change.ChangeSet(description) changes.add_change(change_) self.project.do(changes) @@ -80,7 +108,7 @@ def __init__(self, project, name): def read(self): data = self.read_bytes() try: - return rope.base.fscommands.file_data_to_unicode(data) + return fscommands.file_data_to_unicode(data) except UnicodeDecodeError, e: raise exceptions.ModuleDecodeError(self.path, e.reason) @@ -93,7 +121,7 @@ def write(self, contents): return except IOError: pass - self._perform_change(rope.base.change.ChangeContents(self, contents), + self._perform_change(change.ChangeContents(self, contents), 'Writing file <%s>' % self.path) def is_folder(self): @@ -114,8 +142,12 @@ def is_folder(self): def get_children(self): """Return the children of this folder""" + try: + children 
= os.listdir(self.real_path) + except OSError: + return [] result = [] - for name in os.listdir(self.real_path): + for name in children: try: child = self.get_child(name) except exceptions.ResourceNotFoundError: @@ -126,13 +158,13 @@ def get_children(self): def create_file(self, file_name): self._perform_change( - rope.base.change.CreateFile(self, file_name), + change.CreateFile(self, file_name), 'Creating file <%s>' % self._get_child_path(file_name)) return self.get_child(file_name) def create_folder(self, folder_name): self._perform_change( - rope.base.change.CreateFolder(self, folder_name), + change.CreateFolder(self, folder_name), 'Creating folder <%s>' % self._get_child_path(folder_name)) return self.get_child(folder_name) @@ -187,8 +219,8 @@ def set_patterns(self, patterns): def _add_pattern(self, pattern): re_pattern = pattern.replace('.', '\\.').\ - replace('*', '[^/]*').replace('?', '[^/]').\ - replace('//', '/(.*/)?') + replace('*', '[^/]*').replace('?', '[^/]').\ + replace('//', '/(.*/)?') re_pattern = '^(.*/)?' 
+ re_pattern + '(/.*)?$' self.compiled_patterns.append(re.compile(re_pattern)) diff --git a/pymode/libs2/rope/base/stdmods.py b/pymode/libs2/rope/base/stdmods.py index b6c9839b..457a4fac 100644 --- a/pymode/libs2/rope/base/stdmods.py +++ b/pymode/libs2/rope/base/stdmods.py @@ -6,12 +6,15 @@ def _stdlib_path(): import distutils.sysconfig - return distutils.sysconfig.get_python_lib(standard_lib=True) + return distutils.sysconfig.get_python_lib(standard_lib=True, + plat_specific=True) + @utils.cached(1) def standard_modules(): return python_modules() | dynload_modules() + @utils.cached(1) def python_modules(): result = set() @@ -27,6 +30,7 @@ def python_modules(): result.add(name[:-3]) return result + @utils.cached(1) def dynload_modules(): result = set(sys.builtin_module_names) @@ -35,6 +39,8 @@ def dynload_modules(): for name in os.listdir(dynload_path): path = os.path.join(dynload_path, name) if os.path.isfile(path): - if name.endswith('.so') or name.endswith('.dll'): + if name.endswith('.dll'): result.add(os.path.splitext(name)[0]) + if name.endswith('.so'): + result.add(os.path.splitext(name)[0].replace('module', '')) return result diff --git a/pymode/libs2/rope/base/taskhandle.py b/pymode/libs2/rope/base/taskhandle.py index 6d4ed856..c1f01b98 100644 --- a/pymode/libs2/rope/base/taskhandle.py +++ b/pymode/libs2/rope/base/taskhandle.py @@ -1,5 +1,3 @@ -import warnings - from rope.base import exceptions diff --git a/pymode/libs2/rope/base/utils.py b/pymode/libs2/rope/base/utils.py index e35ecbf3..11556c13 100644 --- a/pymode/libs2/rope/base/utils.py +++ b/pymode/libs2/rope/base/utils.py @@ -5,6 +5,7 @@ def saveit(func): """A decorator that caches the return value of a function""" name = '_' + func.__name__ + def _wrapper(self, *args, **kwds): if not hasattr(self, name): setattr(self, name, func(self, *args, **kwds)) @@ -13,10 +14,12 @@ def _wrapper(self, *args, **kwds): cacheit = saveit + def prevent_recursion(default): """A decorator that returns the return value 
of `default` in recursions""" def decorator(func): name = '_calling_%s_' % func.__name__ + def newfunc(self, *args, **kwds): if getattr(self, name, False): return default() @@ -46,6 +49,7 @@ def deprecated(message=None): def _decorator(func, message=message): if message is None: message = '%s is deprecated' % func.__name__ + def newfunc(*args, **kwds): warnings.warn(message, DeprecationWarning, stacklevel=2) return func(*args, **kwds) @@ -59,6 +63,7 @@ def decorator(func): return _Cached(func, count) return decorator + class _Cached(object): def __init__(self, func, count): diff --git a/pymode/libs2/rope/base/worder.py b/pymode/libs2/rope/base/worder.py index 08d75f34..c85c6b36 100644 --- a/pymode/libs2/rope/base/worder.py +++ b/pymode/libs2/rope/base/worder.py @@ -257,8 +257,10 @@ def get_splitted_primary_before(self, offset): return (self.raw[real_start:end], '', offset) last_dot_position = word_start if self.code[word_start] != '.': - last_dot_position = self._find_last_non_space_char(word_start - 1) - last_char_position = self._find_last_non_space_char(last_dot_position - 1) + last_dot_position = \ + self._find_last_non_space_char(word_start - 1) + last_char_position = \ + self._find_last_non_space_char(last_dot_position - 1) if self.code[word_start].isspace(): word_start = offset return (self.raw[real_start:last_char_position + 1], @@ -304,8 +306,8 @@ def is_a_function_being_called(self, offset): word_end = self._find_word_end(offset) + 1 next_char = self._find_first_non_space_char(word_end) return next_char < len(self.code) and \ - self.code[next_char] == '(' and \ - not self.is_a_class_or_function_name_in_header(offset) + self.code[next_char] == '(' and \ + not self.is_a_class_or_function_name_in_header(offset) def _find_import_end(self, start): return self._get_line_end(start) @@ -337,7 +339,7 @@ def is_from_statement_module(self, offset): def is_a_name_after_from_import(self, offset): try: - if len(self.code) > offset and self.code[offset] == '\n': + if 
len(self.code) > offset and self.code[offset] == '\n': line_start = self._get_line_start(offset - 1) else: line_start = self._get_line_start(offset) @@ -405,7 +407,6 @@ def is_on_function_call_keyword(self, offset): def find_parens_start_from_inside(self, offset): stop = self._get_line_start(offset) - opens = 1 while offset > stop: if self.code[offset] == '(': break @@ -501,7 +502,7 @@ def is_assigned_in_a_tuple_assignment(self, offset): parens_start = self.find_parens_start_from_inside(offset) # XXX: only handling (x, y) = value return offset < equals_offset and \ - self.code[start:parens_start].strip() == '' + self.code[start:parens_start].strip() == '' def get_function_and_args_in_header(self, offset): offset = self.find_function_offset(offset) @@ -518,7 +519,7 @@ def find_function_offset(self, offset, definition='def '): return self._find_first_non_space_char(def_) def get_lambda_and_args(self, offset): - offset = self.find_function_offset(offset, definition = 'lambda ') - lparens, rparens = self.get_word_parens_range(offset, opening=' ', closing=':') + offset = self.find_function_offset(offset, definition='lambda ') + lparens, rparens = self.get_word_parens_range(offset, opening=' ', + closing=':') return self.raw[offset:rparens + 1] - diff --git a/pymode/libs2/rope/contrib/autoimport.py b/pymode/libs2/rope/contrib/autoimport.py index 4b7b5b05..9670080c 100644 --- a/pymode/libs2/rope/contrib/autoimport.py +++ b/pymode/libs2/rope/contrib/autoimport.py @@ -1,7 +1,13 @@ import re -from rope.base import (exceptions, pynames, resourceobserver, - taskhandle, pyobjects, builtins, resources) +from rope.base import builtins +from rope.base import exceptions +from rope.base import libutils +from rope.base import pynames +from rope.base import pyobjects +from rope.base import resources +from rope.base import resourceobserver +from rope.base import taskhandle from rope.refactor import importutils @@ -65,11 +71,10 @@ def get_all_names(self): def get_name_locations(self, 
name): """Return a list of ``(resource, lineno)`` tuples""" result = [] - pycore = self.project.pycore for module in self.names: if name in self.names[module]: try: - pymodule = pycore.get_module(module) + pymodule = self.project.get_module(module) if name in pymodule: pyname = pymodule[name] module, lineno = pyname.get_definition_location() @@ -91,7 +96,7 @@ def generate_cache(self, resources=None, underlined=None, """ if resources is None: - resources = self.project.pycore.get_python_files() + resources = self.project.get_python_files() job_set = task_handle.create_jobset( 'Generatig autoimport cache', len(resources)) for file in resources: @@ -107,7 +112,7 @@ def generate_modules_cache(self, modules, underlined=None, for modname in modules: job_set.started_job('Working on <%s>' % modname) if modname.endswith('.*'): - mod = self.project.pycore.find_module(modname[:-2]) + mod = self.project.find_module(modname[:-2]) if mod: for sub in submodules(mod): self.update_resource(sub, underlined) @@ -130,13 +135,13 @@ def find_insertion_line(self, code): if match is not None: code = code[:match.start()] try: - pymodule = self.project.pycore.get_string_module(code) + pymodule = libutils.get_string_module(self.project, code) except exceptions.ModuleSyntaxError: return 1 testmodname = '__rope_testmodule_rope' importinfo = importutils.NormalImport(((testmodname, None),)) - module_imports = importutils.get_module_imports( - self.project.pycore, pymodule) + module_imports = importutils.get_module_imports(self.project, + pymodule) module_imports.add_import(importinfo) code = module_imports.get_changed_source() offset = code.index(testmodname) @@ -146,7 +151,7 @@ def find_insertion_line(self, code): def update_resource(self, resource, underlined=None): """Update the cache for global names in `resource`""" try: - pymodule = self.project.pycore.resource_to_pyobject(resource) + pymodule = self.project.get_pymodule(resource) modname = self._module_name(resource) 
self._add_names(pymodule, modname, underlined) except exceptions.ModuleSyntaxError: @@ -158,13 +163,13 @@ def update_module(self, modname, underlined=None): `modname` is the name of a module. """ try: - pymodule = self.project.pycore.get_module(modname) + pymodule = self.project.get_module(modname) self._add_names(pymodule, modname, underlined) except exceptions.ModuleNotFoundError: pass def _module_name(self, resource): - return self.project.pycore.modname(resource) + return libutils.modname(resource) def _add_names(self, pymodule, modname, underlined): if underlined is None: diff --git a/pymode/libs2/rope/contrib/codeassist.py b/pymode/libs2/rope/contrib/codeassist.py index 37433c2a..48b4a813 100644 --- a/pymode/libs2/rope/contrib/codeassist.py +++ b/pymode/libs2/rope/contrib/codeassist.py @@ -4,8 +4,15 @@ import rope.base.codeanalyze import rope.base.evaluate -from rope.base import pyobjects, pyobjectsdef, pynames, builtins, exceptions, worder -from rope.base.codeanalyze import SourceLinesAdapter +from rope.base import builtins +from rope.base import exceptions +from rope.base import libutils +from rope.base import pynames +from rope.base import pynamesdef +from rope.base import pyobjects +from rope.base import pyobjectsdef +from rope.base import pyscopes +from rope.base import worder from rope.contrib import fixsyntax from rope.refactor import functionutils @@ -53,9 +60,7 @@ def starting_offset(source_code, offset): def get_doc(project, source_code, offset, resource=None, maxfixes=1): """Get the pydoc""" - fixer = fixsyntax.FixSyntax(project.pycore, source_code, - resource, maxfixes) - pymodule = fixer.get_pymodule() + fixer = fixsyntax.FixSyntax(project, source_code, resource, maxfixes) pyname = fixer.pyname_at(offset) if pyname is None: return None @@ -88,9 +93,7 @@ def get_calltip(project, source_code, offset, resource=None, If `remove_self` is `True`, the first parameter whose name is self will be removed for methods. 
""" - fixer = fixsyntax.FixSyntax(project.pycore, source_code, - resource, maxfixes) - pymodule = fixer.get_pymodule() + fixer = fixsyntax.FixSyntax(project, source_code, resource, maxfixes) pyname = fixer.pyname_at(offset) if pyname is None: return None @@ -108,9 +111,7 @@ def get_definition_location(project, source_code, offset, location cannot be determined ``(None, None)`` is returned. """ - fixer = fixsyntax.FixSyntax(project.pycore, source_code, - resource, maxfixes) - pymodule = fixer.get_pymodule() + fixer = fixsyntax.FixSyntax(project, source_code, resource, maxfixes) pyname = fixer.pyname_at(offset) if pyname is not None: module, lineno = pyname.get_definition_location() @@ -126,6 +127,64 @@ def find_occurrences(*args, **kwds): return rope.contrib.findit.find_occurrences(*args, **kwds) +def get_canonical_path(project, resource, offset): + """Get the canonical path to an object. + + Given the offset of the object, this returns a list of + (name, name_type) tuples representing the canonical path to the + object. For example, the 'x' in the following code: + + class Foo(object): + def bar(self): + class Qux(object): + def mux(self, x): + pass + + we will return: + + [('Foo', 'CLASS'), ('bar', 'FUNCTION'), ('Qux', 'CLASS'), + ('mux', 'FUNCTION'), ('x', 'PARAMETER')] + + `resource` is a `rope.base.resources.Resource` object. + + `offset` is the offset of the pyname you want the path to. + + """ + # Retrieve the PyName. + pymod = project.get_pymodule(resource) + pyname = rope.base.evaluate.eval_location(pymod, offset) + + # Now get the location of the definition and its containing scope. + defmod, lineno = pyname.get_definition_location() + if not defmod: + return None + scope = defmod.get_scope().get_inner_scope_for_line(lineno) + + # Start with the name of the object we're interested in. 
+ names = [] + if isinstance(pyname, pynamesdef.ParameterName): + names = [(worder.get_name_at(pymod.get_resource(), offset), + 'PARAMETER') ] + elif isinstance(pyname, pynamesdef.AssignedName): + names = [(worder.get_name_at(pymod.get_resource(), offset), + 'VARIABLE')] + + # Collect scope names. + while scope.parent: + if isinstance(scope, pyscopes.FunctionScope): + scope_type = 'FUNCTION' + elif isinstance(scope, pyscopes.ClassScope): + scope_type = 'CLASS' + else: + scope_type = None + names.append((scope.pyobject.get_name(), scope_type)) + scope = scope.parent + + names.append((defmod.get_resource().real_path, 'MODULE')) + names.reverse() + return names + + class CompletionProposal(object): """A completion proposal @@ -184,15 +243,14 @@ def type(self): if isinstance(pyobject, builtins.BuiltinFunction): return 'function' elif isinstance(pyobject, builtins.BuiltinClass): - clsobj = pyobject.builtin return 'class' elif isinstance(pyobject, builtins.BuiltinObject) or \ - isinstance(pyobject, builtins.BuiltinName): + isinstance(pyobject, builtins.BuiltinName): return 'instance' elif isinstance(pyname, pynames.ImportedModule): return 'module' elif isinstance(pyname, pynames.ImportedName) or \ - isinstance(pyname, pynames.DefinedName): + isinstance(pyname, pynames.DefinedName): pyobject = pyname.get_object() if isinstance(pyobject, pyobjects.AbstractFunction): return 'function' @@ -222,7 +280,7 @@ def get_doc(self): @property def kind(self): - warnings.warn("the proposal's `kind` property is deprecated, " \ + warnings.warn("the proposal's `kind` property is deprecated, " "use `scope` instead") return self.scope @@ -294,7 +352,6 @@ class _PythonCodeAssist(object): def __init__(self, project, source_code, offset, resource=None, maxfixes=1, later_locals=True): self.project = project - self.pycore = self.project.pycore self.code = source_code self.resource = resource self.maxfixes = maxfixes @@ -309,7 +366,7 @@ def _find_starting_offset(self, source_code, offset): 
current_offset = offset - 1 while current_offset >= 0 and (source_code[current_offset].isalnum() or source_code[current_offset] in '_'): - current_offset -= 1; + current_offset -= 1 return current_offset + 1 def _matching_keywords(self, starting): @@ -339,11 +396,12 @@ def _dotted_completions(self, module_scope, holding_scope): compl_scope = 'imported' for name, pyname in element.get_attributes().items(): if name.startswith(self.starting): - result[name] = CompletionProposal(name, compl_scope, pyname) + result[name] = CompletionProposal(name, compl_scope, + pyname) return result def _undotted_completions(self, scope, result, lineno=None): - if scope.parent != None: + if scope.parent is not None: self._undotted_completions(scope.parent, result) if lineno is None: names = scope.get_propagated_names() @@ -388,7 +446,7 @@ def _is_defined_after(self, scope, pyname, lineno): def _code_completions(self): lineno = self.code.count('\n', 0, self.offset) + 1 - fixer = fixsyntax.FixSyntax(self.pycore, self.code, + fixer = fixsyntax.FixSyntax(self.project, self.code, self.resource, self.maxfixes) pymodule = fixer.get_pymodule() module_scope = pymodule.get_scope() @@ -413,24 +471,21 @@ def _keyword_parameters(self, pymodule, scope): if offset == 0: return {} word_finder = worder.Worder(self.code, True) - lines = SourceLinesAdapter(self.code) - lineno = lines.get_line_number(offset) if word_finder.is_on_function_call_keyword(offset - 1): - name_finder = rope.base.evaluate.ScopeNameFinder(pymodule) function_parens = word_finder.\ find_parens_start_from_inside(offset - 1) primary = word_finder.get_primary_at(function_parens - 1) try: function_pyname = rope.base.evaluate.\ eval_str(scope, primary) - except exceptions.BadIdentifierError, e: + except exceptions.BadIdentifierError: return {} if function_pyname is not None: pyobject = function_pyname.get_object() if isinstance(pyobject, pyobjects.AbstractFunction): pass elif isinstance(pyobject, pyobjects.AbstractClass) and \ - 
'__init__' in pyobject: + '__init__' in pyobject: pyobject = pyobject['__init__'].get_object() elif '__call__' in pyobject: pyobject = pyobject['__call__'].get_object() @@ -455,12 +510,12 @@ def __init__(self, code_assist_proposals, scopepref=None, typepref=None): self.proposals = code_assist_proposals if scopepref is None: scopepref = ['parameter_keyword', 'local', 'global', 'imported', - 'attribute', 'builtin', 'keyword'] + 'attribute', 'builtin', 'keyword'] self.scopepref = scopepref if typepref is None: typepref = ['class', 'function', 'instance', 'module', None] self.typerank = dict((type, index) - for index, type in enumerate(typepref)) + for index, type in enumerate(typepref)) def get_sorted_proposal_list(self): """Return a list of `CodeAssistProposal`""" @@ -471,7 +526,7 @@ def get_sorted_proposal_list(self): for scope in self.scopepref: scope_proposals = proposals.get(scope, []) scope_proposals = [proposal for proposal in scope_proposals - if proposal.type in self.typerank] + if proposal.type in self.typerank] scope_proposals.sort(self._proposal_cmp) result.extend(scope_proposals) return result @@ -526,7 +581,8 @@ def get_calltip(self, pyobject, ignore_unknown=False, remove_self=False): def _get_class_docstring(self, pyclass): contents = self._trim_docstring(pyclass.get_doc(), 2) supers = [super.get_name() for super in pyclass.get_superclasses()] - doc = 'class %s(%s):\n\n' % (pyclass.get_name(), ', '.join(supers)) + contents + doc = 'class %s(%s):\n\n' % (pyclass.get_name(), ', '.join(supers)) \ + + contents if '__init__' in pyclass: init = pyclass['__init__'].get_object() @@ -544,7 +600,7 @@ def _get_function_docstring(self, pyfunction): def _is_method(self, pyfunction): return isinstance(pyfunction, pyobjects.PyFunction) and \ - isinstance(pyfunction.parent, pyobjects.PyClass) + isinstance(pyfunction.parent, pyobjects.PyClass) def _get_single_function_docstring(self, pyfunction): signature = self._get_function_signature(pyfunction) @@ -579,7 +635,6 @@ 
def _location(self, pyobject, add_module=False): parent = parent.parent if add_module: if isinstance(pyobject, pyobjects.PyFunction): - module = pyobject.get_module() location.insert(0, self._get_module(pyobject)) if isinstance(parent, builtins.BuiltinModule): location.insert(0, parent.get_name() + '.') @@ -590,7 +645,7 @@ def _get_module(self, pyfunction): if module is not None: resource = module.get_resource() if resource is not None: - return pyfunction.pycore.modname(resource) + '.' + return libutils.modname(resource) + '.' return '' def _trim_docstring(self, docstring, indents=0): diff --git a/pymode/libs2/rope/contrib/finderrors.py b/pymode/libs2/rope/contrib/finderrors.py index c8cf7e15..9ee7dd15 100644 --- a/pymode/libs2/rope/contrib/finderrors.py +++ b/pymode/libs2/rope/contrib/finderrors.py @@ -31,7 +31,7 @@ def find_errors(project, resource): It returns a list of `Error`\s. """ - pymodule = project.pycore.resource_to_pyobject(resource) + pymodule = project.get_pymodule(resource) finder = _BadAccessFinder(pymodule) ast.walk(pymodule.get_ast(), finder) return finder.errors diff --git a/pymode/libs2/rope/contrib/findit.py b/pymode/libs2/rope/contrib/findit.py index e8ddd7e5..93eb01a8 100644 --- a/pymode/libs2/rope/contrib/findit.py +++ b/pymode/libs2/rope/contrib/findit.py @@ -7,7 +7,8 @@ def find_occurrences(project, resource, offset, unsure=False, resources=None, - in_hierarchy=False, task_handle=taskhandle.NullTaskHandle()): + in_hierarchy=False, + task_handle=taskhandle.NullTaskHandle()): """Return a list of `Location`\s If `unsure` is `True`, possible matches are returned, too. 
You @@ -18,16 +19,17 @@ def find_occurrences(project, resource, offset, unsure=False, resources=None, """ name = worder.get_name_at(resource, offset) - this_pymodule = project.pycore.resource_to_pyobject(resource) + this_pymodule = project.get_pymodule(resource) primary, pyname = rope.base.evaluate.eval_location2( this_pymodule, offset) + def is_match(occurrence): return unsure finder = occurrences.create_finder( - project.pycore, name, pyname, unsure=is_match, + project, name, pyname, unsure=is_match, in_hierarchy=in_hierarchy, instance=primary) if resources is None: - resources = project.pycore.get_python_files() + resources = project.get_python_files() job_set = task_handle.create_jobset('Finding Occurrences', count=len(resources)) return _find_locations(finder, resources, job_set) @@ -41,7 +43,7 @@ def find_implementations(project, resource, offset, resources=None, `Location`\s. """ name = worder.get_name_at(resource, offset) - this_pymodule = project.pycore.resource_to_pyobject(resource) + this_pymodule = project.get_pymodule(resource) pyname = rope.base.evaluate.eval_location(this_pymodule, offset) if pyname is not None: pyobject = pyname.get_object() @@ -50,17 +52,19 @@ def find_implementations(project, resource, offset, resources=None, raise exceptions.BadIdentifierError('Not a method!') else: raise exceptions.BadIdentifierError('Cannot resolve the identifier!') + def is_defined(occurrence): if not occurrence.is_defined(): return False + def not_self(occurrence): if occurrence.get_pyname().get_object() == pyname.get_object(): return False filters = [is_defined, not_self, occurrences.InHierarchyFilter(pyname, True)] - finder = occurrences.Finder(project.pycore, name, filters=filters) + finder = occurrences.Finder(project, name, filters=filters) if resources is None: - resources = project.pycore.get_python_files() + resources = project.get_python_files() job_set = task_handle.create_jobset('Finding Implementations', count=len(resources)) return 
_find_locations(finder, resources, job_set) @@ -72,19 +76,19 @@ def find_definition(project, code, offset, resource=None, maxfixes=1): A `Location` object is returned if the definition location can be determined, otherwise ``None`` is returned. """ - fixer = fixsyntax.FixSyntax(project.pycore, code, resource, maxfixes) - main_module = fixer.get_pymodule() + fixer = fixsyntax.FixSyntax(project, code, resource, maxfixes) pyname = fixer.pyname_at(offset) if pyname is not None: module, lineno = pyname.get_definition_location() name = rope.base.worder.Worder(code).get_word_at(offset) if lineno is not None: start = module.lines.get_line_start(lineno) + def check_offset(occurrence): if occurrence.offset < start: return False pyname_filter = occurrences.PyNameFilter(pyname) - finder = occurrences.Finder(project.pycore, name, + finder = occurrences.Finder(project, name, [check_offset, pyname_filter]) for occurrence in finder.find_occurrences(pymodule=module): return Location(occurrence) diff --git a/pymode/libs2/rope/contrib/fixmodnames.py b/pymode/libs2/rope/contrib/fixmodnames.py index 7092f131..d8bd3da1 100644 --- a/pymode/libs2/rope/contrib/fixmodnames.py +++ b/pymode/libs2/rope/contrib/fixmodnames.py @@ -15,7 +15,7 @@ argument. 
""" -from rope.base import change, taskhandle +from rope.base import taskhandle from rope.contrib import changestack from rope.refactor import rename @@ -57,7 +57,7 @@ def _count_fixes(self, fixer): return len(list(self._tobe_fixed(fixer))) def _tobe_fixed(self, fixer): - for resource in self.project.pycore.get_python_files(): + for resource in self.project.get_python_files(): modname = self._name(resource) if modname != fixer(modname): yield resource diff --git a/pymode/libs2/rope/contrib/fixsyntax.py b/pymode/libs2/rope/contrib/fixsyntax.py index 870046c8..aab5c78c 100644 --- a/pymode/libs2/rope/contrib/fixsyntax.py +++ b/pymode/libs2/rope/contrib/fixsyntax.py @@ -1,13 +1,16 @@ import rope.base.codeanalyze import rope.base.evaluate -from rope.base import worder, exceptions, utils +from rope.base import exceptions +from rope.base import libutils +from rope.base import utils +from rope.base import worder from rope.base.codeanalyze import ArrayLinesAdapter, LogicalLineFinder class FixSyntax(object): - def __init__(self, pycore, code, resource, maxfixes=1): - self.pycore = pycore + def __init__(self, project, code, resource, maxfixes=1): + self.project = project self.code = code self.resource = resource self.maxfixes = maxfixes @@ -22,10 +25,11 @@ def get_pymodule(self): try: if tries == 0 and self.resource is not None and \ self.resource.read() == code: - return self.pycore.resource_to_pyobject(self.resource, - force_errors=True) - return self.pycore.get_string_module( - code, resource=self.resource, force_errors=True) + return self.project.get_pymodule(self.resource, + force_errors=True) + return libutils.get_string_module( + self.project, code, resource=self.resource, + force_errors=True) except exceptions.ModuleSyntaxError, e: if msg is None: msg = '%s:%s %s' % (e.filename, e.lineno, e.message_) @@ -34,7 +38,9 @@ def get_pymodule(self): self.commenter.comment(e.lineno) code = '\n'.join(self.commenter.lines) else: - raise exceptions.ModuleSyntaxError(e.filename, 
e.lineno, msg) + raise exceptions.ModuleSyntaxError( + e.filename, e.lineno, + 'Failed to fix error: {}'.format(msg)) @property @utils.saveit @@ -43,6 +49,7 @@ def commenter(self): def pyname_at(self, offset): pymodule = self.get_pymodule() + def old_pyname(): word_finder = worder.Worder(self.code, True) expression = word_finder.get_primary_at(offset) @@ -51,6 +58,7 @@ def old_pyname(): scope = pymodule.get_scope().get_inner_scope_for_line(lineno) return rope.base.evaluate.eval_str(scope, expression) new_code = pymodule.source_code + def new_pyname(): newoffset = self.commenter.transfered_offset(offset) return rope.base.evaluate.eval_location(pymodule, newoffset) @@ -108,7 +116,6 @@ def _get_block_end(self, lineno): return end_line def _get_stmt_end(self, lineno): - end_line = lineno base_indents = _get_line_indents(self.lines[lineno]) for i in range(lineno + 1, len(self.lines)): if _get_line_indents(self.lines[i]) <= base_indents: @@ -117,7 +124,7 @@ def _get_stmt_end(self, lineno): def _fix_incomplete_try_blocks(self, lineno, indents): block_start = lineno - last_indents = current_indents = indents + last_indents = indents while block_start > 0: block_start = rope.base.codeanalyze.get_block_start( ArrayLinesAdapter(self.lines), block_start) - 1 @@ -155,6 +162,7 @@ def _insert(self, lineno, line): self.origs.insert(lineno, self.origs[lineno]) self.lines.insert(lineno, line) + def _logical_start(lines, lineno, check_prev=False): logical_finder = LogicalLineFinder(ArrayLinesAdapter(lines)) if check_prev: diff --git a/pymode/libs2/rope/contrib/generate.py b/pymode/libs2/rope/contrib/generate.py index 4d850da0..825f26d6 100644 --- a/pymode/libs2/rope/contrib/generate.py +++ b/pymode/libs2/rope/contrib/generate.py @@ -1,5 +1,7 @@ import rope.base.evaluate -from rope.base import change, pyobjects, exceptions, pynames, worder, codeanalyze +from rope.base import libutils +from rope.base import (change, pyobjects, exceptions, pynames, worder, + codeanalyze) from 
rope.refactor import sourceutils, importutils, functionutils, suites @@ -24,6 +26,7 @@ def create_module(project, name, sourcefolder=None): parent = parent.get_child(package) return parent.create_file(packages[-1] + '.py') + def create_package(project, name, sourcefolder=None): """Creates a package and returns a `rope.base.resources.Folder`""" if sourcefolder is None: @@ -55,14 +58,16 @@ def _check_exceptional_conditions(self): 'Element <%s> already exists.' % self.name) if not self.info.primary_is_found(): raise exceptions.RefactoringError( - 'Cannot determine the scope <%s> should be defined in.' % self.name) + 'Cannot determine the scope <%s> should be defined in.' % + self.name) def get_changes(self): changes = change.ChangeSet('Generate %s <%s>' % (self._get_element_kind(), self.name)) indents = self.info.get_scope_indents() blanks = self.info.get_blank_lines() - base_definition = sourceutils.fix_indentation(self._get_element(), indents) + base_definition = sourceutils.fix_indentation(self._get_element(), + indents) definition = '\n' * blanks[0] + base_definition + '\n' * blanks[1] resource = self.info.get_insertion_resource() @@ -130,18 +135,19 @@ class GenerateModule(_Generate): def get_changes(self): package = self.info.get_package() changes = change.ChangeSet('Generate Module <%s>' % self.name) - new_resource = self.project.get_file('%s/%s.py' % (package.path, self.name)) + new_resource = self.project.get_file('%s/%s.py' % + (package.path, self.name)) if new_resource.exists(): raise exceptions.RefactoringError( 'Module <%s> already exists' % new_resource.path) changes.add_change(change.CreateResource(new_resource)) changes.add_change(_add_import_to_module( - self.project.pycore, self.resource, new_resource)) + self.project, self.resource, new_resource)) return changes def get_location(self): package = self.info.get_package() - return (package.get_child('%s.py' % self.name) , 1) + return (package.get_child('%s.py' % self.name), 1) class 
GeneratePackage(_Generate): @@ -149,13 +155,14 @@ class GeneratePackage(_Generate): def get_changes(self): package = self.info.get_package() changes = change.ChangeSet('Generate Package <%s>' % self.name) - new_resource = self.project.get_folder('%s/%s' % (package.path, self.name)) + new_resource = self.project.get_folder('%s/%s' % + (package.path, self.name)) if new_resource.exists(): raise exceptions.RefactoringError( 'Package <%s> already exists' % new_resource.path) changes.add_change(change.CreateResource(new_resource)) changes.add_change(_add_import_to_module( - self.project.pycore, self.resource, new_resource)) + self.project, self.resource, new_resource)) child = self.project.get_folder(package.path + '/' + self.name) changes.add_change(change.CreateFile(child, '__init__.py')) return changes @@ -163,14 +170,14 @@ def get_changes(self): def get_location(self): package = self.info.get_package() child = package.get_child(self.name) - return (child.get_child('__init__.py') , 1) + return (child.get_child('__init__.py'), 1) -def _add_import_to_module(pycore, resource, imported): - pymodule = pycore.resource_to_pyobject(resource) - import_tools = importutils.ImportTools(pycore) +def _add_import_to_module(project, resource, imported): + pymodule = project.get_pymodule(resource) + import_tools = importutils.ImportTools(project) module_imports = import_tools.module_imports(pymodule) - module_name = pycore.modname(imported) + module_name = libutils.modname(imported) new_import = importutils.NormalImport(((module_name, None), )) module_imports.add_import(new_import) return change.ChangeContents(resource, module_imports.get_changed_source()) @@ -182,7 +189,7 @@ def __init__(self, pycore, resource, offset): self.pycore = pycore self.resource = resource self.offset = offset - self.source_pymodule = self.pycore.resource_to_pyobject(resource) + self.source_pymodule = self.pycore.project.get_pymodule(resource) finder = rope.base.evaluate.ScopeNameFinder(self.source_pymodule) 
self.primary, self.pyname = finder.get_primary_and_pyname_at(offset) self._init_fields() @@ -264,7 +271,7 @@ def get_blank_lines(self): def get_package(self): primary = self.primary if self.primary is None: - return self.pycore.get_source_folders()[0] + return self.pycore.project.get_source_folders()[0] if isinstance(primary.get_object(), pyobjects.PyPackage): return primary.get_object().get_resource() raise exceptions.RefactoringError( @@ -304,15 +311,15 @@ def element_already_exists(self): def is_static_method(self): return self.primary is not None and \ - isinstance(self.primary.get_object(), pyobjects.PyClass) + isinstance(self.primary.get_object(), pyobjects.PyClass) def is_method(self): return self.primary is not None and \ - isinstance(self.primary.get_object().get_type(), pyobjects.PyClass) + isinstance(self.primary.get_object().get_type(), pyobjects.PyClass) def is_constructor(self): return self.pyname is not None and \ - isinstance(self.pyname.get_object(), pyobjects.PyClass) + isinstance(self.pyname.get_object(), pyobjects.PyClass) def is_instance(self): if self.pyname is None: diff --git a/pymode/libs2/rope/refactor/__init__.py b/pymode/libs2/rope/refactor/__init__.py index 10d734c3..4ef67513 100644 --- a/pymode/libs2/rope/refactor/__init__.py +++ b/pymode/libs2/rope/refactor/__init__.py @@ -45,8 +45,8 @@ monitoring the progress of refactorings. 
""" -from rope.refactor.importutils import ImportOrganizer -from rope.refactor.topackage import ModuleToPackage +from rope.refactor.importutils import ImportOrganizer # noqa +from rope.refactor.topackage import ModuleToPackage # noqa __all__ = ['rename', 'move', 'inline', 'extract', 'restructure', 'topackage', diff --git a/pymode/libs2/rope/refactor/change_signature.py b/pymode/libs2/rope/refactor/change_signature.py index a8c50d71..4279d9cf 100644 --- a/pymode/libs2/rope/refactor/change_signature.py +++ b/pymode/libs2/rope/refactor/change_signature.py @@ -1,7 +1,12 @@ import copy import rope.base.exceptions -from rope.base import pyobjects, taskhandle, evaluate, worder, codeanalyze, utils +from rope.base import codeanalyze +from rope.base import evaluate +from rope.base import pyobjects +from rope.base import taskhandle +from rope.base import utils +from rope.base import worder from rope.base.change import ChangeContents, ChangeSet from rope.refactor import occurrences, functionutils @@ -9,7 +14,7 @@ class ChangeSignature(object): def __init__(self, project, resource, offset): - self.pycore = project.pycore + self.project = project self.resource = resource self.offset = offset self._set_name_and_pyname() @@ -20,7 +25,7 @@ def __init__(self, project, resource, offset): def _set_name_and_pyname(self): self.name = worder.get_name_at(self.resource, self.offset) - this_pymodule = self.pycore.resource_to_pyobject(self.resource) + this_pymodule = self.project.get_pymodule(self.resource) self.primary, self.pyname = evaluate.eval_location2( this_pymodule, self.offset) if self.pyname is None: @@ -42,21 +47,21 @@ def _set_name_and_pyname(self): def _change_calls(self, call_changer, in_hierarchy=None, resources=None, handle=taskhandle.NullTaskHandle()): if resources is None: - resources = self.pycore.get_python_files() + resources = self.project.get_python_files() changes = ChangeSet('Changing signature of <%s>' % self.name) job_set = handle.create_jobset('Collecting 
Changes', len(resources)) finder = occurrences.create_finder( - self.pycore, self.name, self.pyname, instance=self.primary, + self.project, self.name, self.pyname, instance=self.primary, in_hierarchy=in_hierarchy and self.is_method()) if self.others: name, pyname = self.others constructor_finder = occurrences.create_finder( - self.pycore, name, pyname, only_calls=True) + self.project, name, pyname, only_calls=True) finder = _MultipleFinders([finder, constructor_finder]) for file in resources: job_set.started_job(file.path) change_calls = _ChangeCallsInModule( - self.pycore, finder, file, call_changer) + self.project, finder, file, call_changer) changed_file = change_calls.get_changed_module() if changed_file is not None: changes.add_change(ChangeContents(file, changed_file)) @@ -160,12 +165,15 @@ def change_definition(self, call): def change_call(self, primary, pyname, call): call_info = functionutils.CallInfo.read( primary, pyname, self.definition_info, call) - mapping = functionutils.ArgumentMapping(self.definition_info, call_info) + mapping = functionutils.ArgumentMapping(self.definition_info, + call_info) - for definition_info, changer in zip(self.changed_definition_infos, self.changers): + for definition_info, changer in zip(self.changed_definition_infos, + self.changers): changer.change_argument_mapping(definition_info, mapping) - return mapping.to_call_info(self.changed_definition_infos[-1]).to_string() + return mapping.to_call_info( + self.changed_definition_infos[-1]).to_string() class _ArgumentChanger(object): @@ -190,12 +198,14 @@ def change_definition_info(self, call_info): if self.index < len(call_info.args_with_defaults): del call_info.args_with_defaults[self.index] elif self.index == len(call_info.args_with_defaults) and \ - call_info.args_arg is not None: + call_info.args_arg is not None: call_info.args_arg = None elif (self.index == len(call_info.args_with_defaults) and - call_info.args_arg is None and call_info.keywords_arg is not None) or \ - 
(self.index == len(call_info.args_with_defaults) + 1 and - call_info.args_arg is not None and call_info.keywords_arg is not None): + call_info.args_arg is None and + call_info.keywords_arg is not None) or \ + (self.index == len(call_info.args_with_defaults) + 1 and + call_info.args_arg is not None and + call_info.keywords_arg is not None): call_info.keywords_arg = None def change_argument_mapping(self, definition_info, mapping): @@ -282,8 +292,8 @@ def change_definition_info(self, definition_info): class _ChangeCallsInModule(object): - def __init__(self, pycore, occurrence_finder, resource, call_changer): - self.pycore = pycore + def __init__(self, project, occurrence_finder, resource, call_changer): + self.project = project self.occurrence_finder = occurrence_finder self.resource = resource self.call_changer = call_changer @@ -291,11 +301,13 @@ def __init__(self, pycore, occurrence_finder, resource, call_changer): def get_changed_module(self): word_finder = worder.Worder(self.source) change_collector = codeanalyze.ChangeCollector(self.source) - for occurrence in self.occurrence_finder.find_occurrences(self.resource): + for occurrence in self.occurrence_finder.find_occurrences( + self.resource): if not occurrence.is_called() and not occurrence.is_defined(): continue start, end = occurrence.get_primary_range() - begin_parens, end_parens = word_finder.get_word_parens_range(end - 1) + begin_parens, end_parens = word_finder.\ + get_word_parens_range(end - 1) if occurrence.is_called(): primary, pyname = occurrence.get_primary_and_pyname() changed_call = self.call_changer.change_call( @@ -310,7 +322,7 @@ def get_changed_module(self): @property @utils.saveit def pymodule(self): - return self.pycore.resource_to_pyobject(self.resource) + return self.project.get_pymodule(self.resource) @property @utils.saveit diff --git a/pymode/libs2/rope/refactor/encapsulate_field.py b/pymode/libs2/rope/refactor/encapsulate_field.py index af8d3ccf..32cb7a95 100644 --- 
a/pymode/libs2/rope/refactor/encapsulate_field.py +++ b/pymode/libs2/rope/refactor/encapsulate_field.py @@ -1,4 +1,10 @@ -from rope.base import pynames, taskhandle, evaluate, exceptions, worder, utils +from rope.base import evaluate +from rope.base import exceptions +from rope.base import libutils +from rope.base import pynames +from rope.base import taskhandle +from rope.base import utils +from rope.base import worder from rope.base.change import ChangeSet, ChangeContents from rope.refactor import sourceutils, occurrences @@ -6,9 +12,9 @@ class EncapsulateField(object): def __init__(self, project, resource, offset): - self.pycore = project.pycore + self.project = project self.name = worder.get_name_at(resource, offset) - this_pymodule = self.pycore.resource_to_pyobject(resource) + this_pymodule = self.project.get_pymodule(resource) self.pyname = evaluate.eval_location(this_pymodule, offset) if not self._is_an_attribute(self.pyname): raise exceptions.RefactoringError( @@ -30,7 +36,7 @@ def get_changes(self, getter=None, setter=None, resources=None, """ if resources is None: - resources = self.pycore.get_python_files() + resources = self.project.get_python_files() changes = ChangeSet('Encapsulate field <%s>' % self.name) job_set = task_handle.create_jobset('Collecting Changes', len(resources)) @@ -39,7 +45,7 @@ def get_changes(self, getter=None, setter=None, resources=None, if setter is None: setter = 'set_' + self.name renamer = GetterSetterRenameInModule( - self.pycore, self.name, self.pyname, getter, setter) + self.project, self.name, self.pyname, getter, setter) for file in resources: job_set.started_job(file.path) if file == self.resource: @@ -61,7 +67,7 @@ def _is_an_attribute(self, pyname): if pyname is not None and isinstance(pyname, pynames.AssignedName): pymodule, lineno = self.pyname.get_definition_location() scope = pymodule.get_scope().\ - get_inner_scope_for_line(lineno) + get_inner_scope_for_line(lineno) if scope.get_kind() == 'Class': return pyname 
in scope.get_names().values() parent = scope.parent @@ -80,7 +86,7 @@ def _get_defining_scope(self): return pymodule.get_scope().get_inner_scope_for_line(line) def _change_holding_module(self, changes, renamer, getter, setter): - pymodule = self.pycore.resource_to_pyobject(self.resource) + pymodule = self.project.get_pymodule(self.resource) class_scope = self._get_defining_class_scope() defining_object = self._get_defining_scope().pyobject start, end = sourceutils.get_body_region(defining_object) @@ -88,10 +94,11 @@ def _change_holding_module(self, changes, renamer, getter, setter): new_source = renamer.get_changed_module(pymodule=pymodule, skip_start=start, skip_end=end) if new_source is not None: - pymodule = self.pycore.get_string_module(new_source, self.resource) + pymodule = libutils.get_string_module( + self.project, new_source, self.resource) class_scope = pymodule.get_scope().\ - get_inner_scope_for_line(class_scope.get_start()) - indents = sourceutils.get_indent(self.pycore) * ' ' + get_inner_scope_for_line(class_scope.get_start()) + indents = sourceutils.get_indent(self.project) * ' ' getter = 'def %s(self):\n%sreturn self.%s' % \ (getter, indents, self.name) setter = 'def %s(self, value):\n%sself.%s = value' % \ @@ -103,10 +110,10 @@ def _change_holding_module(self, changes, renamer, getter, setter): class GetterSetterRenameInModule(object): - def __init__(self, pycore, name, pyname, getter, setter): - self.pycore = pycore + def __init__(self, project, name, pyname, getter, setter): + self.project = project self.name = name - self.finder = occurrences.create_finder(pycore, name, pyname) + self.finder = occurrences.create_finder(project, name, pyname) self.getter = getter self.setter = setter @@ -120,7 +127,7 @@ def get_changed_module(self, resource=None, pymodule=None, class _FindChangesForModule(object): def __init__(self, finder, resource, pymodule, skip_start, skip_end): - self.pycore = finder.pycore + self.project = finder.project self.finder = 
finder.finder self.getter = finder.getter self.setter = finder.setter @@ -155,7 +162,7 @@ def get_changed_module(self): + ' %s ' % assignment_type[:-1]) current_line = self.lines.get_line_number(start) start_line, end_line = self.pymodule.logical_lines.\ - logical_line_in(current_line) + logical_line_in(current_line) self.last_set = self.lines.get_line_end(end_line) end = self.source.index('=', end) + 1 self.set_index = len(result) @@ -193,7 +200,7 @@ def source(self): @utils.saveit def lines(self): if self.pymodule is None: - self.pymodule = self.pycore.resource_to_pyobject(self.resource) + self.pymodule = self.project.get_pymodule(self.resource) return self.pymodule.lines @property diff --git a/pymode/libs2/rope/refactor/extract.py b/pymode/libs2/rope/refactor/extract.py index 3e7a619c..be541bb5 100644 --- a/pymode/libs2/rope/refactor/extract.py +++ b/pymode/libs2/rope/refactor/extract.py @@ -12,7 +12,7 @@ # # _ExtractInfo: holds information about the refactoring; it is passed # to the parts that need to have information about the refactoring -# +# # _ExtractCollector: merely saves all of the information necessary for # performing the refactoring. 
# @@ -36,7 +36,6 @@ class _ExtractRefactoring(object): def __init__(self, project, resource, start_offset, end_offset, variable=False): self.project = project - self.pycore = project.pycore self.resource = resource self.start_offset = self._fix_start(resource.read(), start_offset) self.end_offset = self._fix_end(resource.read(), end_offset) @@ -95,9 +94,9 @@ class _ExtractInfo(object): def __init__(self, project, resource, start, end, new_name, variable, similar, make_global): - self.pycore = project.pycore + self.project = project self.resource = resource - self.pymodule = self.pycore.resource_to_pyobject(resource) + self.pymodule = project.get_pymodule(resource) self.global_scope = self.pymodule.get_scope() self.source = self.pymodule.source_code self.lines = self.pymodule.lines @@ -153,8 +152,8 @@ def _choose_closest_line_end(self, offset, end=False): @property def one_line(self): return self.region != self.lines_region and \ - (self.logical_lines.logical_line_in(self.region_lines[0]) == - self.logical_lines.logical_line_in(self.region_lines[1])) + (self.logical_lines.logical_line_in(self.region_lines[0]) == + self.logical_lines.logical_line_in(self.region_lines[1])) @property def global_(self): @@ -163,7 +162,7 @@ def global_(self): @property def method(self): return self.scope.parent is not None and \ - self.scope.parent.get_kind() == 'Class' + self.scope.parent.get_kind() == 'Class' @property def indents(self): @@ -182,6 +181,7 @@ def extracted(self): return self.source[self.region[0]:self.region[1]] _returned = None + @property def returned(self): """Does the extracted piece contain return statement""" @@ -273,7 +273,8 @@ def _where_to_search(self): if self.info.variable: return [self.info.scope_region] else: - return [self.info._get_scope_region(self.info.scope.parent)] + return [self.info._get_scope_region( + self.info.scope.parent)] else: return [self.info.region] @@ -391,8 +392,9 @@ def multi_line_conditions(self, info): 'contain complete statements.') 
def _is_region_on_a_word(self, info): - if info.region[0] > 0 and self._is_on_a_word(info, info.region[0] - 1) or \ - self._is_on_a_word(info, info.region[1] - 1): + if info.region[0] > 0 and \ + self._is_on_a_word(info, info.region[0] - 1) or \ + self._is_on_a_word(info, info.region[1] - 1): return True def _is_on_a_word(self, info, offset): @@ -436,7 +438,7 @@ def _get_body(self): return result def _find_temps(self): - return usefunction.find_temps(self.info.pycore.project, + return usefunction.find_temps(self.info.project, self._get_body()) def get_checks(self): @@ -468,7 +470,7 @@ def _get_function_definition(self): result.append('@staticmethod\n') result.append('def %s:\n' % self._get_function_signature(args)) unindented_body = self._get_unindented_function_body(returns) - indents = sourceutils.get_indent(self.info.pycore) + indents = sourceutils.get_indent(self.info.project) function_body = sourceutils.indent_lines(unindented_body, indents) result.append(function_body) definition = ''.join(result) @@ -487,11 +489,11 @@ def _get_function_signature(self, args): args.remove(self_name) args.insert(0, self_name) return prefix + self.info.new_name + \ - '(%s)' % self._get_comma_form(args) + '(%s)' % self._get_comma_form(args) def _extracting_method(self): return self.info.method and not self.info.make_global and \ - _get_function_kind(self.info.scope) == 'method' + _get_function_kind(self.info.scope) == 'method' def _get_self_name(self): param_names = self.info.scope.pyobject.get_param_names() @@ -503,7 +505,7 @@ def _get_function_call(self, args): if self.info.method and not self.info.make_global: if _get_function_kind(self.info.scope) == 'method': self_name = self._get_self_name() - if self_name in args: + if self_name in args: args.remove(self_name) prefix = self_name + '.' 
else: @@ -557,7 +559,7 @@ def _find_function_returns(self): if self.info.one_line or self.info.returned: return [] written = self.info_collector.written | \ - self.info_collector.maybe_written + self.info_collector.maybe_written return list(written & self.info_collector.postread) def _get_unindented_function_body(self, returns): @@ -577,7 +579,7 @@ def __init__(self, info): def get_definition(self): result = self.info.new_name + ' = ' + \ - _join_lines(self.info.extracted) + '\n' + _join_lines(self.info.extracted) + '\n' return result def get_body_pattern(self): @@ -671,7 +673,6 @@ def _For(self, node): self._handle_conditional_node(node) - def _get_argnames(arguments): result = [node.id for node in arguments.args if isinstance(node, ast.Name)] @@ -770,6 +771,7 @@ def has_errors(code): ast.walk(node, visitor) return visitor.error + def _get_function_kind(scope): return scope.pyobject.get_kind() @@ -779,6 +781,7 @@ def _parse_text(body): node = ast.parse(body) return node + def _join_lines(code): lines = [] for line in code.splitlines(): diff --git a/pymode/libs2/rope/refactor/functionutils.py b/pymode/libs2/rope/refactor/functionutils.py index a653b9db..58baf917 100644 --- a/pymode/libs2/rope/refactor/functionutils.py +++ b/pymode/libs2/rope/refactor/functionutils.py @@ -32,9 +32,6 @@ def arguments_to_string(self, from_index=0): @staticmethod def _read(pyfunction, code): - scope = pyfunction.get_scope() - parent = scope.parent - parameter_names = pyfunction.get_param_names() kind = pyfunction.get_kind() is_method = kind == 'method' is_lambda = kind == 'lambda' @@ -89,7 +86,8 @@ def to_string(self): if self.args[start:]: params.extend(self.args[start:]) if self.keywords: - params.extend(['%s=%s' % (name, value) for name, value in self.keywords]) + params.extend(['%s=%s' % (name, value) + for name, value in self.keywords]) if self.args_arg is not None: params.append('*' + self.args_arg) if self.keywords_arg: @@ -120,15 +118,15 @@ def read(primary, pyname, 
definition_info, code): @staticmethod def _is_method_call(primary, pyname): return primary is not None and \ - isinstance(primary.get_object().get_type(), - rope.base.pyobjects.PyClass) and \ - CallInfo._is_method(pyname) + isinstance(primary.get_object().get_type(), + rope.base.pyobjects.PyClass) and \ + CallInfo._is_method(pyname) @staticmethod def _is_class(pyname): return pyname is not None and \ - isinstance(pyname.get_object(), - rope.base.pyobjects.PyClass) + isinstance(pyname.get_object(), + rope.base.pyobjects.PyClass) @staticmethod def _is_method(pyname): @@ -184,7 +182,8 @@ def to_call_info(self, definition_info): keywords.extend(self.keyword_args) return CallInfo(self.call_info.function_name, args, keywords, self.call_info.args_arg, self.call_info.keywords_arg, - self.call_info.implicit_arg, self.call_info.constructor) + self.call_info.implicit_arg, + self.call_info.constructor) class _FunctionParser(object): @@ -197,7 +196,8 @@ def __init__(self, call, implicit_arg, is_lambda=False): self.last_parens = self.call.rindex(':') else: self.last_parens = self.call.rindex(')') - self.first_parens = self.word_finder._find_parens_start(self.last_parens) + self.first_parens = self.word_finder._find_parens_start( + self.last_parens) def get_parameters(self): args, keywords = self.word_finder.get_parameters(self.first_parens, diff --git a/pymode/libs2/rope/refactor/importutils/__init__.py b/pymode/libs2/rope/refactor/importutils/__init__.py index 2a86edb0..4871faf3 100644 --- a/pymode/libs2/rope/refactor/importutils/__init__.py +++ b/pymode/libs2/rope/refactor/importutils/__init__.py @@ -5,6 +5,7 @@ """ import rope.base.evaluate +from rope.base import libutils from rope.base.change import ChangeSet, ChangeContents from rope.refactor import occurrences, rename from rope.refactor.importutils import module_imports, actions @@ -21,8 +22,7 @@ class ImportOrganizer(object): def __init__(self, project): self.project = project - self.pycore = project.pycore - 
self.import_tools = ImportTools(self.pycore) + self.import_tools = ImportTools(self.project) def organize_imports(self, resource, offset=None): return self._perform_command_on_import_tools( @@ -45,7 +45,7 @@ def handle_long_imports(self, resource, offset=None): self.import_tools.handle_long_imports, resource, offset) def _perform_command_on_import_tools(self, method, resource, offset): - pymodule = self.pycore.resource_to_pyobject(resource) + pymodule = self.project.get_pymodule(resource) before_performing = pymodule.source_code import_filter = None if offset is not None: @@ -66,26 +66,26 @@ def import_filter(import_stmt): class ImportTools(object): - def __init__(self, pycore): - self.pycore = pycore + def __init__(self, project): + self.project = project def get_import(self, resource): """The import statement for `resource`""" - module_name = self.pycore.modname(resource) + module_name = libutils.modname(resource) return NormalImport(((module_name, None), )) def get_from_import(self, resource, name): """The from import statement for `name` in `resource`""" - module_name = self.pycore.modname(resource) + module_name = libutils.modname(resource) names = [] if isinstance(name, list): names = [(imported, None) for imported in name] else: - names = [(name, None),] + names = [(name, None), ] return FromImport(module_name, 0, tuple(names)) def module_imports(self, module, imports_filter=None): - return module_imports.ModuleImports(self.pycore, module, + return module_imports.ModuleImports(self.project, module, imports_filter) def froms_to_imports(self, pymodule, import_filter=None): @@ -103,7 +103,8 @@ def froms_to_imports(self, pymodule, import_filter=None): if not import_stmt.readonly and \ self._is_transformable_to_normal(import_stmt.import_info): import_stmt.import_info = \ - NormalImport(((import_stmt.import_info.module_name, None),)) + NormalImport(((import_stmt.import_info.module_name, + None),)) module_imports.remove_duplicates() return 
module_imports.get_changed_source() @@ -121,12 +122,13 @@ def _from_to_normal(self, pymodule, import_stmt): if alias is not None: imported = alias occurrence_finder = occurrences.create_finder( - self.pycore, imported, pymodule[imported], imports=False) + self.project, imported, pymodule[imported], imports=False) source = rename.rename_in_module( occurrence_finder, module_name + '.' + name, pymodule=pymodule, replace_primary=True) if source is not None: - pymodule = self.pycore.get_string_module(source, resource) + pymodule = libutils.get_string_module( + self.project, source, resource) return pymodule def _clean_up_imports(self, pymodule, import_filter): @@ -135,17 +137,20 @@ def _clean_up_imports(self, pymodule, import_filter): module_with_imports.expand_stars() source = module_with_imports.get_changed_source() if source is not None: - pymodule = self.pycore.get_string_module(source, resource) + pymodule = libutils.get_string_module( + self.project, source, resource) source = self.relatives_to_absolutes(pymodule) if source is not None: - pymodule = self.pycore.get_string_module(source, resource) + pymodule = libutils.get_string_module( + self.project, source, resource) module_with_imports = self.module_imports(pymodule, import_filter) module_with_imports.remove_duplicates() module_with_imports.remove_unused_imports() source = module_with_imports.get_changed_source() if source is not None: - pymodule = self.pycore.get_string_module(source, resource) + pymodule = libutils.get_string_module( + self.project, source, resource) return pymodule def relatives_to_absolutes(self, pymodule, import_filter=None): @@ -172,12 +177,14 @@ def organize_imports(self, pymodule, module_imports = self.module_imports(pymodule, import_filter) if unused: module_imports.remove_unused_imports() + if self.project.prefs.get("split_imports"): + module_imports.force_single_imports() if duplicates: module_imports.remove_duplicates() source = module_imports.get_changed_source() if source is not 
None: - pymodule = self.pycore.get_string_module( - source, pymodule.get_resource()) + pymodule = libutils.get_string_module( + self.project, source, pymodule.get_resource()) if selfs: pymodule = self._remove_self_imports(pymodule, import_filter) if sort: @@ -187,10 +194,12 @@ def organize_imports(self, pymodule, def _remove_self_imports(self, pymodule, import_filter=None): module_imports = self.module_imports(pymodule, import_filter) - to_be_fixed, to_be_renamed = module_imports.get_self_import_fix_and_rename_list() + to_be_fixed, to_be_renamed = \ + module_imports.get_self_import_fix_and_rename_list() for name in to_be_fixed: try: - pymodule = self._rename_in_module(pymodule, name, '', till_dot=True) + pymodule = self._rename_in_module(pymodule, name, '', + till_dot=True) except ValueError: # There is a self import with direct access to it return pymodule @@ -200,16 +209,18 @@ def _remove_self_imports(self, pymodule, import_filter=None): module_imports.get_self_import_fix_and_rename_list() source = module_imports.get_changed_source() if source is not None: - pymodule = self.pycore.get_string_module(source, pymodule.get_resource()) + pymodule = libutils.get_string_module( + self.project, source, pymodule.get_resource()) return pymodule def _rename_in_module(self, pymodule, name, new_name, till_dot=False): old_name = name.split('.')[-1] old_pyname = rope.base.evaluate.eval_str(pymodule.get_scope(), name) occurrence_finder = occurrences.create_finder( - self.pycore, old_name, old_pyname, imports=False) + self.project, old_name, old_pyname, imports=False) changes = rope.base.codeanalyze.ChangeCollector(pymodule.source_code) - for occurrence in occurrence_finder.find_occurrences(pymodule=pymodule): + for occurrence in occurrence_finder.find_occurrences( + pymodule=pymodule): start, end = occurrence.get_primary_range() if till_dot: new_end = pymodule.source_code.index('.', end) + 1 @@ -222,7 +233,8 @@ def _rename_in_module(self, pymodule, name, new_name, 
till_dot=False): changes.add_change(start, end, new_name) source = changes.get_changed() if source is not None: - pymodule = self.pycore.get_string_module(source, pymodule.get_resource()) + pymodule = libutils.get_string_module( + self.project, source, pymodule.get_resource()) return pymodule def sort_imports(self, pymodule, import_filter=None): @@ -237,8 +249,8 @@ def handle_long_imports(self, pymodule, maxdots=2, maxlength=27, module_imports = self.module_imports(pymodule, import_filter) to_be_fixed = module_imports.handle_long_imports(maxdots, maxlength) # performing the renaming - pymodule = self.pycore.get_string_module( - module_imports.get_changed_source(), + pymodule = libutils.get_string_module( + self.project, module_imports.get_changed_source(), resource=pymodule.get_resource()) for name in to_be_fixed: pymodule = self._rename_in_module(pymodule, name, @@ -248,22 +260,22 @@ def handle_long_imports(self, pymodule, maxdots=2, maxlength=27, import_filter=import_filter) -def get_imports(pycore, pydefined): +def get_imports(project, pydefined): """A shortcut for getting the `ImportInfo`\s used in a scope""" pymodule = pydefined.get_module() - module = module_imports.ModuleImports(pycore, pymodule) + module = module_imports.ModuleImports(project, pymodule) if pymodule == pydefined: return [stmt.import_info for stmt in module.imports] return module.get_used_imports(pydefined) -def get_module_imports(pycore, pymodule): +def get_module_imports(project, pymodule): """A shortcut for creating a `module_imports.ModuleImports` object""" - return module_imports.ModuleImports(pycore, pymodule) + return module_imports.ModuleImports(project, pymodule) -def add_import(pycore, pymodule, module_name, name=None): - imports = get_module_imports(pycore, pymodule) +def add_import(project, pymodule, module_name, name=None): + imports = get_module_imports(project, pymodule) candidates = [] names = [] # from mod import name @@ -288,7 +300,7 @@ def add_import(pycore, pymodule, 
module_name, name=None): candidates.append(normal_import) - visitor = actions.AddingVisitor(pycore, candidates) + visitor = actions.AddingVisitor(project, candidates) selected_import = normal_import for import_statement in imports.imports: if import_statement.accept(visitor): diff --git a/pymode/libs2/rope/refactor/importutils/actions.py b/pymode/libs2/rope/refactor/importutils/actions.py index 4851d02f..fd0f7054 100644 --- a/pymode/libs2/rope/refactor/importutils/actions.py +++ b/pymode/libs2/rope/refactor/importutils/actions.py @@ -1,6 +1,4 @@ -import os -import sys - +from rope.base import libutils from rope.base import pyobjects, exceptions, stdmods from rope.refactor import occurrences from rope.refactor.importutils import importinfo @@ -28,24 +26,25 @@ def visitFromImport(self, import_stmt, import_info): class RelativeToAbsoluteVisitor(ImportInfoVisitor): - def __init__(self, pycore, current_folder): + def __init__(self, project, current_folder): self.to_be_absolute = [] - self.pycore = pycore + self.project = project self.folder = current_folder - self.context = importinfo.ImportContext(pycore, current_folder) + self.context = importinfo.ImportContext(project, current_folder) def visitNormalImport(self, import_stmt, import_info): - self.to_be_absolute.extend(self._get_relative_to_absolute_list(import_info)) + self.to_be_absolute.extend( + self._get_relative_to_absolute_list(import_info)) new_pairs = [] for name, alias in import_info.names_and_aliases: - resource = self.pycore.find_module(name, folder=self.folder) + resource = self.project.find_module(name, folder=self.folder) if resource is None: new_pairs.append((name, alias)) continue - absolute_name = self.pycore.modname(resource) + absolute_name = libutils.modname(resource) new_pairs.append((absolute_name, alias)) if not import_info._are_name_and_alias_lists_equal( - new_pairs, import_info.names_and_aliases): + new_pairs, import_info.names_and_aliases): import_stmt.import_info = 
importinfo.NormalImport(new_pairs) def _get_relative_to_absolute_list(self, import_info): @@ -53,10 +52,10 @@ def _get_relative_to_absolute_list(self, import_info): for name, alias in import_info.names_and_aliases: if alias is not None: continue - resource = self.pycore.find_module(name, folder=self.folder) + resource = self.project.find_module(name, folder=self.folder) if resource is None: continue - absolute_name = self.pycore.modname(resource) + absolute_name = libutils.modname(resource) if absolute_name != name: result.append((name, absolute_name)) return result @@ -65,7 +64,7 @@ def visitFromImport(self, import_stmt, import_info): resource = import_info.get_imported_resource(self.context) if resource is None: return None - absolute_name = self.pycore.modname(resource) + absolute_name = libutils.modname(resource) if import_info.module_name != absolute_name: import_stmt.import_info = importinfo.FromImport( absolute_name, 0, import_info.names_and_aliases) @@ -73,11 +72,11 @@ def visitFromImport(self, import_stmt, import_info): class FilteringVisitor(ImportInfoVisitor): - def __init__(self, pycore, folder, can_select): + def __init__(self, project, folder, can_select): self.to_be_absolute = [] - self.pycore = pycore + self.project = project self.can_select = self._transform_can_select(can_select) - self.context = importinfo.ImportContext(pycore, folder) + self.context = importinfo.ImportContext(project, folder) def _transform_can_select(self, can_select): def can_select_name_and_alias(name, alias): @@ -113,10 +112,10 @@ def visitFromImport(self, import_stmt, import_info): class RemovingVisitor(ImportInfoVisitor): - def __init__(self, pycore, folder, can_select): + def __init__(self, project, folder, can_select): self.to_be_absolute = [] - self.pycore = pycore - self.filtering = FilteringVisitor(pycore, folder, can_select) + self.project = project + self.filtering = FilteringVisitor(project, folder, can_select) def dispatch(self, import_): result = 
self.filtering.dispatch(import_) @@ -133,8 +132,8 @@ class AddingVisitor(ImportInfoVisitor): """ - def __init__(self, pycore, import_list): - self.pycore = pycore + def __init__(self, project, import_list): + self.project = project self.import_list = import_list self.import_info = None @@ -162,7 +161,8 @@ def visitNormalImport(self, import_stmt, import_info): # Multiple imports using a single import statement is discouraged # so we won't bother adding them. if self.import_info._are_name_and_alias_lists_equal( - import_info.names_and_aliases, self.import_info.names_and_aliases): + import_info.names_and_aliases, + self.import_info.names_and_aliases): return True def visitFromImport(self, import_stmt, import_info): @@ -174,6 +174,9 @@ def visitFromImport(self, import_stmt, import_info): if self.import_info.is_star_import(): import_stmt.import_info = self.import_info return True + if self.project.prefs.get("split_imports"): + return self.import_info.names_and_aliases == \ + import_info.names_and_aliases new_pairs = list(import_info.names_and_aliases) for pair in self.import_info.names_and_aliases: if pair not in new_pairs: @@ -185,10 +188,10 @@ def visitFromImport(self, import_stmt, import_info): class ExpandStarsVisitor(ImportInfoVisitor): - def __init__(self, pycore, folder, can_select): - self.pycore = pycore - self.filtering = FilteringVisitor(pycore, folder, can_select) - self.context = importinfo.ImportContext(pycore, folder) + def __init__(self, project, folder, can_select): + self.project = project + self.filtering = FilteringVisitor(project, folder, can_select) + self.context = importinfo.ImportContext(project, folder) def visitNormalImport(self, import_stmt, import_info): self.filtering.dispatch(import_stmt) @@ -208,18 +211,18 @@ def visitFromImport(self, import_stmt, import_info): class SelfImportVisitor(ImportInfoVisitor): - def __init__(self, pycore, current_folder, resource): - self.pycore = pycore + def __init__(self, project, current_folder, resource): 
+ self.project = project self.folder = current_folder self.resource = resource self.to_be_fixed = set() self.to_be_renamed = set() - self.context = importinfo.ImportContext(pycore, current_folder) + self.context = importinfo.ImportContext(project, current_folder) def visitNormalImport(self, import_stmt, import_info): new_pairs = [] for name, alias in import_info.names_and_aliases: - resource = self.pycore.find_module(name, folder=self.folder) + resource = self.project.find_module(name, folder=self.folder) if resource is not None and resource == self.resource: imported = name if alias is not None: @@ -228,7 +231,7 @@ def visitNormalImport(self, import_stmt, import_info): else: new_pairs.append((name, alias)) if not import_info._are_name_and_alias_lists_equal( - new_pairs, import_info.names_and_aliases): + new_pairs, import_info.names_and_aliases): import_stmt.import_info = importinfo.NormalImport(new_pairs) def visitFromImport(self, import_stmt, import_info): @@ -238,7 +241,7 @@ def visitFromImport(self, import_stmt, import_info): if resource == self.resource: self._importing_names_from_self(import_info, import_stmt) return - pymodule = self.pycore.resource_to_pyobject(resource) + pymodule = self.project.get_pymodule(resource) new_pairs = [] for name, alias in import_info.names_and_aliases: try: @@ -254,7 +257,7 @@ def visitFromImport(self, import_stmt, import_info): except exceptions.AttributeNotFoundError: new_pairs.append((name, alias)) if not import_info._are_name_and_alias_lists_equal( - new_pairs, import_info.names_and_aliases): + new_pairs, import_info.names_and_aliases): import_stmt.import_info = importinfo.FromImport( import_info.module_name, import_info.level, new_pairs) @@ -268,19 +271,19 @@ def _importing_names_from_self(self, import_info, import_stmt): class SortingVisitor(ImportInfoVisitor): - def __init__(self, pycore, current_folder): - self.pycore = pycore + def __init__(self, project, current_folder): + self.project = project self.folder = 
current_folder self.standard = set() self.third_party = set() self.in_project = set() self.future = set() - self.context = importinfo.ImportContext(pycore, current_folder) + self.context = importinfo.ImportContext(project, current_folder) def visitNormalImport(self, import_stmt, import_info): if import_info.names_and_aliases: name, alias = import_info.names_and_aliases[0] - resource = self.pycore.find_module( + resource = self.project.find_module( name, folder=self.folder) self._check_imported_resource(import_stmt, resource, name) @@ -291,7 +294,7 @@ def visitFromImport(self, import_stmt, import_info): def _check_imported_resource(self, import_stmt, resource, imported_name): info = import_stmt.import_info - if resource is not None and resource.project == self.pycore.project: + if resource is not None and resource.project == self.project: self.in_project.add(import_stmt) elif _is_future(info): self.future.add(import_stmt) @@ -303,16 +306,15 @@ def _check_imported_resource(self, import_stmt, resource, imported_name): class LongImportVisitor(ImportInfoVisitor): - def __init__(self, current_folder, pycore, maxdots, maxlength): + def __init__(self, current_folder, project, maxdots, maxlength): self.maxdots = maxdots self.maxlength = maxlength self.to_be_renamed = set() self.current_folder = current_folder - self.pycore = pycore + self.project = project self.new_imports = [] def visitNormalImport(self, import_stmt, import_info): - new_pairs = [] for name, alias in import_info.names_and_aliases: if alias is None and self._is_long(name): self.to_be_renamed.add(name) @@ -324,15 +326,15 @@ def visitNormalImport(self, import_stmt, import_info): def _is_long(self, name): return name.count('.') > self.maxdots or \ - ('.' in name and len(name) > self.maxlength) + ('.' 
in name and len(name) > self.maxlength) class RemovePyNameVisitor(ImportInfoVisitor): - def __init__(self, pycore, pymodule, pyname, folder): + def __init__(self, project, pymodule, pyname, folder): self.pymodule = pymodule self.pyname = pyname - self.context = importinfo.ImportContext(pycore, folder) + self.context = importinfo.ImportContext(project, folder) def visitFromImport(self, import_stmt, import_info): new_pairs = [] @@ -356,4 +358,4 @@ def dispatch(self, import_): def _is_future(info): return isinstance(info, importinfo.FromImport) and \ - info.module_name == '__future__' + info.module_name == '__future__' diff --git a/pymode/libs2/rope/refactor/importutils/importinfo.py b/pymode/libs2/rope/refactor/importutils/importinfo.py index 25c8e813..114080aa 100644 --- a/pymode/libs2/rope/refactor/importutils/importinfo.py +++ b/pymode/libs2/rope/refactor/importutils/importinfo.py @@ -84,7 +84,7 @@ def _are_name_and_alias_lists_equal(self, list1, list2): def __eq__(self, obj): return isinstance(obj, self.__class__) and \ - self.get_import_statement() == obj.get_import_statement() + self.get_import_statement() == obj.get_import_statement() def __ne__(self, obj): return not self.__eq__(obj) @@ -147,10 +147,10 @@ def get_imported_resource(self, context): Returns `None` if module was not found. """ if self.level == 0: - return context.pycore.find_module( + return context.project.find_module( self.module_name, folder=context.folder) else: - return context.pycore.find_relative_module( + return context.project.find_relative_module( self.module_name, context.folder, self.level) def get_imported_module(self, context): @@ -160,10 +160,10 @@ def get_imported_module(self, context): could not be found. 
""" if self.level == 0: - return context.pycore.get_module( + return context.project.get_module( self.module_name, context.folder) else: - return context.pycore.get_relative_module( + return context.project.get_relative_module( self.module_name, context.folder, self.level) def get_import_statement(self): @@ -180,7 +180,7 @@ def is_empty(self): def is_star_import(self): return len(self.names_and_aliases) > 0 and \ - self.names_and_aliases[0][0] == '*' + self.names_and_aliases[0][0] == '*' class EmptyImport(ImportInfo): @@ -196,6 +196,6 @@ def get_imported_primaries(self, context): class ImportContext(object): - def __init__(self, pycore, folder): - self.pycore = pycore + def __init__(self, project, folder): + self.project = project self.folder = folder diff --git a/pymode/libs2/rope/refactor/importutils/module_imports.py b/pymode/libs2/rope/refactor/importutils/module_imports.py index 874213f2..b96eebc4 100644 --- a/pymode/libs2/rope/refactor/importutils/module_imports.py +++ b/pymode/libs2/rope/refactor/importutils/module_imports.py @@ -1,13 +1,14 @@ -import rope.base.pynames -from rope.base import ast, utils -from rope.refactor.importutils import importinfo +from rope.base import ast +from rope.base import pynames +from rope.base import utils from rope.refactor.importutils import actions +from rope.refactor.importutils import importinfo class ModuleImports(object): - def __init__(self, pycore, pymodule, import_filter=None): - self.pycore = pycore + def __init__(self, project, pymodule, import_filter=None): + self.project = project self.pymodule = pymodule self.separating_lines = 0 self.filter = import_filter @@ -15,7 +16,7 @@ def __init__(self, pycore, pymodule, import_filter=None): @property @utils.saveit def imports(self): - finder = _GlobalImportFinder(self.pymodule, self.pycore) + finder = _GlobalImportFinder(self.pymodule) result = finder.find_import_statements() self.separating_lines = finder.get_separating_line_count() if self.filter is not None: @@ -32,15 
+33,16 @@ def _get_unbound_names(self, defined_pyobject): def remove_unused_imports(self): can_select = _OneTimeSelector(self._get_unbound_names(self.pymodule)) visitor = actions.RemovingVisitor( - self.pycore, self._current_folder(), can_select) + self.project, self._current_folder(), can_select) for import_statement in self.imports: import_statement.accept(visitor) def get_used_imports(self, defined_pyobject): result = [] - can_select = _OneTimeSelector(self._get_unbound_names(defined_pyobject)) + can_select = _OneTimeSelector( + self._get_unbound_names(defined_pyobject)) visitor = actions.FilteringVisitor( - self.pycore, self._current_folder(), can_select) + self.project, self._current_folder(), can_select) for import_statement in self.imports: new_import = import_statement.accept(visitor) if new_import is not None and not new_import.is_empty(): @@ -48,11 +50,18 @@ def get_used_imports(self, defined_pyobject): return result def get_changed_source(self): - imports = self.imports - after_removing = self._remove_imports(imports) - imports = [stmt for stmt in imports + # Make sure we forward a removed import's preceding blank + # lines count to the following import statement. + prev_stmt = None + for stmt in self.imports: + if prev_stmt is not None and prev_stmt.import_info.is_empty(): + stmt.blank_lines = max(prev_stmt.blank_lines, stmt.blank_lines) + prev_stmt = stmt + # The new list of imports. 
+ imports = [stmt for stmt in self.imports if not stmt.import_info.is_empty()] + after_removing = self._remove_imports(self.imports) first_non_blank = self._first_non_blank_line(after_removing, 0) first_import = self._first_import_line() - 1 result = [] @@ -61,7 +70,6 @@ def get_changed_source(self): # Writing imports sorted_imports = sorted(imports, self._compare_import_locations) for stmt in sorted_imports: - start = self._get_import_location(stmt) if stmt != sorted_imports[0]: result.append('\n' * stmt.blank_lines) result.append(stmt.get_import_statement() + '\n') @@ -111,7 +119,7 @@ def _first_non_blank_line(self, lines, lineno): return result def add_import(self, import_info): - visitor = actions.AddingVisitor(self.pycore, [import_info]) + visitor = actions.AddingVisitor(self.project, [import_info]) for import_statement in self.imports: if import_statement.accept(visitor): break @@ -132,21 +140,21 @@ def _get_new_import_lineno(self): def filter_names(self, can_select): visitor = actions.RemovingVisitor( - self.pycore, self._current_folder(), can_select) + self.project, self._current_folder(), can_select) for import_statement in self.imports: import_statement.accept(visitor) def expand_stars(self): can_select = _OneTimeSelector(self._get_unbound_names(self.pymodule)) visitor = actions.ExpandStarsVisitor( - self.pycore, self._current_folder(), can_select) + self.project, self._current_folder(), can_select) for import_statement in self.imports: import_statement.accept(visitor) def remove_duplicates(self): added_imports = [] for import_stmt in self.imports: - visitor = actions.AddingVisitor(self.pycore, + visitor = actions.AddingVisitor(self.project, [import_stmt.import_info]) for added_import in added_imports: if added_import.accept(visitor): @@ -154,17 +162,34 @@ def remove_duplicates(self): else: added_imports.append(import_stmt) + def force_single_imports(self): + """force a single import per statement""" + for import_stmt in self.imports[:]: + import_info = 
import_stmt.import_info + if import_info.is_empty(): + continue + if len(import_info.names_and_aliases) > 1: + for name_and_alias in import_info.names_and_aliases: + if hasattr(import_info, "module_name"): + new_import = importinfo.FromImport( + import_info.module_name, import_info.level, + [name_and_alias]) + else: + new_import = importinfo.NormalImport([name_and_alias]) + self.add_import(new_import) + import_stmt.empty_import() + def get_relative_to_absolute_list(self): - visitor = rope.refactor.importutils.actions.RelativeToAbsoluteVisitor( - self.pycore, self._current_folder()) + visitor = actions.RelativeToAbsoluteVisitor( + self.project, self._current_folder()) for import_stmt in self.imports: if not import_stmt.readonly: import_stmt.accept(visitor) return visitor.to_be_absolute def get_self_import_fix_and_rename_list(self): - visitor = rope.refactor.importutils.actions.SelfImportVisitor( - self.pycore, self._current_folder(), self.pymodule.get_resource()) + visitor = actions.SelfImportVisitor( + self.project, self._current_folder(), self.pymodule.get_resource()) for import_stmt in self.imports: if not import_stmt.readonly: import_stmt.accept(visitor) @@ -174,15 +199,19 @@ def _current_folder(self): return self.pymodule.get_resource().parent def sort_imports(self): + if self.project.prefs.get("sort_imports_alphabetically"): + sort_kwargs = dict(key=self._get_import_name) + else: + sort_kwargs = dict(cmp=self._compare_imports) + # IDEA: Sort from import list - visitor = actions.SortingVisitor(self.pycore, self._current_folder()) + visitor = actions.SortingVisitor(self.project, self._current_folder()) for import_statement in self.imports: import_statement.accept(visitor) - in_projects = sorted(visitor.in_project, self._compare_imports) - third_party = sorted(visitor.third_party, self._compare_imports) - standards = sorted(visitor.standard, self._compare_imports) - future = sorted(visitor.future, self._compare_imports) - blank_lines = 0 + in_projects = 
sorted(visitor.in_project, **sort_kwargs) + third_party = sorted(visitor.third_party, **sort_kwargs) + standards = sorted(visitor.standard, **sort_kwargs) + future = sorted(visitor.future, **sort_kwargs) last_index = self._first_import_line() last_index = self._move_imports(future, last_index, 0) last_index = self._move_imports(standards, last_index, 1) @@ -208,6 +237,14 @@ def _first_import_line(self): break return lineno + def _get_import_name(self, import_stmt): + import_info = import_stmt.import_info + if hasattr(import_info, "module_name"): + return "%s.%s" % (import_info.module_name, + import_info.names_and_aliases[0][0]) + else: + return import_info.names_and_aliases[0][0] + def _compare_imports(self, stmt1, stmt2): str1 = stmt1.get_import_statement() str2 = stmt2.get_import_statement() @@ -229,7 +266,7 @@ def _move_imports(self, imports, index, blank_lines): def handle_long_imports(self, maxdots, maxlength): visitor = actions.LongImportVisitor( - self._current_folder(), self.pycore, maxdots, maxlength) + self._current_folder(), self.project, maxdots, maxlength) for import_statement in self.imports: if not import_statement.readonly: import_statement.accept(visitor) @@ -239,7 +276,7 @@ def handle_long_imports(self, maxdots, maxlength): def remove_pyname(self, pyname): """Removes pyname when imported in ``from mod import x``""" - visitor = actions.RemovePyNameVisitor(self.pycore, self.pymodule, + visitor = actions.RemovePyNameVisitor(self.project, self.pymodule, pyname, self._current_folder()) for import_stmt in self.imports: import_stmt.accept(visitor) @@ -277,7 +314,7 @@ def __init__(self, pyobject): def _visit_child_scope(self, node): pyobject = self.pyobject.get_module().get_scope().\ - get_inner_scope_for_line(node.lineno).pyobject + get_inner_scope_for_line(node.lineno).pyobject visitor = _LocalUnboundNameFinder(pyobject, self) for child in ast.get_child_nodes(node): ast.walk(child, visitor) @@ -324,8 +361,8 @@ def __init__(self, pymodule, 
wanted_pyobject): self.unbound = set() self.names = set() for name, pyname in pymodule._get_structural_attributes().items(): - if not isinstance(pyname, (rope.base.pynames.ImportedName, - rope.base.pynames.ImportedModule)): + if not isinstance(pyname, (pynames.ImportedName, + pynames.ImportedModule)): self.names.add(name) wanted_scope = wanted_pyobject.get_scope() self.start = wanted_scope.get_start() @@ -374,12 +411,11 @@ def add_unbound(self, name): class _GlobalImportFinder(object): - def __init__(self, pymodule, pycore): + def __init__(self, pymodule): self.current_folder = None if pymodule.get_resource(): self.current_folder = pymodule.get_resource().parent self.pymodule = pymodule - self.pycore = pycore self.imports = [] self.pymodule = pymodule self.lines = self.pymodule.lines @@ -428,13 +464,14 @@ def visit_from(self, node, end_line): if node.level: level = node.level import_info = importinfo.FromImport( - node.module or '', # see comment at rope.base.ast.walk + node.module or '', # see comment at rope.base.ast.walk level, self._get_names(node.names)) start_line = node.lineno self.imports.append(importinfo.ImportStatement( import_info, node.lineno, end_line, self._get_text(start_line, end_line), - blank_lines=self._count_empty_lines_before(start_line))) + blank_lines= + self._count_empty_lines_before(start_line))) def _get_names(self, alias_names): result = [] diff --git a/pymode/libs2/rope/refactor/inline.py b/pymode/libs2/rope/refactor/inline.py index cfd64a7e..0ae1f8f4 100644 --- a/pymode/libs2/rope/refactor/inline.py +++ b/pymode/libs2/rope/refactor/inline.py @@ -21,17 +21,19 @@ import rope.base.exceptions import rope.refactor.functionutils from rope.base import (pynames, pyobjects, codeanalyze, - taskhandle, evaluate, worder, utils) + taskhandle, evaluate, worder, utils, libutils) from rope.base.change import ChangeSet, ChangeContents from rope.refactor import (occurrences, rename, sourceutils, importutils, move, change_signature) + def 
unique_prefix(): n = 0 while True: yield "__" + str(n) + "__" n += 1 + def create_inline(project, resource, offset): """Create a refactoring object for inlining @@ -39,8 +41,7 @@ def create_inline(project, resource, offset): `InlineMethod`, `InlineVariable` or `InlineParameter`. """ - pycore = project.pycore - pyname = _get_pyname(pycore, resource, offset) + pyname = _get_pyname(project, resource, offset) message = 'Inline refactoring should be performed on ' \ 'a method, local variable or parameter.' if pyname is None: @@ -61,9 +62,8 @@ class _Inliner(object): def __init__(self, project, resource, offset): self.project = project - self.pycore = project.pycore - self.pyname = _get_pyname(self.pycore, resource, offset) - range_finder = worder.Worder(resource.read()) + self.pyname = _get_pyname(project, resource, offset) + range_finder = worder.Worder(resource.read(), True) self.region = range_finder.get_primary_range(offset) self.name = range_finder.get_word_at(offset) self.offset = offset @@ -84,7 +84,7 @@ def __init__(self, *args, **kwds): self.pymodule = self.pyfunction.get_module() self.resource = self.pyfunction.get_module().get_resource() self.occurrence_finder = occurrences.create_finder( - self.pycore, self.name, self.pyname) + self.project, self.name, self.pyname) self.normal_generator = _DefinitionGenerator(self.project, self.pyfunction) self._init_imports() @@ -92,7 +92,7 @@ def __init__(self, *args, **kwds): def _init_imports(self): body = sourceutils.get_body(self.pyfunction) body, imports = move.moving_code_with_imports( - self.pycore, self.resource, body) + self.project, self.resource, body) self.imports = imports self.others_generator = _DefinitionGenerator( self.project, self.pyfunction, body=body) @@ -100,7 +100,6 @@ def _init_imports(self): def _get_scope_range(self): scope = self.pyfunction.get_scope() lines = self.pymodule.lines - logicals = self.pymodule.logical_lines start_line = scope.get_start() if self.pyfunction.decorators: decorators = 
self.pyfunction.decorators @@ -121,7 +120,7 @@ def get_changes(self, remove=True, only_current=False, resources=None, """ changes = ChangeSet('Inline method <%s>' % self.name) if resources is None: - resources = self.pycore.get_python_files() + resources = self.project.get_python_files() if only_current: resources = [self.original] if remove: @@ -132,20 +131,20 @@ def get_changes(self, remove=True, only_current=False, resources=None, job_set.started_job(file.path) if file == self.resource: changes.add_change(self._defining_file_changes( - changes, remove=remove, only_current=only_current)) + changes, remove=remove, only_current=only_current)) else: aim = None if only_current and self.original == file: aim = self.offset handle = _InlineFunctionCallsForModuleHandle( - self.pycore, file, self.others_generator, aim) + self.project, file, self.others_generator, aim) result = move.ModuleSkipRenamer( self.occurrence_finder, file, handle).get_changed_module() if result is not None: - result = _add_imports(self.pycore, result, + result = _add_imports(self.project, result, file, self.imports) if remove: - result = _remove_from(self.pycore, self.pyname, + result = _remove_from(self.project, self.pyname, result, file) changes.add_change(ChangeContents(file, result)) job_set.finished_job() @@ -154,8 +153,6 @@ def get_changes(self, remove=True, only_current=False, resources=None, def _get_removed_range(self): scope = self.pyfunction.get_scope() lines = self.pymodule.lines - logical = self.pymodule.logical_lines - start_line = scope.get_start() start, end = self._get_scope_range() end_line = scope.get_end() for i in range(end_line + 1, lines.length()): @@ -177,7 +174,7 @@ def _defining_file_changes(self, changes, remove, only_current): # we don't want to change any of them aim = len(self.resource.read()) + 100 handle = _InlineFunctionCallsForModuleHandle( - self.pycore, self.resource, + self.project, self.resource, self.normal_generator, aim_offset=aim) replacement = None if 
remove: @@ -200,7 +197,6 @@ def _is_the_last_method_of_a_class(self): return False class_start, class_end = sourceutils.get_body_region(pyclass) source = self.pymodule.source_code - lines = self.pymodule.lines func_start, func_end = self._get_scope_range() if source[class_start:func_start].strip() == '' and \ source[func_end:class_end].strip() == '': @@ -226,12 +222,12 @@ def _check_exceptional_conditions(self): 'Local variable should be assigned once for inlining.') def get_changes(self, remove=True, only_current=False, resources=None, - task_handle=taskhandle.NullTaskHandle()): + docs=False, task_handle=taskhandle.NullTaskHandle()): if resources is None: if rename._is_local(self.pyname): resources = [self.resource] else: - resources = self.pycore.get_python_files() + resources = self.project.get_python_files() if only_current: resources = [self.original] if remove and self.original != self.resource: @@ -243,28 +239,29 @@ def get_changes(self, remove=True, only_current=False, resources=None, for resource in resources: jobset.started_job(resource.path) if resource == self.resource: - source = self._change_main_module(remove, only_current) + source = self._change_main_module(remove, only_current, docs) changes.add_change(ChangeContents(self.resource, source)) else: result = self._change_module(resource, remove, only_current) if result is not None: - result = _add_imports(self.pycore, result, + result = _add_imports(self.project, result, resource, self.imports) changes.add_change(ChangeContents(resource, result)) jobset.finished_job() return changes - def _change_main_module(self, remove, only_current): + def _change_main_module(self, remove, only_current, docs): region = None if only_current and self.original == self.resource: region = self.region - return _inline_variable(self.pycore, self.pymodule, self.pyname, - self.name, remove=remove, region=region) + return _inline_variable(self.project, self.pymodule, self.pyname, + self.name, remove=remove, region=region, + 
docs=docs) def _init_imports(self): vardef = _getvardef(self.pymodule, self.pyname) self.imported, self.imports = move.moving_code_with_imports( - self.pycore, self.resource, vardef) + self.project, self.resource, vardef) def _change_module(self, resource, remove, only_current): filters = [occurrences.NoImportsFilter(), @@ -275,11 +272,12 @@ def check_aim(occurrence): if self.offset < start or end < self.offset: return False filters.insert(0, check_aim) - finder = occurrences.Finder(self.pycore, self.name, filters=filters) + finder = occurrences.Finder(self.project, self.name, filters=filters) changed = rename.rename_in_module( finder, self.imported, resource=resource, replace_primary=True) if changed and remove: - changed = _remove_from(self.pycore, self.pyname, changed, resource) + changed = _remove_from(self.project, self.pyname, + changed, resource) return changed def get_kind(self): @@ -329,8 +327,9 @@ def _join_lines(lines): class _DefinitionGenerator(object): unique_prefix = unique_prefix() + def __init__(self, project, pyfunction, body=None): - self.pycore = project.pycore + self.project = project self.pyfunction = pyfunction self.pymodule = pyfunction.get_module() self.resource = self.pymodule.get_resource() @@ -360,10 +359,11 @@ def _get_definition_params(self): def get_function_name(self): return self.pyfunction.get_name() - def get_definition(self, primary, pyname, call, host_vars=[],returns=False): + def get_definition(self, primary, pyname, call, host_vars=[], + returns=False): # caching already calculated definitions return self._calculate_definition(primary, pyname, call, - host_vars, returns) + host_vars, returns) def _calculate_header(self, primary, pyname, call): # A header is created which initializes parameters @@ -377,10 +377,6 @@ def _calculate_header(self, primary, pyname, call): paramdict[param_name] = value header = '' to_be_inlined = [] - mod = self.pycore.get_string_module(self.body) - all_names = mod.get_scope().get_names() - 
assigned_names = [name for name in all_names if - isinstance(all_names[name], rope.base.pynamesdef.AssignedName)] for name, value in paramdict.items(): if name != value and value is not None: header += name + ' = ' + value.replace('\n', ' ') + '\n' @@ -392,32 +388,36 @@ def _calculate_definition(self, primary, pyname, call, host_vars, returns): header, to_be_inlined = self._calculate_header(primary, pyname, call) source = header + self.body - mod = self.pycore.get_string_module(source) + mod = libutils.get_string_module(self.project, source) name_dict = mod.get_scope().get_names() - all_names = [x for x in name_dict if - not isinstance(name_dict[x], rope.base.builtins.BuiltinName)] + all_names = [x for x in name_dict if + not isinstance(name_dict[x], + rope.base.builtins.BuiltinName)] # If there is a name conflict, all variable names # inside the inlined function are renamed if len(set(all_names).intersection(set(host_vars))) > 0: prefix = _DefinitionGenerator.unique_prefix.next() - guest = self.pycore.get_string_module(source, self.resource) + guest = libutils.get_string_module(self.project, source, + self.resource) - to_be_inlined = [prefix+item for item in to_be_inlined] + to_be_inlined = [prefix + item for item in to_be_inlined] for item in all_names: pyname = guest[item] - occurrence_finder = occurrences.create_finder( - self.pycore, item, pyname) + occurrence_finder = occurrences.create_finder(self.project, + item, pyname) source = rename.rename_in_module(occurrence_finder, - prefix+item, pymodule=guest) - guest = self.pycore.get_string_module(source, self.resource) + prefix + item, pymodule=guest) + guest = libutils.get_string_module( + self.project, source, self.resource) #parameters not reassigned inside the functions are now inlined. 
for name in to_be_inlined: - pymodule = self.pycore.get_string_module(source, self.resource) + pymodule = libutils.get_string_module( + self.project, source, self.resource) pyname = pymodule[name] - source = _inline_variable(self.pycore, pymodule, pyname, name) + source = _inline_variable(self.project, pymodule, pyname, name) return self._replace_returns_with(source, returns) @@ -425,19 +425,22 @@ def _replace_returns_with(self, source, returns): result = [] returned = None last_changed = 0 - for match in _DefinitionGenerator._get_return_pattern().finditer(source): + for match in _DefinitionGenerator._get_return_pattern().finditer( + source): for key, value in match.groupdict().items(): if value and key == 'return': result.append(source[last_changed:match.start('return')]) if returns: self._check_nothing_after_return(source, match.end('return')) + beg_idx = match.end('return') returned = _join_lines( - source[match.end('return'): len(source)].splitlines()) + source[beg_idx:len(source)].splitlines()) last_changed = len(source) else: current = match.end('return') - while current < len(source) and source[current] in ' \t': + while current < len(source) and \ + source[current] in ' \t': current += 1 last_changed = current if current == len(source) or source[current] == '\n': @@ -452,7 +455,8 @@ def _check_nothing_after_return(self, source, offset): lineno = logical_lines.logical_line_in(lineno)[1] if source[lines.get_line_end(lineno):len(source)].strip() != '': raise rope.base.exceptions.RefactoringError( - 'Cannot inline functions with statements after return statement.') + 'Cannot inline functions with statements ' + + 'after return statement.') @classmethod def _get_return_pattern(cls): @@ -471,7 +475,7 @@ def named_pattern(name, list_): class _InlineFunctionCallsForModuleHandle(object): - def __init__(self, pycore, resource, + def __init__(self, project, resource, definition_generator, aim_offset=None): """Inlines occurrences @@ -479,7 +483,7 @@ def __init__(self, 
pycore, resource, `aim` offset will be inlined. """ - self.pycore = pycore + self.project = project self.generator = definition_generator self.resource = resource self.aim = aim_offset @@ -504,24 +508,24 @@ def occurred_outside_skip(self, change_collector, occurrence): end_parens = self._find_end_parens(self.source, end - 1) lineno = self.lines.get_line_number(start) start_line, end_line = self.pymodule.logical_lines.\ - logical_line_in(lineno) + logical_line_in(lineno) line_start = self.lines.get_line_start(start_line) line_end = self.lines.get_line_end(end_line) - returns = self.source[line_start:start].strip() != '' or \ - self.source[end_parens:line_end].strip() != '' + self.source[end_parens:line_end].strip() != '' indents = sourceutils.get_indents(self.lines, start_line) primary, pyname = occurrence.get_primary_and_pyname() - host = self.pycore.resource_to_pyobject(self.resource) + host = self.pymodule scope = host.scope.get_inner_scope_for_line(lineno) definition, returned = self.generator.get_definition( - primary, pyname, self.source[start:end_parens], scope.get_names(), returns=returns) + primary, pyname, self.source[start:end_parens], scope.get_names(), + returns=returns) end = min(line_end + 1, len(self.source)) - change_collector.add_change(line_start, end, - sourceutils.fix_indentation(definition, indents)) + change_collector.add_change( + line_start, end, sourceutils.fix_indentation(definition, indents)) if returns: name = returned if name is None: @@ -537,7 +541,7 @@ def _find_end_parens(self, source, offset): @property @utils.saveit def pymodule(self): - return self.pycore.resource_to_pyobject(self.resource) + return self.project.get_pymodule(self.resource) @property @utils.saveit @@ -553,12 +557,13 @@ def lines(self): return self.pymodule.lines -def _inline_variable(pycore, pymodule, pyname, name, - remove=True, region=None): +def _inline_variable(project, pymodule, pyname, name, + remove=True, region=None, docs=False): definition = 
_getvardef(pymodule, pyname) start, end = _assigned_lineno(pymodule, pyname) - occurrence_finder = occurrences.create_finder(pycore, name, pyname) + occurrence_finder = occurrences.create_finder(project, name, pyname, + docs=docs) changed_source = rename.rename_in_module( occurrence_finder, definition, pymodule=pymodule, replace_primary=True, writes=False, region=region) @@ -567,11 +572,12 @@ def _inline_variable(pycore, pymodule, pyname, name, if remove: lines = codeanalyze.SourceLinesAdapter(changed_source) source = changed_source[:lines.get_line_start(start)] + \ - changed_source[lines.get_line_end(end) + 1:] + changed_source[lines.get_line_end(end) + 1:] else: source = changed_source return source + def _getvardef(pymodule, pyname): assignment = pyname.assignments[0] lines = pymodule.lines @@ -581,35 +587,39 @@ def _getvardef(pymodule, pyname): if assignment.levels: raise rope.base.exceptions.RefactoringError( 'Cannot inline tuple assignments.') - definition = definition_with_assignment[definition_with_assignment.\ + definition = definition_with_assignment[definition_with_assignment. 
index('=') + 1:].strip() return definition + def _assigned_lineno(pymodule, pyname): definition_line = pyname.assignments[0].ast_node.lineno return pymodule.logical_lines.logical_line_in(definition_line) -def _add_imports(pycore, source, resource, imports): + +def _add_imports(project, source, resource, imports): if not imports: return source - pymodule = pycore.get_string_module(source, resource) - module_import = importutils.get_module_imports(pycore, pymodule) + pymodule = libutils.get_string_module(project, source, resource) + module_import = importutils.get_module_imports(project, pymodule) for import_info in imports: module_import.add_import(import_info) source = module_import.get_changed_source() - pymodule = pycore.get_string_module(source, resource) - import_tools = importutils.ImportTools(pycore) + pymodule = libutils.get_string_module(project, source, resource) + import_tools = importutils.ImportTools(project) return import_tools.organize_imports(pymodule, unused=False, sort=False) -def _get_pyname(pycore, resource, offset): - pymodule = pycore.resource_to_pyobject(resource) + +def _get_pyname(project, resource, offset): + pymodule = project.get_pymodule(resource) pyname = evaluate.eval_location(pymodule, offset) if isinstance(pyname, pynames.ImportedName): pyname = pyname._get_imported_pyname() return pyname -def _remove_from(pycore, pyname, source, resource): - pymodule = pycore.get_string_module(source, resource) - module_import = importutils.get_module_imports(pycore, pymodule) + +def _remove_from(project, pyname, source, resource): + pymodule = libutils.get_string_module(project, source, resource) + module_import = importutils.get_module_imports(project, pymodule) module_import.remove_pyname(pyname) return module_import.get_changed_source() diff --git a/pymode/libs2/rope/refactor/introduce_factory.py b/pymode/libs2/rope/refactor/introduce_factory.py index 5a885587..7532e361 100644 --- a/pymode/libs2/rope/refactor/introduce_factory.py +++ 
b/pymode/libs2/rope/refactor/introduce_factory.py @@ -1,5 +1,6 @@ import rope.base.exceptions import rope.base.pyobjects +from rope.base import libutils from rope.base import taskhandle, evaluate from rope.base.change import (ChangeSet, ChangeContents) from rope.refactor import rename, occurrences, sourceutils, importutils @@ -8,13 +9,14 @@ class IntroduceFactory(object): def __init__(self, project, resource, offset): - self.pycore = project.pycore + self.project = project self.offset = offset - this_pymodule = self.pycore.resource_to_pyobject(resource) + this_pymodule = self.project.get_pymodule(resource) self.old_pyname = evaluate.eval_location(this_pymodule, offset) - if self.old_pyname is None or not isinstance(self.old_pyname.get_object(), - rope.base.pyobjects.PyClass): + if self.old_pyname is None or \ + not isinstance(self.old_pyname.get_object(), + rope.base.pyobjects.PyClass): raise rope.base.exceptions.RefactoringError( 'Introduce factory should be performed on a class.') self.old_name = self.old_pyname.get_object().get_name() @@ -35,7 +37,7 @@ def get_changes(self, factory_name, global_factory=False, resources=None, """ if resources is None: - resources = self.pycore.get_python_files() + resources = self.project.get_python_files() changes = ChangeSet('Introduce factory method <%s>' % factory_name) job_set = task_handle.create_jobset('Collecting Changes', len(resources)) @@ -64,11 +66,11 @@ def _change_module(self, resources, changes, global_) if changed_code is not None: if global_: - new_pymodule = self.pycore.get_string_module(changed_code, - self.resource) - modname = self.pycore.modname(self.resource) + new_pymodule = libutils.get_string_module( + self.project, changed_code, self.resource) + modname = libutils.modname(self.resource) changed_code, imported = importutils.add_import( - self.pycore, new_pymodule, modname, factory_name) + self.project, new_pymodule, modname, factory_name) changed_code = changed_code.replace(replacement, imported) 
changes.add_change(ChangeContents(file_, changed_code)) job_set.finished_job() @@ -81,8 +83,8 @@ def _change_resource(self, changes, factory_name, global_): if source_code is None: source_code = self.pymodule.source_code else: - self.pymodule = self.pycore.get_string_module( - source_code, resource=self.resource) + self.pymodule = libutils.get_string_module( + self.project, source_code, resource=self.resource) lines = self.pymodule.lines start = self._get_insertion_offset(class_scope, lines) result = source_code[:start] @@ -100,7 +102,7 @@ def _get_insertion_offset(self, class_scope, lines): def _get_factory_method(self, lines, class_scope, factory_name, global_): - unit_indents = ' ' * sourceutils.get_indent(self.pycore) + unit_indents = ' ' * sourceutils.get_indent(self.project) if global_: if self._get_scope_indents(lines, class_scope) > 0: raise rope.base.exceptions.RefactoringError( @@ -111,7 +113,7 @@ def _get_factory_method(self, lines, class_scope, ('@staticmethod\ndef %s(*args, **kwds):\n' % factory_name + '%sreturn %s(*args, **kwds)\n' % (unit_indents, self.old_name)) indents = self._get_scope_indents(lines, class_scope) + \ - sourceutils.get_indent(self.pycore) + sourceutils.get_indent(self.project) return '\n' + sourceutils.indent_lines(unindented_factory, indents) def _get_scope_indents(self, lines, scope): @@ -124,7 +126,7 @@ def _new_function_name(self, factory_name, global_): return self.old_name + '.' 
+ factory_name def _rename_occurrences(self, file_, changed_name, global_factory): - finder = occurrences.create_finder(self.pycore, self.old_name, + finder = occurrences.create_finder(self.project, self.old_name, self.old_pyname, only_calls=True) result = rename.rename_in_module(finder, changed_name, resource=file_, replace_primary=global_factory) diff --git a/pymode/libs2/rope/refactor/introduce_parameter.py b/pymode/libs2/rope/refactor/introduce_parameter.py index 312c61aa..43d6f755 100644 --- a/pymode/libs2/rope/refactor/introduce_parameter.py +++ b/pymode/libs2/rope/refactor/introduce_parameter.py @@ -35,10 +35,10 @@ def f(p=a.var): """ def __init__(self, project, resource, offset): - self.pycore = project.pycore + self.project = project self.resource = resource self.offset = offset - self.pymodule = self.pycore.resource_to_pyobject(self.resource) + self.pymodule = self.project.get_pymodule(self.resource) scope = self.pymodule.get_scope().get_inner_scope_for_offset(offset) if scope.get_kind() != 'Function': raise exceptions.RefactoringError( @@ -79,7 +79,7 @@ def _get_header_offsets(self): lines = self.pymodule.lines start_line = self.pyfunction.get_scope().get_start() end_line = self.pymodule.logical_lines.\ - logical_line_in(start_line)[1] + logical_line_in(start_line)[1] start = lines.get_line_start(start_line) end = lines.get_line_end(end_line) start = self.pymodule.source_code.find('def', start) + 4 @@ -88,7 +88,8 @@ def _get_header_offsets(self): def _change_function_occurances(self, collector, function_start, function_end, new_name): - finder = occurrences.create_finder(self.pycore, self.name, self.pyname) + finder = occurrences.create_finder(self.project, self.name, + self.pyname) for occurrence in finder.find_occurrences(resource=self.resource): start, end = occurrence.get_primary_range() if function_start <= start < function_end: diff --git a/pymode/libs2/rope/refactor/localtofield.py b/pymode/libs2/rope/refactor/localtofield.py index 
532d4c9e..f276070f 100644 --- a/pymode/libs2/rope/refactor/localtofield.py +++ b/pymode/libs2/rope/refactor/localtofield.py @@ -6,13 +6,12 @@ class LocalToField(object): def __init__(self, project, resource, offset): self.project = project - self.pycore = project.pycore self.resource = resource self.offset = offset def get_changes(self): name = worder.get_name_at(self.resource, self.offset) - this_pymodule = self.pycore.resource_to_pyobject(self.resource) + this_pymodule = self.project.get_pymodule(self.resource) pyname = evaluate.eval_location(this_pymodule, self.offset) if not self._is_a_method_local(pyname): raise exceptions.RefactoringError( @@ -26,7 +25,7 @@ def get_changes(self): new_name = self._get_field_name(function_scope.pyobject, name) changes = Rename(self.project, self.resource, self.offset).\ - get_changes(new_name, resources=[self.resource]) + get_changes(new_name, resources=[self.resource]) return changes def _check_redefinition(self, name, function_scope): @@ -45,6 +44,6 @@ def _is_a_method_local(self, pyname): holding_scope = pymodule.get_scope().get_inner_scope_for_line(lineno) parent = holding_scope.parent return isinstance(pyname, pynames.AssignedName) and \ - pyname in holding_scope.get_names().values() and \ - holding_scope.get_kind() == 'Function' and \ - parent is not None and parent.get_kind() == 'Class' + pyname in holding_scope.get_names().values() and \ + holding_scope.get_kind() == 'Function' and \ + parent is not None and parent.get_kind() == 'Class' diff --git a/pymode/libs2/rope/refactor/method_object.py b/pymode/libs2/rope/refactor/method_object.py index b3dd6bdd..29ce429d 100644 --- a/pymode/libs2/rope/refactor/method_object.py +++ b/pymode/libs2/rope/refactor/method_object.py @@ -1,5 +1,6 @@ import warnings +from rope.base import libutils from rope.base import pyobjects, exceptions, change, evaluate, codeanalyze from rope.refactor import sourceutils, occurrences, rename @@ -7,8 +8,8 @@ class MethodObject(object): def 
__init__(self, project, resource, offset): - self.pycore = project.pycore - this_pymodule = self.pycore.resource_to_pyobject(resource) + self.project = project + this_pymodule = self.project.get_pymodule(resource) pyname = evaluate.eval_location(this_pymodule, offset) if pyname is None or not isinstance(pyname.get_object(), pyobjects.PyFunction): @@ -21,10 +22,10 @@ def __init__(self, project, resource, offset): def get_new_class(self, name): body = sourceutils.fix_indentation( - self._get_body(), sourceutils.get_indent(self.pycore) * 2) + self._get_body(), sourceutils.get_indent(self.project) * 2) return 'class %s(object):\n\n%s%sdef __call__(self):\n%s' % \ (name, self._get_init(), - ' ' * sourceutils.get_indent(self.pycore), body) + ' ' * sourceutils.get_indent(self.project), body) def get_changes(self, classname=None, new_class_name=None): if new_class_name is not None: @@ -36,14 +37,15 @@ def get_changes(self, classname=None, new_class_name=None): start, end = sourceutils.get_body_region(self.pyfunction) indents = sourceutils.get_indents( self.pymodule.lines, self.pyfunction.get_scope().get_start()) + \ - sourceutils.get_indent(self.pycore) + sourceutils.get_indent(self.project) new_contents = ' ' * indents + 'return %s(%s)()\n' % \ (classname, ', '.join(self._get_parameter_names())) collector.add_change(start, end, new_contents) insertion = self._get_class_insertion_point() collector.add_change(insertion, insertion, '\n\n' + self.get_new_class(classname)) - changes = change.ChangeSet('Replace method with method object refactoring') + changes = change.ChangeSet( + 'Replace method with method object refactoring') changes.add_change(change.ChangeContents(self.resource, collector.get_changed())) return changes @@ -59,9 +61,10 @@ def _get_body(self): body = sourceutils.get_body(self.pyfunction) for param in self._get_parameter_names(): body = param + ' = None\n' + body - pymod = self.pycore.get_string_module(body, self.resource) + pymod = 
libutils.get_string_module( + self.project, body, self.resource) pyname = pymod[param] - finder = occurrences.create_finder(self.pycore, param, pyname) + finder = occurrences.create_finder(self.project, param, pyname) result = rename.rename_in_module(finder, 'self.' + param, pymodule=pymod) body = result[result.index('\n') + 1:] @@ -69,7 +72,7 @@ def _get_body(self): def _get_init(self): params = self._get_parameter_names() - indents = ' ' * sourceutils.get_indent(self.pycore) + indents = ' ' * sourceutils.get_indent(self.project) if not params: return '' header = indents + 'def __init__(self' diff --git a/pymode/libs2/rope/refactor/move.py b/pymode/libs2/rope/refactor/move.py index c8761011..60df493e 100644 --- a/pymode/libs2/rope/refactor/move.py +++ b/pymode/libs2/rope/refactor/move.py @@ -4,9 +4,11 @@ based on inputs. """ -from rope.base import pyobjects, codeanalyze, exceptions, pynames, taskhandle, evaluate, worder +from rope.base import (pyobjects, codeanalyze, exceptions, pynames, + taskhandle, evaluate, worder, libutils) from rope.base.change import ChangeSet, ChangeContents, MoveResource -from rope.refactor import importutils, rename, occurrences, sourceutils, functionutils +from rope.refactor import importutils, rename, occurrences, sourceutils, \ + functionutils def create_move(project, resource, offset=None): @@ -18,7 +20,7 @@ def create_move(project, resource, offset=None): """ if offset is None: return MoveModule(project, resource) - this_pymodule = project.pycore.resource_to_pyobject(resource) + this_pymodule = project.get_pymodule(resource) pyname = evaluate.eval_location(this_pymodule, offset) if pyname is None: raise exceptions.RefactoringError( @@ -48,8 +50,7 @@ class MoveMethod(object): def __init__(self, project, resource, offset): self.project = project - self.pycore = project.pycore - this_pymodule = self.pycore.resource_to_pyobject(resource) + this_pymodule = self.project.get_pymodule(resource) pyname = evaluate.eval_location(this_pymodule, 
offset) self.method_name = worder.get_name_at(resource, offset) self.pyfunction = pyname.get_object() @@ -73,7 +74,7 @@ def get_changes(self, dest_attr, new_name=None, resources=None, """ changes = ChangeSet('Moving method <%s>' % self.method_name) if resources is None: - resources = self.pycore.get_python_files() + resources = self.project.get_python_files() if new_name is None: new_name = self.get_method_name() resource1, start1, end1, new_content1 = \ @@ -89,11 +90,11 @@ def get_changes(self, dest_attr, new_name=None, resources=None, collector2 = codeanalyze.ChangeCollector(resource2.read()) collector2.add_change(start2, end2, new_content2) result = collector2.get_changed() - import_tools = importutils.ImportTools(self.pycore) + import_tools = importutils.ImportTools(self.project) new_imports = self._get_used_imports(import_tools) if new_imports: - goal_pymodule = self.pycore.get_string_module(result, - resource2) + goal_pymodule = libutils.get_string_module( + self.project, result, resource2) result = _add_imports_to_module( import_tools, goal_pymodule, new_imports) if resource2 in resources: @@ -108,13 +109,13 @@ def get_method_name(self): return self.method_name def _get_used_imports(self, import_tools): - return importutils.get_imports(self.pycore, self.pyfunction) + return importutils.get_imports(self.project, self.pyfunction) def _get_changes_made_by_old_class(self, dest_attr, new_name): pymodule = self.pyfunction.get_module() indents = self._get_scope_indents(self.pyfunction) - body = 'return self.%s.%s(%s)\n' % (dest_attr, new_name, - self._get_passed_arguments_string()) + body = 'return self.%s.%s(%s)\n' % ( + dest_attr, new_name, self._get_passed_arguments_string()) region = sourceutils.get_body_region(self.pyfunction) return (pymodule.get_resource(), region[0], region[1], sourceutils.fix_indentation(body, indents)) @@ -123,7 +124,7 @@ def _get_scope_indents(self, pyobject): pymodule = pyobject.get_module() return sourceutils.get_indents( 
pymodule.lines, pyobject.get_scope().get_start()) + \ - sourceutils.get_indent(self.pycore) + sourceutils.get_indent(self.project) def _get_changes_made_by_new_class(self, dest_attr, new_name): old_pyclass = self.pyfunction.parent @@ -150,7 +151,7 @@ def get_new_method(self, name): return '%s\n%s' % ( self._get_new_header(name), sourceutils.fix_indentation(self._get_body(), - sourceutils.get_indent(self.pycore))) + sourceutils.get_indent(self.project))) def _get_unchanged_body(self): return sourceutils.get_body(self.pyfunction) @@ -158,9 +159,9 @@ def _get_unchanged_body(self): def _get_body(self, host='host'): self_name = self._get_self_name() body = self_name + ' = None\n' + self._get_unchanged_body() - pymodule = self.pycore.get_string_module(body) + pymodule = libutils.get_string_module(self.project, body) finder = occurrences.create_finder( - self.pycore, self_name, pymodule[self_name]) + self.project, self_name, pymodule[self_name]) result = rename.rename_in_module(finder, host, pymodule=pymodule) if result is None: result = body @@ -199,26 +200,28 @@ class MoveGlobal(object): """For moving global function and classes""" def __init__(self, project, resource, offset): - self.pycore = project.pycore - this_pymodule = self.pycore.resource_to_pyobject(resource) + self.project = project + this_pymodule = self.project.get_pymodule(resource) self.old_pyname = evaluate.eval_location(this_pymodule, offset) self.old_name = self.old_pyname.get_object().get_name() pymodule = self.old_pyname.get_object().get_module() self.source = pymodule.get_resource() - self.tools = _MoveTools(self.pycore, self.source, + self.tools = _MoveTools(self.project, self.source, self.old_pyname, self.old_name) self.import_tools = self.tools.import_tools self._check_exceptional_conditions() def _check_exceptional_conditions(self): if self.old_pyname is None or \ - not isinstance(self.old_pyname.get_object(), pyobjects.PyDefinedObject): + not isinstance(self.old_pyname.get_object(), + 
pyobjects.PyDefinedObject): raise exceptions.RefactoringError( 'Move refactoring should be performed on a class/function.') moving_pyobject = self.old_pyname.get_object() if not self._is_global(moving_pyobject): raise exceptions.RefactoringError( - 'Move refactoring should be performed on a global class/function.') + 'Move refactoring should be performed ' + + 'on a global class/function.') def _is_global(self, pyobject): return pyobject.get_scope().parent == pyobject.get_module().get_scope() @@ -226,7 +229,7 @@ def _is_global(self, pyobject): def get_changes(self, dest, resources=None, task_handle=taskhandle.NullTaskHandle()): if resources is None: - resources = self.pycore.get_python_files() + resources = self.project.get_python_files() if dest is None or not dest.exists(): raise exceptions.RefactoringError( 'Move destination does not exist.') @@ -251,7 +254,7 @@ def _calculate_changes(self, dest, resources, task_handle): elif file_ == dest: changes.add_change(self._dest_module_changes(dest)) elif self.tools.occurs_in_module(resource=file_): - pymodule = self.pycore.resource_to_pyobject(file_) + pymodule = self.project.get_pymodule(file_) # Changing occurrences placeholder = '__rope_renaming_%s_' % self.old_name source = self.tools.rename_in_module(placeholder, @@ -264,7 +267,8 @@ def _calculate_changes(self, dest, resources, task_handle): if should_import: pymodule = self.tools.new_pymodule(pymodule, source) source, imported = importutils.add_import( - self.pycore, pymodule, self._new_modname(dest), self.old_name) + self.project, pymodule, self._new_modname(dest), + self.old_name) source = source.replace(placeholder, imported) source = self.tools.new_source(pymodule, source) if source != file_.read(): @@ -276,25 +280,26 @@ def _source_module_changes(self, dest): placeholder = '__rope_moving_%s_' % self.old_name handle = _ChangeMoveOccurrencesHandle(placeholder) occurrence_finder = occurrences.create_finder( - self.pycore, self.old_name, self.old_pyname) + 
self.project, self.old_name, self.old_pyname) start, end = self._get_moving_region() renamer = ModuleSkipRenamer(occurrence_finder, self.source, handle, start, end) source = renamer.get_changed_module() if handle.occurred: - pymodule = self.pycore.get_string_module(source, self.source) + pymodule = libutils.get_string_module( + self.project, source, self.source) # Adding new import source, imported = importutils.add_import( - self.pycore, pymodule, self._new_modname(dest), self.old_name) + self.project, pymodule, self._new_modname(dest), self.old_name) source = source.replace(placeholder, imported) return ChangeContents(self.source, source) def _new_modname(self, dest): - return self.pycore.modname(dest) + return libutils.modname(dest) def _dest_module_changes(self, dest): # Changing occurrences - pymodule = self.pycore.resource_to_pyobject(dest) + pymodule = self.project.get_pymodule(dest) source = self.tools.rename_in_module(self.old_name, pymodule) pymodule = self.tools.new_pymodule(pymodule, source) @@ -310,7 +315,8 @@ def _dest_module_changes(self, dest): lineno = module_with_imports.imports[-1].end_line - 1 else: while lineno < pymodule.lines.length() and \ - pymodule.lines.get_line(lineno + 1).lstrip().startswith('#'): + pymodule.lines.get_line(lineno + 1).\ + lstrip().startswith('#'): lineno += 1 if lineno > 0: cut = pymodule.lines.get_line_end(lineno) + 1 @@ -320,17 +326,18 @@ def _dest_module_changes(self, dest): # Organizing imports source = result - pymodule = self.pycore.get_string_module(source, dest) + pymodule = libutils.get_string_module(self.project, source, dest) source = self.import_tools.organize_imports(pymodule, sort=False, unused=False) return ChangeContents(dest, source) def _get_moving_element_with_imports(self): return moving_code_with_imports( - self.pycore, self.source, self._get_moving_element()) + self.project, self.source, self._get_moving_element()) def _get_module_with_imports(self, source_code, resource): - pymodule = 
self.pycore.get_string_module(source_code, resource) + pymodule = libutils.get_string_module( + self.project, source_code, resource) return self.import_tools.module_imports(pymodule) def _get_moving_element(self): @@ -339,13 +346,13 @@ def _get_moving_element(self): return moving.rstrip() + '\n' def _get_moving_region(self): - pymodule = self.pycore.resource_to_pyobject(self.source) + pymodule = self.project.get_pymodule(self.source) lines = pymodule.lines scope = self.old_pyname.get_object().get_scope() start = lines.get_line_start(scope.get_start()) end_line = scope.get_end() while end_line < lines.length() and \ - lines.get_line(end_line + 1).strip() == '': + lines.get_line(end_line + 1).strip() == '': end_line += 1 end = min(lines.get_line_end(end_line) + 1, len(pymodule.source_code)) return start, end @@ -356,7 +363,8 @@ def _add_imports2(self, pymodule, new_imports): return pymodule, False else: resource = pymodule.get_resource() - pymodule = self.pycore.get_string_module(source, resource) + pymodule = libutils.get_string_module( + self.project, source, resource) return pymodule, True @@ -365,13 +373,12 @@ class MoveModule(object): def __init__(self, project, resource): self.project = project - self.pycore = project.pycore if not resource.is_folder() and resource.name == '__init__.py': resource = resource.parent if resource.is_folder() and not resource.has_child('__init__.py'): raise exceptions.RefactoringError( 'Cannot move non-package folder.') - dummy_pymodule = self.pycore.get_string_module('') + dummy_pymodule = libutils.get_string_module(self.project, '') self.old_pyname = pynames.ImportedModule(dummy_pymodule, resource=resource) self.source = self.old_pyname.get_object().get_resource() @@ -379,15 +386,14 @@ def __init__(self, project, resource): self.old_name = self.source.name else: self.old_name = self.source.name[:-3] - self.tools = _MoveTools(self.pycore, self.source, + self.tools = _MoveTools(self.project, self.source, self.old_pyname, 
self.old_name) self.import_tools = self.tools.import_tools def get_changes(self, dest, resources=None, task_handle=taskhandle.NullTaskHandle()): - moving_pyobject = self.old_pyname.get_object() if resources is None: - resources = self.pycore.get_python_files() + resources = self.project.get_python_files() if dest is None or not dest.is_folder(): raise exceptions.RefactoringError( 'Move destination for modules should be packages.') @@ -412,7 +418,7 @@ def _calculate_changes(self, dest, resources, task_handle): return changes def _new_modname(self, dest): - destname = self.pycore.modname(dest) + destname = libutils.modname(dest) if destname: return destname + '.' + self.old_name return self.old_name @@ -422,7 +428,7 @@ def _new_import(self, dest): def _change_moving_module(self, changes, dest): if not self.source.is_folder(): - pymodule = self.pycore.resource_to_pyobject(self.source) + pymodule = self.project.get_pymodule(self.source) source = self.import_tools.relatives_to_absolutes(pymodule) pymodule = self.tools.new_pymodule(pymodule, source) source = self._change_occurrences_in_module(dest, pymodule) @@ -436,11 +442,24 @@ def _change_occurrences_in_module(self, dest, pymodule=None, resource=resource): return if pymodule is None: - pymodule = self.pycore.resource_to_pyobject(resource) + pymodule = self.project.get_pymodule(resource) new_name = self._new_modname(dest) + module_imports = importutils.get_module_imports(self.project, pymodule) + changed = False + + source = None + if libutils.modname(dest): + changed = self._change_import_statements(dest, new_name, + module_imports) + if changed: + source = module_imports.get_changed_source() + source = self.tools.new_source(pymodule, source) + pymodule = self.tools.new_pymodule(pymodule, source) + new_import = self._new_import(dest) source = self.tools.rename_in_module( - new_name, imports=True, pymodule=pymodule, resource=resource) + new_name, imports=True, pymodule=pymodule, + resource=resource if not changed else 
None) should_import = self.tools.occurs_in_module( pymodule=pymodule, resource=resource, imports=False) pymodule = self.tools.new_pymodule(pymodule, source) @@ -449,8 +468,75 @@ def _change_occurrences_in_module(self, dest, pymodule=None, pymodule = self.tools.new_pymodule(pymodule, source) source = self.tools.add_imports(pymodule, [new_import]) source = self.tools.new_source(pymodule, source) - if source != pymodule.resource.read(): + if source is not None and source != pymodule.resource.read(): return source + return None + + + def _change_import_statements(self, dest, new_name, module_imports): + moving_module = self.source + parent_module = moving_module.parent + + changed = False + for import_stmt in module_imports.imports: + if not any(name_and_alias[0] == self.old_name + for name_and_alias in + import_stmt.import_info.names_and_aliases) and \ + not any(name_and_alias[0] == libutils.modname(self.source) + for name_and_alias in + import_stmt.import_info.names_and_aliases): + continue + + # Case 1: Look for normal imports of the moving module. + if isinstance(import_stmt.import_info, importutils.NormalImport): + continue + + # Case 2: The moving module is from-imported. + changed = self._handle_moving_in_from_import_stmt( + dest, import_stmt, module_imports, parent_module) or changed + + # Case 3: Names are imported from the moving module. 
+ context = importutils.importinfo.ImportContext(self.project, None) + if not import_stmt.import_info.is_empty() and \ + import_stmt.import_info.get_imported_resource(context) == \ + moving_module: + import_stmt.import_info = importutils.FromImport( + new_name, import_stmt.import_info.level, + import_stmt.import_info.names_and_aliases) + changed = True + + return changed + + def _handle_moving_in_from_import_stmt(self, dest, import_stmt, + module_imports, parent_module): + changed = False + context = importutils.importinfo.ImportContext(self.project, None) + if import_stmt.import_info.get_imported_resource(context) == \ + parent_module: + imports = import_stmt.import_info.names_and_aliases + new_imports = [] + for name, alias in imports: + # The moving module was imported. + if name == self.old_name: + changed = True + new_import = importutils.FromImport( + libutils.modname(dest), 0, + [(self.old_name, alias)]) + module_imports.add_import(new_import) + else: + new_imports.append((name, alias)) + + # Update the imports if the imported names were changed. 
+ if new_imports != imports: + changed = True + if new_imports: + import_stmt.import_info = importutils.FromImport( + import_stmt.import_info.module_name, + import_stmt.import_info.level, + new_imports) + else: + import_stmt.empty_import() + return changed class _ChangeMoveOccurrencesHandle(object): @@ -470,20 +556,22 @@ def occurred_outside_skip(self, change_collector, occurrence): class _MoveTools(object): - def __init__(self, pycore, source, pyname, old_name): - self.pycore = pycore + def __init__(self, project, source, pyname, old_name): + self.project = project self.source = source self.old_pyname = pyname self.old_name = old_name - self.import_tools = importutils.ImportTools(self.pycore) + self.import_tools = importutils.ImportTools(self.project) def remove_old_imports(self, pymodule): old_source = pymodule.source_code module_with_imports = self.import_tools.module_imports(pymodule) + class CanSelect(object): changed = False old_name = self.old_name old_pyname = self.old_pyname + def __call__(self, name): try: if name == self.old_name and \ @@ -501,7 +589,7 @@ def __call__(self, name): return new_source def rename_in_module(self, new_name, pymodule=None, - imports=False, resource=None): + imports=False, resource=None): occurrence_finder = self._create_finder(imports) source = rename.rename_in_module( occurrence_finder, new_name, replace_primary=True, @@ -516,13 +604,13 @@ def occurs_in_module(self, pymodule=None, resource=None, imports=True): return False def _create_finder(self, imports): - return occurrences.create_finder(self.pycore, self.old_name, + return occurrences.create_finder(self.project, self.old_name, self.old_pyname, imports=imports) def new_pymodule(self, pymodule, source): if source is not None: - return self.pycore.get_string_module( - source, pymodule.get_resource()) + return libutils.get_string_module( + self.project, source, pymodule.get_resource()) return pymodule def new_source(self, pymodule, source): @@ -541,10 +629,10 @@ def 
_add_imports_to_module(import_tools, pymodule, new_imports): return module_with_imports.get_changed_source() -def moving_code_with_imports(pycore, resource, source): - import_tools = importutils.ImportTools(pycore) - pymodule = pycore.get_string_module(source, resource) - origin = pycore.resource_to_pyobject(resource) +def moving_code_with_imports(project, resource, source): + import_tools = importutils.ImportTools(project) + pymodule = libutils.get_string_module(project, source, resource) + origin = project.get_pymodule(resource) imports = [] for stmt in import_tools.module_imports(origin).imports: @@ -557,12 +645,12 @@ def moving_code_with_imports(pycore, resource, source): imports.append(import_tools.get_from_import(resource, back_names)) source = _add_imports_to_module(import_tools, pymodule, imports) - pymodule = pycore.get_string_module(source, resource) + pymodule = libutils.get_string_module(project, source, resource) source = import_tools.relatives_to_absolutes(pymodule) - pymodule = pycore.get_string_module(source, resource) + pymodule = libutils.get_string_module(project, source, resource) source = import_tools.organize_imports(pymodule, selfs=False) - pymodule = pycore.get_string_module(source, resource) + pymodule = libutils.get_string_module(project, source, resource) # extracting imports after changes module_imports = import_tools.module_imports(pymodule) @@ -610,7 +698,7 @@ def __init__(self, occurrence_finder, resource, handle=None, self.replacement = replacement self.handle = handle if self.handle is None: - self.handle = ModuleSkipHandle() + self.handle = ModuleSkipRenamerHandle() def get_changed_module(self): source = self.resource.read() @@ -618,7 +706,8 @@ def get_changed_module(self): if self.replacement is not None: change_collector.add_change(self.skip_start, self.skip_end, self.replacement) - for occurrence in self.occurrence_finder.find_occurrences(self.resource): + for occurrence in self.occurrence_finder.find_occurrences( + 
self.resource): start, end = occurrence.get_primary_range() if self.skip_start <= start < self.skip_end: self.handle.occurred_inside_skip(change_collector, occurrence) diff --git a/pymode/libs2/rope/refactor/multiproject.py b/pymode/libs2/rope/refactor/multiproject.py index 6a85d2a2..ac243bda 100644 --- a/pymode/libs2/rope/refactor/multiproject.py +++ b/pymode/libs2/rope/refactor/multiproject.py @@ -1,11 +1,11 @@ """This module can be used for performing cross-project refactorings -See the "cross-project refactorings" section of ``docs/library.txt`` +See the "cross-project refactorings" section of ``docs/library.rst`` file. """ -from rope.base import resources, project, libutils +from rope.base import resources, libutils class MultiProjectRefactoring(object): @@ -33,7 +33,7 @@ def __init__(self, refactoring, other_projects, addpath, self.refactoring = refactoring self.projects = [project] + other_projects for other_project in other_projects: - for folder in self.project.pycore.get_source_folders(): + for folder in self.project.get_source_folders(): other_project.get_prefs().add('python_path', folder.real_path) self.refactorings = [] for other in self.projects: @@ -57,7 +57,7 @@ def _resources_for_args(self, project, args, kwds): newkwds = dict((name, self._change_project_resource(project, value)) for name, value in kwds.items()) return newargs, newkwds - + def _change_project_resource(self, project, obj): if isinstance(obj, resources.Resource) and \ obj.project != project: diff --git a/pymode/libs2/rope/refactor/occurrences.py b/pymode/libs2/rope/refactor/occurrences.py index 2808ed2c..14a2d7de 100644 --- a/pymode/libs2/rope/refactor/occurrences.py +++ b/pymode/libs2/rope/refactor/occurrences.py @@ -1,7 +1,46 @@ +"""Find occurrences of a name in a project. + +This module consists of a `Finder` that finds all occurrences of a name +in a project. The `Finder.find_occurrences()` method is a generator that +yields `Occurrence` instances for each occurrence of the name. 
To create +a `Finder` object, use the `create_finder()` function: + + finder = occurrences.create_finder(project, 'foo', pyname) + for occurrence in finder.find_occurrences(): + pass + +It's possible to filter the occurrences. They can be specified when +calling the `create_finder()` function. + + * `only_calls`: If True, return only those instances where the name is + a function that's being called. + + * `imports`: If False, don't return instances that are in import + statements. + + * `unsure`: If a prediate function, return instances where we don't + know what the name references. It also filters based on the + predicate function. + + * `docs`: If True, it will search for occurrences in regions normally + ignored. E.g., strings and comments. + + * `in_hierarchy`: If True, it will find occurrences if the name is in + the class's hierarchy. + + * `instance`: Used only when you want implicit interfaces to be + considered. +""" + import re -import rope.base.pynames -from rope.base import pynames, pyobjects, codeanalyze, evaluate, exceptions, utils, worder +from rope.base import codeanalyze +from rope.base import evaluate +from rope.base import exceptions +from rope.base import pynames +from rope.base import pyobjects +from rope.base import utils +from rope.base import worder class Finder(object): @@ -19,8 +58,8 @@ class Finder(object): """ - def __init__(self, pycore, name, filters=[lambda o: True], docs=False): - self.pycore = pycore + def __init__(self, project, name, filters=[lambda o: True], docs=False): + self.project = project self.name = name self.docs = docs self.filters = filters @@ -28,7 +67,7 @@ def __init__(self, pycore, name, filters=[lambda o: True], docs=False): def find_occurrences(self, resource=None, pymodule=None): """Generate `Occurrence` instances""" - tools = _OccurrenceToolsCreator(self.pycore, resource=resource, + tools = _OccurrenceToolsCreator(self.project, resource=resource, pymodule=pymodule, docs=self.docs) for offset in 
self._textual_finder.find_offsets(tools.source_code): occurrence = Occurrence(tools, offset) @@ -41,7 +80,7 @@ def find_occurrences(self, resource=None, pymodule=None): break -def create_finder(pycore, name, pyname, only_calls=False, imports=True, +def create_finder(project, name, pyname, only_calls=False, imports=True, unsure=None, docs=False, instance=None, in_hierarchy=False): """A factory for `Finder` @@ -50,25 +89,25 @@ def create_finder(pycore, name, pyname, only_calls=False, imports=True, considered. """ - pynames = set([pyname]) + pynames_ = set([pyname]) filters = [] if only_calls: filters.append(CallsFilter()) if not imports: filters.append(NoImportsFilter()) - if isinstance(instance, rope.base.pynames.ParameterName): + if isinstance(instance, pynames.ParameterName): for pyobject in instance.get_objects(): try: - pynames.add(pyobject[name]) + pynames_.add(pyobject[name]) except exceptions.AttributeNotFoundError: pass - for pyname in pynames: + for pyname in pynames_: filters.append(PyNameFilter(pyname)) if in_hierarchy: filters.append(InHierarchyFilter(pyname)) if unsure: filters.append(UnsureFilter(unsure)) - return Finder(pycore, name, filters=filters, docs=docs) + return Finder(project, name, filters=filters, docs=docs) class Occurrence(object): @@ -96,7 +135,8 @@ def get_pyname(self): @utils.saveit def get_primary_and_pyname(self): try: - return self.tools.name_finder.get_primary_and_pyname_at(self.offset) + return self.tools.name_finder.get_primary_and_pyname_at( + self.offset) except exceptions.BadIdentifierError: pass @@ -109,11 +149,13 @@ def is_called(self): return self.tools.word_finder.is_a_function_being_called(self.offset) def is_defined(self): - return self.tools.word_finder.is_a_class_or_function_name_in_header(self.offset) + return self.tools.word_finder.is_a_class_or_function_name_in_header( + self.offset) def is_a_fixed_primary(self): - return self.tools.word_finder.is_a_class_or_function_name_in_header(self.offset) or \ - 
self.tools.word_finder.is_a_name_after_from_import(self.offset) + return self.tools.word_finder.is_a_class_or_function_name_in_header( + self.offset) or \ + self.tools.word_finder.is_a_name_after_from_import(self.offset) def is_written(self): return self.tools.word_finder.is_assigned_here(self.offset) @@ -134,11 +176,14 @@ def same_pyname(expected, pyname): return False if expected == pyname: return True - if type(expected) not in (pynames.ImportedModule, pynames.ImportedName) and \ - type(pyname) not in (pynames.ImportedModule, pynames.ImportedName): + if type(expected) not in (pynames.ImportedModule, pynames.ImportedName) \ + and type(pyname) not in \ + (pynames.ImportedModule, pynames.ImportedName): return False - return expected.get_definition_location() == pyname.get_definition_location() and \ - expected.get_object() == pyname.get_object() + return expected.get_definition_location() == \ + pyname.get_definition_location() and \ + expected.get_object() == pyname.get_object() + def unsure_pyname(pyname, unbound=True): """Return `True` if we don't know what this name references""" @@ -151,7 +196,7 @@ def unsure_pyname(pyname, unbound=True): class PyNameFilter(object): - """For finding occurrences of a name""" + """For finding occurrences of a name.""" def __init__(self, pyname): self.pyname = pyname @@ -162,7 +207,7 @@ def __call__(self, occurrence): class InHierarchyFilter(object): - """For finding occurrences of a name""" + """Finds the occurrence if the name is in the class's hierarchy.""" def __init__(self, pyname, implementations_only=False): self.pyname = pyname @@ -203,6 +248,7 @@ def _get_root_classes(self, pyclass, name): class UnsureFilter(object): + """Occurrences where we don't knoow what the name references.""" def __init__(self, unsure): self.unsure = unsure @@ -213,6 +259,7 @@ def __call__(self, occurrence): class NoImportsFilter(object): + """Don't include import statements as occurrences.""" def __call__(self, occurrence): if 
occurrence.is_in_import_statement(): @@ -220,6 +267,7 @@ def __call__(self, occurrence): class CallsFilter(object): + """Filter out non-call occurrences.""" def __call__(self, occurrence): if not occurrence.is_called(): @@ -258,8 +306,10 @@ def _normal_search(self, source): try: found = source.index(self.name, current) current = found + len(self.name) - if (found == 0 or not self._is_id_char(source[found - 1])) and \ - (current == len(source) or not self._is_id_char(source[current])): + if (found == 0 or + not self._is_id_char(source[found - 1])) and \ + (current == len(source) or + not self._is_id_char(source[current])): yield found except ValueError: break @@ -282,7 +332,7 @@ def _get_source(self, resource, pymodule): def _get_occurrence_pattern(self, name): occurrence_pattern = _TextualFinder.any('occurrence', - ['\\b' + name + '\\b']) + ['\\b' + name + '\\b']) pattern = re.compile(occurrence_pattern + '|' + self.comment_pattern + '|' + self.string_pattern) return pattern @@ -294,8 +344,8 @@ def any(name, list_): class _OccurrenceToolsCreator(object): - def __init__(self, pycore, resource=None, pymodule=None, docs=False): - self.pycore = pycore + def __init__(self, project, resource=None, pymodule=None, docs=False): + self.project = project self.__resource = resource self.__pymodule = pymodule self.docs = docs @@ -331,4 +381,4 @@ def resource(self): def pymodule(self): if self.__pymodule is not None: return self.__pymodule - return self.pycore.resource_to_pyobject(self.resource) + return self.project.get_pymodule(self.resource) diff --git a/pymode/libs2/rope/refactor/patchedast.py b/pymode/libs2/rope/refactor/patchedast.py index 88fa4d85..28d36d5a 100644 --- a/pymode/libs2/rope/refactor/patchedast.py +++ b/pymode/libs2/rope/refactor/patchedast.py @@ -68,6 +68,7 @@ def __init__(self, source, children=False): Number = object() String = object() + semicolon_or_as_in_except = object() def __call__(self, node): method = getattr(self, '_' + node.__class__.__name__, 
None) @@ -111,6 +112,10 @@ def _handle(self, node, base_children, eat_parens=False, eat_spaces=False): elif child == '!=': # INFO: This has been added to handle deprecated ``<>`` region = self.source.consume_not_equal() + elif child == self.semicolon_or_as_in_except: + # INFO: This has been added to handle deprecated + # semicolon in except + region = self.source.consume_except_as_or_semicolon() else: region = self.source.consume(child) child = self.source[region[0]:region[1]] @@ -205,16 +210,17 @@ def _find_next_statement_start(self): for child in children: if isinstance(child, ast.stmt): return child.col_offset \ - + self.lines.get_line_start(child.lineno) + + self.lines.get_line_start(child.lineno) return len(self.source.source) - _operators = {'And': 'and', 'Or': 'or', 'Add': '+', 'Sub': '-', 'Mult': '*', - 'Div': '/', 'Mod': '%', 'Pow': '**', 'LShift': '<<', - 'RShift': '>>', 'BitOr': '|', 'BitAnd': '&', 'BitXor': '^', - 'FloorDiv': '//', 'Invert': '~', 'Not': 'not', 'UAdd': '+', - 'USub': '-', 'Eq': '==', 'NotEq': '!=', 'Lt': '<', - 'LtE': '<=', 'Gt': '>', 'GtE': '>=', 'Is': 'is', - 'IsNot': 'is not', 'In': 'in', 'NotIn': 'not in'} + _operators = {'And': 'and', 'Or': 'or', 'Add': '+', 'Sub': '-', + 'Mult': '*', 'Div': '/', 'Mod': '%', 'Pow': '**', + 'LShift': '<<', 'RShift': '>>', 'BitOr': '|', 'BitAnd': '&', + 'BitXor': '^', 'FloorDiv': '//', 'Invert': '~', + 'Not': 'not', 'UAdd': '+', 'USub': '-', 'Eq': '==', + 'NotEq': '!=', 'Lt': '<', 'LtE': '<=', 'Gt': '>', + 'GtE': '>=', 'Is': 'is', 'IsNot': 'is not', 'In': 'in', + 'NotIn': 'not in'} def _get_op(self, node): return self._operators[node.__class__.__name__].split(' ') @@ -351,7 +357,8 @@ def _ImportFrom(self, node): children = ['from'] if node.level: children.append('.' 
* node.level) - children.extend([node.module or '', # see comment at rope.base.ast.walk + # see comment at rope.base.ast.walk + children.extend([node.module or '', 'import']) children.extend(self._child_nodes(node.names, ',')) self._handle(node, children) @@ -380,7 +387,8 @@ def _FunctionDef(self, node): def _arguments(self, node): children = [] args = list(node.args) - defaults = [None] * (len(args) - len(node.defaults)) + list(node.defaults) + defaults = [None] * (len(args) - len(node.defaults)) + \ + list(node.defaults) for index, (arg, default) in enumerate(zip(args, defaults)): if index > 0: children.append(',') @@ -568,13 +576,16 @@ def _ExceptHandler(self, node): self._excepthandler(node) def _excepthandler(self, node): + # self._handle(node, [self.semicolon_or_as_in_except]) children = ['except'] if node.type: children.append(node.type) if node.name: - children.extend([',', node.name]) + children.append(self.semicolon_or_as_in_except) + children.append(node.name) children.append(':') children.extend(node.body) + self._handle(node, children) def _Tuple(self, node): @@ -663,6 +674,10 @@ def consume_not_equal(self): repattern = _Source._not_equals_pattern return self._consume_pattern(repattern) + def consume_except_as_or_semicolon(self): + repattern = re.compile(r'as|,') + return self._consume_pattern(repattern) + def _good_token(self, token, offset, start=None): """Checks whether consumed token is in comments""" if start is None: diff --git a/pymode/libs2/rope/refactor/rename.py b/pymode/libs2/rope/refactor/rename.py index 65e6e1d5..3f1f5b7e 100644 --- a/pymode/libs2/rope/refactor/rename.py +++ b/pymode/libs2/rope/refactor/rename.py @@ -1,8 +1,9 @@ import warnings -from rope.base import exceptions, pyobjects, pynames, taskhandle, evaluate, worder, codeanalyze +from rope.base import (exceptions, pyobjects, pynames, taskhandle, + evaluate, worder, codeanalyze, libutils) from rope.base.change import ChangeSet, ChangeContents, MoveResource -from rope.refactor 
import occurrences, sourceutils +from rope.refactor import occurrences class Rename(object): @@ -16,11 +17,10 @@ class Rename(object): def __init__(self, project, resource, offset=None): """If `offset` is None, the `resource` itself will be renamed""" self.project = project - self.pycore = project.pycore self.resource = resource if offset is not None: self.old_name = worder.get_name_at(self.resource, offset) - this_pymodule = self.pycore.resource_to_pyobject(self.resource) + this_pymodule = self.project.get_pymodule(self.resource) self.old_instance, self.old_pyname = \ evaluate.eval_location2(this_pymodule, offset) if self.old_pyname is None: @@ -30,7 +30,7 @@ def __init__(self, project, resource, offset=None): else: if not resource.is_folder() and resource.name == '__init__.py': resource = resource.parent - dummy_pymodule = self.pycore.get_string_module('') + dummy_pymodule = libutils.get_string_module(self.project, '') self.old_instance = None self.old_pyname = pynames.ImportedModule(dummy_pymodule, resource=resource) @@ -70,6 +70,7 @@ def get_changes(self, new_name, in_file=None, in_hierarchy=False, warnings.warn( 'unsure parameter should be a function that returns ' 'True or False', DeprecationWarning, stacklevel=2) + def unsure_func(value=unsure): return value unsure = unsure_func @@ -82,14 +83,15 @@ def unsure_func(value=unsure): if _is_local(self.old_pyname): resources = [self.resource] if resources is None: - resources = self.pycore.get_python_files() + resources = self.project.get_python_files() changes = ChangeSet('Renaming <%s> to <%s>' % (self.old_name, new_name)) finder = occurrences.create_finder( - self.pycore, self.old_name, self.old_pyname, unsure=unsure, + self.project, self.old_name, self.old_pyname, unsure=unsure, docs=docs, instance=self.old_instance, in_hierarchy=in_hierarchy and self.is_method()) - job_set = task_handle.create_jobset('Collecting Changes', len(resources)) + job_set = task_handle.create_jobset('Collecting Changes', + 
len(resources)) for file_ in resources: job_set.started_job(file_.path) new_content = rename_in_module(finder, new_name, resource=file_) @@ -119,8 +121,8 @@ def _is_renaming_a_module(self): def is_method(self): pyname = self.old_pyname return isinstance(pyname, pynames.DefinedName) and \ - isinstance(pyname.get_object(), pyobjects.PyFunction) and \ - isinstance(pyname.get_object().parent, pyobjects.PyClass) + isinstance(pyname.get_object(), pyobjects.PyFunction) and \ + isinstance(pyname.get_object().parent, pyobjects.PyClass) def _rename_module(self, resource, new_name, changes): if not resource.is_folder(): @@ -147,11 +149,11 @@ class ChangeOccurrences(object): """ def __init__(self, project, resource, offset): - self.pycore = project.pycore + self.project = project self.resource = resource self.offset = offset self.old_name = worder.get_name_at(resource, offset) - self.pymodule = self.pycore.resource_to_pyobject(self.resource) + self.pymodule = project.get_pymodule(self.resource) self.old_pyname = evaluate.eval_location(self.pymodule, offset) def get_old_name(self): @@ -161,7 +163,7 @@ def get_old_name(self): def _get_scope_offset(self): lines = self.pymodule.lines scope = self.pymodule.get_scope().\ - get_inner_scope_for_line(lines.get_line_number(self.offset)) + get_inner_scope_for_line(lines.get_line_number(self.offset)) start = lines.get_line_start(scope.get_start()) end = lines.get_line_end(scope.get_end()) return start, end @@ -171,7 +173,7 @@ def get_changes(self, new_name, only_calls=False, reads=True, writes=True): (self.old_name, new_name)) scope_start, scope_end = self._get_scope_offset() finder = occurrences.create_finder( - self.pycore, self.old_name, self.old_pyname, + self.project, self.old_name, self.old_pyname, imports=False, only_calls=only_calls) new_contents = rename_in_module( finder, new_name, pymodule=self.pymodule, replace_primary=True, @@ -181,8 +183,9 @@ def get_changes(self, new_name, only_calls=False, reads=True, writes=True): return 
changes -def rename_in_module(occurrences_finder, new_name, resource=None, pymodule=None, - replace_primary=False, region=None, reads=True, writes=True): +def rename_in_module(occurrences_finder, new_name, resource=None, + pymodule=None, replace_primary=False, region=None, + reads=True, writes=True): """Returns the changed source or `None` if there is no changes""" if resource is not None: source_code = resource.read() @@ -203,6 +206,7 @@ def rename_in_module(occurrences_finder, new_name, resource=None, pymodule=None, change_collector.add_change(start, end, new_name) return change_collector.get_changed() + def _is_local(pyname): module, lineno = pyname.get_definition_location() if lineno is None: @@ -212,5 +216,5 @@ def _is_local(pyname): scope.get_kind() in ('Function', 'Class'): scope = scope.parent return scope.get_kind() == 'Function' and \ - pyname in scope.get_names().values() and \ - isinstance(pyname, pynames.AssignedName) + pyname in scope.get_names().values() and \ + isinstance(pyname, pynames.AssignedName) diff --git a/pymode/libs2/rope/refactor/restructure.py b/pymode/libs2/rope/refactor/restructure.py index 1573c2fe..98a11e3d 100644 --- a/pymode/libs2/rope/refactor/restructure.py +++ b/pymode/libs2/rope/refactor/restructure.py @@ -1,6 +1,7 @@ import warnings from rope.base import change, taskhandle, builtins, ast, codeanalyze +from rope.base import libutils from rope.refactor import patchedast, similarfinder, sourceutils from rope.refactor.importutils import module_imports @@ -52,7 +53,6 @@ class Restructure(object): from rope.contrib import generate args - pycore: type=rope.base.pycore.PyCore project: type=rope.base.project.Project Example #4:: @@ -79,7 +79,7 @@ def __init__(self, project, pattern, goal, args=None, See class pydoc for more info about the arguments. 
""" - self.pycore = project.pycore + self.project = project self.pattern = pattern self.goal = goal self.args = args @@ -132,13 +132,13 @@ def get_changes(self, checks=None, imports=None, resources=None, (self.pattern, self.goal)) if resources is not None: files = [resource for resource in resources - if self.pycore.is_python_file(resource)] + if libutils.is_python_file(self.project, resource)] else: - files = self.pycore.get_python_files() + files = self.project.get_python_files() job_set = task_handle.create_jobset('Collecting Changes', len(files)) for resource in files: job_set.started_job(resource.path) - pymodule = self.pycore.resource_to_pyobject(resource) + pymodule = self.project.get_pymodule(resource) finder = similarfinder.SimilarFinder(pymodule, wildcards=self.wildcards) matches = list(finder.get_matches(self.pattern, self.args)) @@ -161,16 +161,16 @@ def _add_imports(self, resource, source, imports): if not imports: return source import_infos = self._get_import_infos(resource, imports) - pymodule = self.pycore.get_string_module(source, resource) - imports = module_imports.ModuleImports(self.pycore, pymodule) + pymodule = libutils.get_string_module(self.project, source, resource) + imports = module_imports.ModuleImports(self.project, pymodule) for import_info in import_infos: imports.add_import(import_info) return imports.get_changed_source() def _get_import_infos(self, resource, imports): - pymodule = self.pycore.get_string_module('\n'.join(imports), - resource) - imports = module_imports.ModuleImports(self.pycore, pymodule) + pymodule = libutils.get_string_module( + self.project, '\n'.join(imports), resource) + imports = module_imports.ModuleImports(self.project, pymodule) return [imports.import_info for imports in imports.imports] @@ -183,7 +183,7 @@ def make_checks(self, string_checks): checks = {} for key, value in string_checks.items(): is_pyname = not key.endswith('.object') and \ - not key.endswith('.type') + not key.endswith('.type') evaluated = 
self._evaluate(value, is_pyname=is_pyname) if evaluated is not None: checks[key] = evaluated @@ -198,7 +198,7 @@ def get_attribute(self, name): return builtins.builtins[name] pyobject = _BuiltinsStub() else: - pyobject = self.pycore.get_module(attributes[0]) + pyobject = self.project.get_module(attributes[0]) for attribute in attributes[1:]: pyname = pyobject[attribute] if pyname is None: diff --git a/pymode/libs2/rope/refactor/similarfinder.py b/pymode/libs2/rope/refactor/similarfinder.py index fc71abfa..f1a7d42d 100644 --- a/pymode/libs2/rope/refactor/similarfinder.py +++ b/pymode/libs2/rope/refactor/similarfinder.py @@ -2,9 +2,11 @@ import re import rope.refactor.wildcards -from rope.base import codeanalyze, evaluate, exceptions, ast, builtins -from rope.refactor import (patchedast, sourceutils, occurrences, - wildcards, importutils) +from rope.base import libutils +from rope.base import codeanalyze, exceptions, ast, builtins +from rope.refactor import (patchedast, wildcards) + +from rope.refactor.patchedast import MismatchedTokenError class BadNameInCheckError(exceptions.RefactoringError): @@ -22,8 +24,12 @@ class SimilarFinder(object): def __init__(self, pymodule, wildcards=None): """Construct a SimilarFinder""" self.source = pymodule.source_code - self.raw_finder = RawSimilarFinder( - pymodule.source_code, pymodule.get_ast(), self._does_match) + try: + self.raw_finder = RawSimilarFinder( + pymodule.source_code, pymodule.get_ast(), self._does_match) + except MismatchedTokenError: + print "in file %s" % pymodule.resource.path + raise self.pymodule = pymodule if wildcards is None: self.wildcards = {} @@ -41,7 +47,7 @@ def get_matches(self, code, args={}, start=0, end=None): if 'skip' in args.get('', {}): resource, region = args['']['skip'] if resource == self.pymodule.get_resource(): - skip_region = region + skip_region = region return self.raw_finder.get_matches(code, start=start, end=end, skip=skip_region) @@ -97,7 +103,7 @@ def get_matches(self, code, 
start=0, end=None, skip=None): if start <= match_start and match_end <= end: if skip is not None and (skip[0] < match_end and skip[1] > match_start): - continue + continue yield match def _get_matched_asts(self, code): @@ -175,8 +181,8 @@ def __check_stmt_list(self, nodes): def _match_nodes(self, expected, node, mapping): if isinstance(expected, ast.Name): - if self.ropevar.is_var(expected.id): - return self._match_wildcard(expected, node, mapping) + if self.ropevar.is_var(expected.id): + return self._match_wildcard(expected, node, mapping) if not isinstance(expected, ast.AST): return expected == node if expected.__class__ != node.__class__: @@ -296,8 +302,8 @@ def substitute(self, mapping): def _get_pattern(cls): if cls._match_pattern is None: pattern = codeanalyze.get_comment_pattern() + '|' + \ - codeanalyze.get_string_pattern() + '|' + \ - r'(?P\$\{[^\s\$\}]*\})' + codeanalyze.get_string_pattern() + '|' + \ + r'(?P\$\{[^\s\$\}]*\})' cls._match_pattern = re.compile(pattern) return cls._match_pattern @@ -339,6 +345,7 @@ def _is_var(self, name): def make_pattern(code, variables): variables = set(variables) collector = codeanalyze.ChangeCollector(code) + def does_match(node, name): return isinstance(node, ast.Name) and node.id == name finder = RawSimilarFinder(code, does_match=does_match) @@ -352,11 +359,12 @@ def does_match(node, name): def _pydefined_to_str(pydefined): address = [] - if isinstance(pydefined, (builtins.BuiltinClass, builtins.BuiltinFunction)): + if isinstance(pydefined, + (builtins.BuiltinClass, builtins.BuiltinFunction)): return '__builtins__.' 
+ pydefined.get_name() else: while pydefined.parent is not None: address.insert(0, pydefined.get_name()) pydefined = pydefined.parent - module_name = pydefined.pycore.modname(pydefined.resource) + module_name = libutils.modname(pydefined.resource) return '.'.join(module_name.split('.') + address) diff --git a/pymode/libs2/rope/refactor/sourceutils.py b/pymode/libs2/rope/refactor/sourceutils.py index f64213db..9b842906 100644 --- a/pymode/libs2/rope/refactor/sourceutils.py +++ b/pymode/libs2/rope/refactor/sourceutils.py @@ -1,4 +1,4 @@ -from rope.base import ast, codeanalyze +from rope.base import codeanalyze def get_indents(lines, lineno): @@ -48,7 +48,7 @@ def add_methods(pymodule, class_scope, methods_sources): methods = '\n\n' + '\n\n'.join(methods_sources) indented_methods = fix_indentation( methods, get_indents(lines, class_scope.get_start()) + - get_indent(pymodule.pycore)) + get_indent(pymodule.pycore.project)) result = [] result.append(source_code[:insertion_offset]) result.append(indented_methods) @@ -58,7 +58,7 @@ def add_methods(pymodule, class_scope, methods_sources): def get_body(pyfunction): """Return unindented function body""" - scope = pyfunction.get_scope() + # FIXME scope = pyfunction.get_scope() pymodule = pyfunction.get_module() start, end = get_body_region(pyfunction) return fix_indentation(pymodule.source_code[start:end], 0) @@ -87,6 +87,5 @@ def get_body_region(defined): return start, end -def get_indent(pycore): - project = pycore.project +def get_indent(project): return project.prefs.get('indent_size', 4) diff --git a/pymode/libs2/rope/refactor/suites.py b/pymode/libs2/rope/refactor/suites.py index d955c819..4f9a8c71 100644 --- a/pymode/libs2/rope/refactor/suites.py +++ b/pymode/libs2/rope/refactor/suites.py @@ -14,6 +14,7 @@ def find_visible_for_suite(root, lines): line2 = find_visible_for_suite(root, lines[1:]) suite1 = root.find_suite(line1) suite2 = root.find_suite(line2) + def valid(suite): return suite is not None and not 
suite.ignored if valid(suite1) and not valid(suite2): diff --git a/pymode/libs2/rope/refactor/topackage.py b/pymode/libs2/rope/refactor/topackage.py index b7113979..f36a6d52 100644 --- a/pymode/libs2/rope/refactor/topackage.py +++ b/pymode/libs2/rope/refactor/topackage.py @@ -1,12 +1,12 @@ import rope.refactor.importutils -from rope.base.change import ChangeSet, ChangeContents, MoveResource, CreateFolder +from rope.base.change import ChangeSet, ChangeContents, MoveResource, \ + CreateFolder class ModuleToPackage(object): def __init__(self, project, resource): self.project = project - self.pycore = project.pycore self.resource = resource def get_changes(self): @@ -27,6 +27,6 @@ def get_changes(self): return changes def _transform_relatives_to_absolute(self, resource): - pymodule = self.pycore.resource_to_pyobject(resource) - import_tools = rope.refactor.importutils.ImportTools(self.pycore) + pymodule = self.project.get_pymodule(resource) + import_tools = rope.refactor.importutils.ImportTools(self.project) return import_tools.relatives_to_absolutes(pymodule) diff --git a/pymode/libs2/rope/refactor/usefunction.py b/pymode/libs2/rope/refactor/usefunction.py index b0621525..85896a98 100644 --- a/pymode/libs2/rope/refactor/usefunction.py +++ b/pymode/libs2/rope/refactor/usefunction.py @@ -1,6 +1,7 @@ from rope.base import (change, taskhandle, evaluate, exceptions, pyobjects, pynames, ast) -from rope.refactor import restructure, sourceutils, similarfinder, importutils +from rope.base import libutils +from rope.refactor import restructure, sourceutils, similarfinder class UseFunction(object): @@ -9,7 +10,7 @@ class UseFunction(object): def __init__(self, project, resource, offset): self.project = project self.offset = offset - this_pymodule = project.pycore.resource_to_pyobject(resource) + this_pymodule = project.get_pymodule(resource) pyname = evaluate.eval_location(this_pymodule, offset) if pyname is None: raise exceptions.RefactoringError('Unresolvable name selected') 
@@ -37,7 +38,7 @@ def _check_returns(self): def get_changes(self, resources=None, task_handle=taskhandle.NullTaskHandle()): if resources is None: - resources = self.project.pycore.get_python_files() + resources = self.project.get_python_files() changes = change.ChangeSet('Using function <%s>' % self.pyfunction.get_name()) if self.resource in resources: @@ -55,7 +56,6 @@ def get_function_name(self): return self.pyfunction.get_name() def _restructure(self, resources, task_handle, others=True): - body = self._get_body() pattern = self._make_pattern() goal = self._make_goal(import_=others) imports = None @@ -75,7 +75,7 @@ def _find_temps(self): return find_temps(self.project, self._get_body()) def _module_name(self): - return self.project.pycore.modname(self.resource) + return libutils.modname(self.resource) def _make_pattern(self): params = self.pyfunction.get_param_names() @@ -123,7 +123,7 @@ def _is_expression(self): def find_temps(project, code): code = 'def f():\n' + sourceutils.indent_lines(code, 4) - pymodule = project.pycore.get_string_module(code) + pymodule = libutils.get_string_module(project, code) result = [] function_scope = pymodule.get_scope().get_scopes()[0] for name, pyname in function_scope.get_names().items(): @@ -135,16 +135,19 @@ def find_temps(project, code): def _returns_last(node): return node.body and isinstance(node.body[-1], ast.Return) + def _yield_count(node): visitor = _ReturnOrYieldFinder() visitor.start_walking(node) return visitor.yields + def _return_count(node): visitor = _ReturnOrYieldFinder() visitor.start_walking(node) return visitor.returns + class _ReturnOrYieldFinder(object): def __init__(self): diff --git a/pymode/libs2/rope/refactor/wildcards.py b/pymode/libs2/rope/refactor/wildcards.py index 6c487a2a..90040c79 100644 --- a/pymode/libs2/rope/refactor/wildcards.py +++ b/pymode/libs2/rope/refactor/wildcards.py @@ -100,7 +100,7 @@ def __call__(self, pymodule, node): pyname = self._evaluate_node(pymodule, node) if pyname is None 
or self.expected is None: return self.unsure - if self._unsure_pyname(pyname, unbound=self.kind=='name'): + if self._unsure_pyname(pyname, unbound=self.kind == 'name'): return True if self.kind == 'name': return self._same_pyname(self.expected, pyname) @@ -161,13 +161,15 @@ def _evaluate(self, code): class _BuiltinsStub(object): def get_attribute(self, name): return builtins.builtins[name] + def __getitem__(self, name): return builtins.builtins[name] + def __contains__(self, name): return name in builtins.builtins pyobject = _BuiltinsStub() else: - pyobject = self.project.pycore.get_module(attributes[0]) + pyobject = self.project.get_module(attributes[0]) for attribute in attributes[1:]: pyname = pyobject[attribute] if pyname is None: diff --git a/pymode/libs3/rope/__init__.py b/pymode/libs3/rope/__init__.py index 451ebe3a..a936fe29 100644 --- a/pymode/libs3/rope/__init__.py +++ b/pymode/libs3/rope/__init__.py @@ -1,7 +1,7 @@ """rope, a python refactoring library""" INFO = __doc__ -VERSION = '0.9.4' +VERSION = '0.9.4-1' COPYRIGHT = """\ Copyright (C) 2006-2010 Ali Gholami Rudi Copyright (C) 2009-2010 Anton Gritsay diff --git a/pymode/libs3/rope/refactor/patchedast.py b/pymode/libs3/rope/refactor/patchedast.py index 034dac35..042b33dd 100644 --- a/pymode/libs3/rope/refactor/patchedast.py +++ b/pymode/libs3/rope/refactor/patchedast.py @@ -1,7 +1,6 @@ import collections import re import warnings -import sys from rope.base import ast, codeanalyze, exceptions @@ -564,19 +563,6 @@ def _TryExcept(self, node): children.extend(['else', ':']) children.extend(node.orelse) self._handle(node, children) - - def _Try(self, node): - children = ['try', ':'] - children.extend(node.body) - children.extend(node.handlers) - if node.orelse: - children.extend(['else', ':']) - children.extend(node.orelse) - if node.finalbody: - children.extend(['finally', ':']) - children.extend(node.finalbody) - - self._handle(node, children) def _ExceptHandler(self, node): self._excepthandler(node) @@ 
-618,15 +604,9 @@ def _While(self, node): self._handle(node, children) def _With(self, node): - children = [] - if (sys.version_info[1] < 3): - children = ['with', node.context_expr] - if node.optional_vars: - children.extend(['as', node.optional_vars]) - else: - children = ['with', node.items[0].context_expr] - if node.items[0].optional_vars: - children.extend(['as', node.items[0].optional_vars]) + children = ['with', node.context_expr] + if node.optional_vars: + children.extend(['as', node.optional_vars]) children.append(':') children.extend(node.body) self._handle(node, children) diff --git a/pymode/libs3/rope/refactor/suites.py b/pymode/libs3/rope/refactor/suites.py index 041c06a2..d955c819 100644 --- a/pymode/libs3/rope/refactor/suites.py +++ b/pymode/libs3/rope/refactor/suites.py @@ -128,15 +128,6 @@ def _TryExcept(self, node): if node.orelse: self.suites.append(Suite(node.orelse, node.lineno, self.suite)) - def _Try(self, node): - self.suites.append(Suite(node.body, node.lineno, self.suite)) - for handler in node.handlers: - self.suites.append(Suite(handler.body, node.lineno, self.suite)) - if node.orelse: - self.suites.append(Suite(node.orelse, node.lineno, self.suite)) - if node.finalbody: - self.suites.append(Suite(node.finalbody, node.lineno, self.suite)) - def _add_if_like_node(self, node): self.suites.append(Suite(node.body, node.lineno, self.suite)) if node.orelse: From 7cf4eb928dea1d347d0832cd12b30c0672c679b3 Mon Sep 17 00:00:00 2001 From: Tyler Fenby Date: Sun, 21 Dec 2014 17:40:35 -0500 Subject: [PATCH 003/293] Update AUTHORS --- AUTHORS | 1 + 1 file changed, 1 insertion(+) diff --git a/AUTHORS b/AUTHORS index c09fe72d..ef6d0bbc 100644 --- a/AUTHORS +++ b/AUTHORS @@ -47,3 +47,4 @@ Contributors: * lee (loyalpartner); * nixon; * tramchamploo; +* Tyler Fenby (https://github.com/TFenby) From 74f5ad3bafaafbb9234e704c4644273f63986f5b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9s=20Martano?= Date: Sat, 27 Dec 2014 19:24:46 -0200 Subject: [PATCH 
004/293] typo --- doc/pymode.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/pymode.txt b/doc/pymode.txt index 5cec03f9..33968b3e 100644 --- a/doc/pymode.txt +++ b/doc/pymode.txt @@ -435,7 +435,7 @@ use the current directory. The location of the `.ropeproject` folder may also be overridden if you wish to keep it outside of your project root. The rope library treats this folder as a -project resource, so the path will always be relative to your proejct root (a +project resource, so the path will always be relative to your project root (a leading '/' will be ignored). You may use `'..'` path segments to place the folder outside of your project root. *'g:pymode_rope_ropefolder'* From e8c0ed05061114c44a5d18b7a0fef5ccfd408a4e Mon Sep 17 00:00:00 2001 From: chuan92 Date: Mon, 12 Jan 2015 19:02:15 +0800 Subject: [PATCH 005/293] Fix #482, (PEP 263 Python Source Code Encodings) --- pymode/run.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/pymode/run.py b/pymode/run.py index b5a2bfa1..b966fdfc 100644 --- a/pymode/run.py +++ b/pymode/run.py @@ -6,8 +6,7 @@ from .environment import env -encoding = re(r'#[^\w]+coding:\s+utf.*$') - +encoding = re(r'#.*coding[:=]\s*([-\w.]+)') def run_code(): """ Run python code in current buffer. 
@@ -18,9 +17,12 @@ def run_code(): errors, err = [], '' line1, line2 = env.var('a:line1'), env.var('a:line2') lines = __prepare_lines(line1, line2) - for ix in (0, 1): - if encoding.match(lines[ix]): - lines.pop(ix) + if encoding.match(lines[0]): + lines.pop(0) + if encoding.match(lines[0]): + lines.pop(0) + elif encoding.match(lines[1]): + lines.pop(1) context = dict( __name__='__main__', From 8b184ccc1ad312c49257831b6b1286df60f1096a Mon Sep 17 00:00:00 2001 From: Colin Deasy Date: Tue, 13 Jan 2015 16:56:22 +0000 Subject: [PATCH 006/293] Don't force indentation to be 0 for lines not starting with whitespace Some docstrings/code parts use line continuation with zero whitespace on the following line. In this case we should accept the previous lines indentation instead of forcing the indentation to be zero. --- autoload/pymode/folding.vim | 4 ---- 1 file changed, 4 deletions(-) diff --git a/autoload/pymode/folding.vim b/autoload/pymode/folding.vim index 93f18b09..d0e09597 100644 --- a/autoload/pymode/folding.vim +++ b/autoload/pymode/folding.vim @@ -133,10 +133,6 @@ fun! pymode#folding#expr(lnum) "{{{ endif endif - if indent == 0 - return 0 - endif - return '=' endfunction "}}} From da4d8b23a4dbeb8e08db7cb4580acee0bca9e6a2 Mon Sep 17 00:00:00 2001 From: Stefan Scherfke Date: Wed, 14 Jan 2015 11:05:24 +0100 Subject: [PATCH 007/293] =?UTF-8?q?Fix=20issue=20#519=20=E2=80=93=C2=A0fol?= =?UTF-8?q?d=20text=20truncated=20when=20relativenumber=20is=20set.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- autoload/pymode/folding.vim | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/autoload/pymode/folding.vim b/autoload/pymode/folding.vim index 93f18b09..2dcf0d68 100644 --- a/autoload/pymode/folding.vim +++ b/autoload/pymode/folding.vim @@ -23,7 +23,8 @@ fun! 
pymode#folding#text() " {{{ endif let line = getline(fs) - let nucolwidth = &fdc + &number * &numberwidth + let has_numbers = &number || &relativenumber + let nucolwidth = &fdc + has_numbers * &numberwidth let windowwidth = winwidth(0) - nucolwidth - 6 let foldedlinecount = v:foldend - v:foldstart From 9190132d4fb3f5758ea5a3840ec3c0789ec8ed1e Mon Sep 17 00:00:00 2001 From: Dimitrios Semitsoglou-Tsiapos Date: Tue, 17 Feb 2015 19:09:57 +0100 Subject: [PATCH 008/293] rope: correct refactoring function calls * (Temporarily) drops passing of `task_handle`. --- pymode/rope.py | 68 +++++++++++++++++++++----------------------------- 1 file changed, 28 insertions(+), 40 deletions(-) diff --git a/pymode/rope.py b/pymode/rope.py index 159900bf..2b12bd43 100644 --- a/pymode/rope.py +++ b/pymode/rope.py @@ -505,14 +505,7 @@ def get_input_str(refactor, ctx): @staticmethod def get_changes(refactor, input_str, in_hierarchy=False): - """ Get changes. - - :return Changes: - - """ - progress = ProgressHandler('Calculate changes ...') - return refactor.get_changes( - input_str, task_handle=progress.handle, in_hierarchy = in_hierarchy) + raise NotImplementedError class RenameRefactoring(Refactoring): @@ -550,8 +543,26 @@ def get_input_str(self, refactor, ctx): return newname + @staticmethod + def get_changes(refactor, input_str, in_hierarchy): + """ Get changes. + + :return Changes: + + """ + return refactor.get_changes(input_str, in_hierarchy=in_hierarchy) + +class ExtractRefactoring(Refactoring): + @staticmethod + def get_changes(refactor, input_str, in_hierarchy): + """ Get changes. + + :return Changes: + + """ + return refactor.get_changes(input_str) #, global_=not in_hierarchy) -class ExtractMethodRefactoring(Refactoring): +class ExtractMethodRefactoring(ExtractRefactoring): """ Extract method. 
""" @@ -574,18 +585,8 @@ def get_refactor(ctx): return extract.ExtractMethod( ctx.project, ctx.resource, offset1, offset2) - @staticmethod - def get_changes(refactor, input_str): - """ Get changes. - :return Changes: - - """ - - return refactor.get_changes(input_str) - - -class ExtractVariableRefactoring(Refactoring): +class ExtractVariableRefactoring(ExtractRefactoring): """ Extract variable. """ @@ -608,16 +609,6 @@ def get_refactor(ctx): return extract.ExtractVariable( ctx.project, ctx.resource, offset1, offset2) - @staticmethod - def get_changes(refactor, input_str): - """ Get changes. - - :return Changes: - - """ - - return refactor.get_changes(input_str) - class InlineRefactoring(Refactoring): @@ -634,14 +625,13 @@ def get_refactor(ctx): return inline.create_inline(ctx.project, ctx.resource, offset) @staticmethod - def get_changes(refactor, input_str): + def get_changes(refactor, input_str, in_hierarchy): """ Get changes. :return Changes: """ - progress = ProgressHandler('Calculate changes ...') - return refactor.get_changes(task_handle=progress.handle) + return refactor.get_changes() class UseFunctionRefactoring(Refactoring): @@ -659,15 +649,13 @@ def get_refactor(ctx): return usefunction.UseFunction(ctx.project, ctx.resource, offset) @staticmethod - def get_changes(refactor, input_str): + def get_changes(refactor, input_str, in_hierarchy): """ Get changes. :return Changes: """ - progress = ProgressHandler('Calculate changes ...') - return refactor.get_changes( - resources=[refactor.resource], task_handle=progress.handle) + return refactor.get_changes() class ModuleToPackageRefactoring(Refactoring): @@ -684,7 +672,7 @@ def get_refactor(ctx): return ModuleToPackage(ctx.project, ctx.resource) @staticmethod - def get_changes(refactor, input_str): + def get_changes(refactor, input_str, in_hierarchy): """ Get changes. 
:return Changes: @@ -746,7 +734,7 @@ def get_refactor(ctx): return change_signature.ChangeSignature( ctx.project, ctx.resource, offset) - def get_changes(self, refactor, input_string): + def get_changes(self, refactor, input_string, in_hierarchy): """ Function description. :return Rope.changes: @@ -771,7 +759,7 @@ def get_changes(self, refactor, input_string): changers.append(change_signature.ArgumentReorderer( order, autodef='None')) - return refactor.get_changes(changers) + return refactor.get_changes(changers, in_hierarchy=in_hierarchy) class GenerateElementRefactoring(Refactoring): From 7ce8c76675ef577bc565e9d5494e721310690c78 Mon Sep 17 00:00:00 2001 From: Samir Benmendil Date: Sun, 22 Feb 2015 14:58:29 +0000 Subject: [PATCH 009/293] Don't skip a line when the first docstring contains text --- autoload/pymode/folding.vim | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autoload/pymode/folding.vim b/autoload/pymode/folding.vim index 0ae61dd4..e54ef1bf 100644 --- a/autoload/pymode/folding.vim +++ b/autoload/pymode/folding.vim @@ -18,7 +18,7 @@ fun! pymode#folding#text() " {{{ while getline(fs) !~ s:def_regex && getline(fs) !~ s:doc_begin_regex let fs = nextnonblank(fs + 1) endwhile - if getline(fs) =~ s:doc_begin_regex + if getline(fs) =~ s:doc_end_regex && getline(fs) =~ s:doc_begin_regex let fs = nextnonblank(fs + 1) endif let line = getline(fs) From e6c914481768139c8abaf2491b17ace5b7b37c1a Mon Sep 17 00:00:00 2001 From: Samir Benmendil Date: Mon, 23 Feb 2015 00:54:44 +0000 Subject: [PATCH 010/293] Don't fold single line def Simply checks whether the next line is more indented than the line matching def_regex and if it's not don't increase the fold level. --- autoload/pymode/folding.vim | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/autoload/pymode/folding.vim b/autoload/pymode/folding.vim index 0ae61dd4..7974112b 100644 --- a/autoload/pymode/folding.vim +++ b/autoload/pymode/folding.vim @@ -50,6 +50,10 @@ fun! 
pymode#folding#expr(lnum) "{{{ endif if line =~ s:def_regex + " single line def + if indent(a:lnum) >= indent(a:lnum+1) + return '=' + endif " Check if last decorator is before the last def let decorated = 0 let lnum = a:lnum - 1 From 86161aa9cd7099abd08ee68fc68420cea58c05d2 Mon Sep 17 00:00:00 2001 From: Dylan Semler Date: Sun, 8 Mar 2015 08:26:23 -0400 Subject: [PATCH 011/293] fix placement of pymode_options help description --- doc/pymode.txt | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/doc/pymode.txt b/doc/pymode.txt index 33968b3e..36ce040e 100644 --- a/doc/pymode.txt +++ b/doc/pymode.txt @@ -98,10 +98,6 @@ Setup default python options *'g:pymode_options'* > let g:pymode_options = 1 -Setup max line length *'g:pymode_options_max_line_length'* -> - let g:pymode_options_max_line_length = 79 - If this option is set to 1, pymode will enable the following options for python buffers: > @@ -115,6 +111,10 @@ python buffers: > setlocal commentstring=#%s setlocal define=^\s*\\(def\\\\|class\\) +Setup max line length *'g:pymode_options_max_line_length'* +> + let g:pymode_options_max_line_length = 79 + Enable colorcolumn display at max_line_length *'g:pymode_options_colorcolumn'* > let g:pymode_options_colorcolumn = 1 From f4e8437457df2164f51eaa67ac4391425f48e322 Mon Sep 17 00:00:00 2001 From: "John L. Villalovos" Date: Wed, 1 Apr 2015 14:34:06 -0700 Subject: [PATCH 012/293] Use 'https:' instead of 'git:' in documentation For people behind a proxy server it is difficult to 'git clone' using 'git:'. While 'https:' is universally usable. 
--- README.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.rst b/README.rst index b6f9bb69..b1da7774 100644 --- a/README.rst +++ b/README.rst @@ -62,7 +62,7 @@ Using pathogen (recommended) % cd ~/.vim % mkdir -p bundle && cd bundle - % git clone git://github.com/klen/python-mode.git + % git clone https://github.com/klen/python-mode.git - Enable `pathogen `_ in your ``~/.vimrc``: :: @@ -81,7 +81,7 @@ Manually -------- :: - % git clone git://github.com/klen/python-mode.git + % git clone https://github.com/klen/python-mode.git % cd python-mode % cp -R * ~/.vim From 0368708fd3b3dbb8e3e2618e71f5b015c8142578 Mon Sep 17 00:00:00 2001 From: Jacob Niehus Date: Fri, 3 Apr 2015 11:33:29 -0700 Subject: [PATCH 013/293] Skip doctest regions when searching for open pairs --- autoload/pymode/indent.vim | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autoload/pymode/indent.vim b/autoload/pymode/indent.vim index d8e9f148..efd41f29 100644 --- a/autoload/pymode/indent.vim +++ b/autoload/pymode/indent.vim @@ -110,7 +110,7 @@ function! s:SearchParensPair() " {{{ " Skip strings and comments and don't look too far let skip = "line('.') < " . (line - 50) . " ? dummy :" . \ 'synIDattr(synID(line("."), col("."), 0), "name") =~? ' . 
- \ '"string\\|comment"' + \ '"string\\|comment\\|doctest"' " Search for parentheses call cursor(line, col) From 1ff7c2febe0bb1d901b5d54a64306da741c0d50f Mon Sep 17 00:00:00 2001 From: Valerio Crini Date: Sun, 3 May 2015 21:10:14 +0200 Subject: [PATCH 014/293] removing misstype --- doc/pymode.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/pymode.txt b/doc/pymode.txt index ae2a5d27..6f5bd5fc 100644 --- a/doc/pymode.txt +++ b/doc/pymode.txt @@ -443,7 +443,7 @@ imported) from project *'g:pymode_rope_autoimport'* Load modules to autoimport by default *'g:pymode_rope_autoimport_modules'* > - let g:pymode_rope_autoimport_modules = ['os', 'shutil', 'datetime']) + let g:pymode_rope_autoimport_modules = ['os', 'shutil', 'datetime'] Offer to unresolved import object after completion. > From ba3ec252c23ba1503dc69b2446268d05bb0c362e Mon Sep 17 00:00:00 2001 From: Jacob Niehus Date: Sun, 3 May 2015 21:38:11 -0700 Subject: [PATCH 015/293] Highlight comments inside class/function arg lists --- syntax/python.vim | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/syntax/python.vim b/syntax/python.vim index 4c218c04..8c941374 100644 --- a/syntax/python.vim +++ b/syntax/python.vim @@ -80,13 +80,13 @@ endif syn match pythonFunction "\%(\%(def\s\|@\)\s*\)\@<=\h\%(\w\|\.\)*" contained nextgroup=pythonVars syn region pythonVars start="(" skip=+\(".*"\|'.*'\)+ end=")" contained contains=pythonParameters transparent keepend syn match pythonParameters "[^,]*" contained contains=pythonParam skipwhite - syn match pythonParam "[^,]*" contained contains=pythonExtraOperator,pythonLambdaExpr,pythonBuiltinObj,pythonBuiltinType,pythonConstant,pythonString,pythonNumber,pythonBrackets,pythonSelf skipwhite + syn match pythonParam "[^,]*" contained contains=pythonExtraOperator,pythonLambdaExpr,pythonBuiltinObj,pythonBuiltinType,pythonConstant,pythonString,pythonNumber,pythonBrackets,pythonSelf,pythonComment skipwhite syn match pythonBrackets "{[(|)]}" 
contained skipwhite syn keyword pythonStatement class nextgroup=pythonClass skipwhite syn match pythonClass "\%(\%(class\s\)\s*\)\@<=\h\%(\w\|\.\)*" contained nextgroup=pythonClassVars syn region pythonClassVars start="(" end=")" contained contains=pythonClassParameters transparent keepend - syn match pythonClassParameters "[^,\*]*" contained contains=pythonBuiltin,pythonBuiltinObj,pythonBuiltinType,pythonExtraOperatorpythonStatement,pythonBrackets,pythonString skipwhite + syn match pythonClassParameters "[^,\*]*" contained contains=pythonBuiltin,pythonBuiltinObj,pythonBuiltinType,pythonExtraOperatorpythonStatement,pythonBrackets,pythonString,pythonComment skipwhite syn keyword pythonRepeat for while syn keyword pythonConditional if elif else From 956e3dbe4a3d767f08f258b94abe702669874245 Mon Sep 17 00:00:00 2001 From: Nate Zhang Date: Thu, 14 May 2015 04:13:44 +0800 Subject: [PATCH 016/293] Add Python documentation vertical display option --- autoload/pymode/doc.vim | 3 +++ plugin/pymode.vim | 3 +++ 2 files changed, 6 insertions(+) diff --git a/autoload/pymode/doc.vim b/autoload/pymode/doc.vim index d29d5e9e..b89eb0e7 100644 --- a/autoload/pymode/doc.vim +++ b/autoload/pymode/doc.vim @@ -29,6 +29,9 @@ fun! 
pymode#doc#show(word) "{{{ setlocal nomodifiable setlocal nomodified setlocal filetype=rst + if g:pymode_doc_vertical + wincmd L + endif wincmd p endfunction "}}} diff --git a/plugin/pymode.vim b/plugin/pymode.vim index 9bd4d95c..53408152 100644 --- a/plugin/pymode.vim +++ b/plugin/pymode.vim @@ -52,6 +52,9 @@ call pymode#default("g:pymode_options", 1) call pymode#default("g:pymode_options_max_line_length", 80) call pymode#default("g:pymode_options_colorcolumn", 1) +" Enable/disable vertical display of python documentation +call pymode#default("g:pymode_doc_vertical", 0) + " Minimal height of pymode quickfix window call pymode#default('g:pymode_quickfix_maxheight', 6) From 113909f386855c2c769374e6f664d74c794f9742 Mon Sep 17 00:00:00 2001 From: Chris Drane Date: Thu, 21 May 2015 10:23:30 -0400 Subject: [PATCH 017/293] Removed ambiguity in doc/pymode.txt related to pymode_options defaults --- doc/pymode.txt | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/doc/pymode.txt b/doc/pymode.txt index 33968b3e..36ce040e 100644 --- a/doc/pymode.txt +++ b/doc/pymode.txt @@ -98,10 +98,6 @@ Setup default python options *'g:pymode_options'* > let g:pymode_options = 1 -Setup max line length *'g:pymode_options_max_line_length'* -> - let g:pymode_options_max_line_length = 79 - If this option is set to 1, pymode will enable the following options for python buffers: > @@ -115,6 +111,10 @@ python buffers: > setlocal commentstring=#%s setlocal define=^\s*\\(def\\\\|class\\) +Setup max line length *'g:pymode_options_max_line_length'* +> + let g:pymode_options_max_line_length = 79 + Enable colorcolumn display at max_line_length *'g:pymode_options_colorcolumn'* > let g:pymode_options_colorcolumn = 1 From d8f04f943e9614e436bd9ef6f7b22f27c3bf79bc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Korzeniewski?= Date: Sun, 31 May 2015 16:47:28 +0200 Subject: [PATCH 018/293] Add wdb to debugger list in breakpoint cmd. 
--- autoload/pymode/breakpoint.vim | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autoload/pymode/breakpoint.vim b/autoload/pymode/breakpoint.vim index 18e1a95b..cf7b95be 100644 --- a/autoload/pymode/breakpoint.vim +++ b/autoload/pymode/breakpoint.vim @@ -17,7 +17,7 @@ fun! pymode#breakpoint#init() "{{{ from imp import find_module -for module in ('pudb', 'ipdb'): +for module in ('wdb', 'pudb', 'ipdb'): try: find_module(module) vim.command('let g:pymode_breakpoint_cmd = "import %s; %s.set_trace() # XXX BREAKPOINT"' % (module, module)) From 8144e994cabe27a8f169c1bf001c58ad0e998d20 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Guido=20Perc=C3=BA?= Date: Wed, 1 Jul 2015 18:06:24 -0300 Subject: [PATCH 019/293] Documentation is on :help pymode --- README.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.rst b/README.rst index b6f9bb69..827dd95c 100644 --- a/README.rst +++ b/README.rst @@ -40,7 +40,7 @@ See (very old) screencast here: http://www.youtube.com/watch?v=67OZNp9Z0CQ (sorry for quality, this is my first screencast) Another old presentation here: http://www.youtube.com/watch?v=YhqsjUUHj6g -**To read python-mode documentation in Vim, see** ``:help pymode.txt`` +**To read python-mode documentation in Vim, see** ``:help pymode`` .. contents:: From 88969fb67e3c7c32c711954abdb86e94c9686fe4 Mon Sep 17 00:00:00 2001 From: Robin Schneider Date: Sun, 5 Jul 2015 22:59:16 +0200 Subject: [PATCH 020/293] g:pymode_trim_whitespaces = 0 was ignored. * Added condition to the trim function itself. --- autoload/pymode.vim | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/autoload/pymode.vim b/autoload/pymode.vim index c518c415..8c4cdea6 100644 --- a/autoload/pymode.vim +++ b/autoload/pymode.vim @@ -76,9 +76,11 @@ endfunction "}}} " DESC: Remove unused whitespaces fun! pymode#trim_whitespaces() "{{{ - let cursor_pos = getpos('.') - silent! 
%s/\s\+$// - call setpos('.', cursor_pos) + if g:pymode_trim_whitespaces + let cursor_pos = getpos('.') + silent! %s/\s\+$// + call setpos('.', cursor_pos) + endif endfunction "}}} From 08ec591f46eb3e7ada39c1f79d0ffdbab091317d Mon Sep 17 00:00:00 2001 From: Kirill Klenov Date: Tue, 25 Aug 2015 18:43:00 +0300 Subject: [PATCH 021/293] Update Rope --- AUTHORS | 2 +- Changelog.rst | 2 ++ Makefile | 6 ++++++ README.rst | 4 ++-- doc/pymode.txt | 8 ++++---- plugin/pymode.vim | 2 +- pylama.ini | 11 ++++++++--- pymode/libs2/rope/base/default_config.py | 5 +++-- pymode/libs3/rope/base/default_config.py | 6 +++--- pymode/rope.py | 18 ++++++------------ 10 files changed, 36 insertions(+), 28 deletions(-) diff --git a/AUTHORS b/AUTHORS index ef6d0bbc..99ed09ec 100644 --- a/AUTHORS +++ b/AUTHORS @@ -40,6 +40,7 @@ Contributors: * Sorin Ionescu (sorin-ionescu); * Steve Losh (http://github.com/sjl); * Tommy Allen (https://github.com/tweekmonster) +* Tyler Fenby (https://github.com/TFenby) * Wayne Ye (https://github.com/WayneYe) * bendavis78 (http://github.com/bendavis78) * fwuzju (http://github.com/fwuzju) @@ -47,4 +48,3 @@ Contributors: * lee (loyalpartner); * nixon; * tramchamploo; -* Tyler Fenby (https://github.com/TFenby) diff --git a/Changelog.rst b/Changelog.rst index ac48fccb..6e728832 100644 --- a/Changelog.rst +++ b/Changelog.rst @@ -2,12 +2,14 @@ Changelog ========= * Pylama updated to version 5.0.5 +* Rope libs updated * Add 'pymode_options_max_line_length' option * Add ability to set related checker options `:help pymode-lint-options` Options added: 'pymode_lint_options_pep8', 'pymode_lint_options_pep257', 'pymode_lint_options_mccabe', 'pymode_lint_options_pyflakes', 'pymode_lint_options_pylint' + ## 2014-06-11 0.8.1 ------------------- * Pylama updated to version 3.3.2 diff --git a/Makefile b/Makefile index a4370605..38c009c2 100644 --- a/Makefile +++ b/Makefile @@ -26,6 +26,12 @@ pylama: make $(PYLAMA) make $(PYLAMA)/lint/pylama_pylint +.PHONY: rope +rope: + @git 
clone https://github.com/python-rope/rope.git $(CURDIR)/_/rope + @rm -rf $(CURDIR)/pymode/libs/rope + @cp -r $(CURDIR)/_/rope/rope $(CURDIR)/pymode/libs/. + $(PYLAMA): cp -r ~/Dropbox/projects/pylama/pylama $(PYLAMA) diff --git a/README.rst b/README.rst index b6f9bb69..54b659ae 100644 --- a/README.rst +++ b/README.rst @@ -187,8 +187,8 @@ License Licensed under a `GNU lesser general public license`_. -If you like this plugin, you can send me postcard :) -My address is here: "Russia, 143401, Krasnogorsk, Shkolnaya 1-19" to "Kirill Klenov". +If you like this plugin, I would very appreciated if you kindly send me a postcard :) +My address is here: "Russia, 143500, MO, Istra, pos. Severny 8-3" to "Kirill Klenov". **Thanks for support!** .. _GNU lesser general public license: http://www.gnu.org/copyleft/lesser.html diff --git a/doc/pymode.txt b/doc/pymode.txt index 33968b3e..55e51a4f 100644 --- a/doc/pymode.txt +++ b/doc/pymode.txt @@ -482,7 +482,7 @@ Keymap for autocomplete *'g:pymode_rope_completion_bind'* Extended autocompletion (rope could complete objects which have not been imported) from project *'g:pymode_rope_autoimport'* > - let g:pymode_rope_autoimport = 1 + let g:pymode_rope_autoimport = 0 Load modules to autoimport by default *'g:pymode_rope_autoimport_modules'* > @@ -781,10 +781,10 @@ The sequence of commands that fixed this: Python-mode is released under the GNU lesser general public license. See: http://www.gnu.org/copyleft/lesser.html -If you like this plugin, you can send me a postcard :) +If you like this plugin, I would very appreciated if you kindly send me a postcard :) -My address is: "Russia, 143401, Krasnogorsk, Shkolnaya 1-19" to "Kirill -Klenov". Thanks for your support! +My address is: "Russia, 143500, MO, Istra, pos. Severny 8-3" to "Kirill Klenov". +Thanks for your support! 
------------------------------------------------------------------------------ diff --git a/plugin/pymode.vim b/plugin/pymode.vim index 9bd4d95c..de93cf29 100644 --- a/plugin/pymode.vim +++ b/plugin/pymode.vim @@ -183,7 +183,7 @@ call pymode#default('g:pymode_rope_completion', 1) " Complete keywords from not imported modules (could make completion slower) " Enable autoimport used modules -call pymode#default('g:pymode_rope_autoimport', 1) +call pymode#default('g:pymode_rope_autoimport', 0) " Offer to import object after complete (if that not be imported before) call pymode#default('g:pymode_rope_autoimport_import_after_complete', 0) diff --git a/pylama.ini b/pylama.ini index 07c1ab7a..b8d3f375 100644 --- a/pylama.ini +++ b/pylama.ini @@ -1,3 +1,8 @@ -[main] -ignore = R0201,R0922,E1103 -skip = pymode/autopep8.py +[pylama] +linters=pep8,pyflakes,pylint + +[pylama:pymode/libs*] +skip=1 + +[pylama:pylint] +disable=E1120,E1130,E1103,W1401 diff --git a/pymode/libs2/rope/base/default_config.py b/pymode/libs2/rope/base/default_config.py index 0ee9937d..3745e306 100644 --- a/pymode/libs2/rope/base/default_config.py +++ b/pymode/libs2/rope/base/default_config.py @@ -13,8 +13,9 @@ def set_prefs(prefs): # '.svn': matches 'pkg/.svn' and all of its children # 'build/*.o': matches 'build/lib.o' but not 'build/sub/lib.o' # 'build//*.o': matches 'build/lib.o' and 'build/sub/lib.o' - prefs['ignored_resources'] = ['*.pyc', '*~', '.ropeproject', - '.hg', '.svn', '_svn', '.git', '.tox'] + prefs['ignored_resources'] = [ + '*.pyc', '*~', '.ropeproject', '.hg', '.svn', '_svn', '.git', + '.tox', '.env', 'node_modules', 'bower_components'] # Specifies which files should be considered python files. It is # useful when you have scripts inside your project. 
Only files diff --git a/pymode/libs3/rope/base/default_config.py b/pymode/libs3/rope/base/default_config.py index eda47b24..126cf7bf 100644 --- a/pymode/libs3/rope/base/default_config.py +++ b/pymode/libs3/rope/base/default_config.py @@ -13,9 +13,9 @@ def set_prefs(prefs): # '.svn': matches 'pkg/.svn' and all of its children # 'build/*.o': matches 'build/lib.o' but not 'build/sub/lib.o' # 'build//*.o': matches 'build/lib.o' and 'build/sub/lib.o' - prefs['ignored_resources'] = ['*.pyc', '*~', '.ropeproject', - '.hg', '.svn', '_svn', '.git', - '__pycache__'] + prefs['ignored_resources'] = [ + '*.pyc', '*~', '.ropeproject', '.hg', '.svn', '_svn', '.git', + '__pycache__', '.tox', '.env', 'node_modules', 'bower_components'] # Specifies which files should be considered python files. It is # useful when you have scripts inside your project. Only files diff --git a/pymode/rope.py b/pymode/rope.py index 159900bf..2347e49e 100644 --- a/pymode/rope.py +++ b/pymode/rope.py @@ -1,7 +1,6 @@ """ Rope support in pymode. """ from __future__ import absolute_import, print_function -import multiprocessing import os.path import re import site @@ -13,7 +12,6 @@ from rope.contrib import autoimport as rope_autoimport, codeassist, findit, generate # noqa from rope.refactor import ModuleToPackage, ImportOrganizer, rename, extract, inline, usefunction, move, change_signature, importutils # noqa -from ._compat import StringIO from .environment import env @@ -352,8 +350,7 @@ def __init__(self, path, project_path): """ Init Rope context. 
""" self.path = path - self.project = project.Project( - project_path, fscommands=FileSystemCommands()) + self.project = project.Project(project_path, fscommands=FileSystemCommands()) self.importer = rope_autoimport.AutoImport( project=self.project, observe=False) @@ -462,8 +459,8 @@ def run(self): action = env.user_input_choices( 'Choose what to do:', 'perform', 'preview', - 'perform in class hierarchy', - 'preview in class hierarchy') + 'perform in class hierarchy', + 'preview in class hierarchy') in_hierarchy = action.endswith("in class hierarchy") @@ -512,7 +509,7 @@ def get_changes(refactor, input_str, in_hierarchy=False): """ progress = ProgressHandler('Calculate changes ...') return refactor.get_changes( - input_str, task_handle=progress.handle, in_hierarchy = in_hierarchy) + input_str, task_handle=progress.handle, in_hierarchy=in_hierarchy) class RenameRefactoring(Refactoring): @@ -746,13 +743,12 @@ def get_refactor(ctx): return change_signature.ChangeSignature( ctx.project, ctx.resource, offset) - def get_changes(self, refactor, input_string): + def get_changes(self, refactor, input_string, in_hierarchy=False): """ Function description. :return Rope.changes: """ - args = re.sub(r'[\s\(\)]+', '', input_string).split(',') olds = [arg[0] for arg in refactor.get_args()] @@ -793,7 +789,7 @@ def get_refactor(self, ctx): return generate.create_generate( self.kind, ctx.project, ctx.resource, offset) - def get_changes(self, refactor, input_str): + def get_changes(self, refactor, input_str, in_hierarchy=False): """ Function description. 
:return Rope.changes: @@ -938,5 +934,3 @@ def _insert_import(name, module, ctx): progress = ProgressHandler('Apply changes ...') ctx.project.do(changes, task_handle=progress.handle) reload_changes(changes) - -# pylama:ignore=W1401,E1120,D From 59a300e99a1dfd03ec5f3919ef41fa42848f4676 Mon Sep 17 00:00:00 2001 From: Kirill Klenov Date: Wed, 26 Aug 2015 13:03:35 +0300 Subject: [PATCH 022/293] Update authors --- AUTHORS | 1 + 1 file changed, 1 insertion(+) diff --git a/AUTHORS b/AUTHORS index 5f97486f..aad20bd1 100644 --- a/AUTHORS +++ b/AUTHORS @@ -38,6 +38,7 @@ Contributors: * Piet Delport (http://github.com/pjdelport); * Robert David Grant (http://github.com/bgrant); * Ronald Andreu Kaiser (http://github.com/cathoderay); +* Samir Benmendil (https://github.com/Ram-Z) * Sorin Ionescu (sorin-ionescu); * Steve Losh (http://github.com/sjl); * Tommy Allen (https://github.com/tweekmonster) From fa6322e04c53916b6f947c7e33c359c3af2e281e Mon Sep 17 00:00:00 2001 From: Jacob Niehus Date: Sun, 30 Aug 2015 18:26:54 -0700 Subject: [PATCH 023/293] Fix folding after blank line in class/def --- autoload/pymode/folding.vim | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autoload/pymode/folding.vim b/autoload/pymode/folding.vim index c869b05f..3ed61bc5 100644 --- a/autoload/pymode/folding.vim +++ b/autoload/pymode/folding.vim @@ -51,7 +51,7 @@ fun! 
pymode#folding#expr(lnum) "{{{ if line =~ s:def_regex " single line def - if indent(a:lnum) >= indent(a:lnum+1) + if indent(a:lnum) >= indent(a:lnum+1) && getline(prevnonblank(a:lnum)) !~ ':\s*$' return '=' endif " Check if last decorator is before the last def From 294894abfd9925261f88f0b874e853e2fe362903 Mon Sep 17 00:00:00 2001 From: Vincent Driessen Date: Mon, 7 Sep 2015 08:54:54 +0200 Subject: [PATCH 024/293] Fix fold marker --- autoload/pymode.vim | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autoload/pymode.vim b/autoload/pymode.vim index 8c4cdea6..1ce29c3f 100644 --- a/autoload/pymode.vim +++ b/autoload/pymode.vim @@ -109,7 +109,7 @@ endfunction "}}} fun! pymode#buffer_pre_write() "{{{ let b:pymode_modified = &modified -endfunction +endfunction "}}} fun! pymode#buffer_post_write() "{{{ if g:pymode_rope From f395f43197b159c18ba12c036bd0d6794ee87290 Mon Sep 17 00:00:00 2001 From: Vincent Driessen Date: Mon, 7 Sep 2015 08:55:33 +0200 Subject: [PATCH 025/293] Flip operands This avoids accessing the b:python_modified value, which under some circumstances cannot be set, which results in the following error: Error detected while processing function pymode#buffer_post_write: line 2: E121: Undefined variable: b:pymode_modified E15: Invalid expression: b:pymode_modified && g:pymode_rope_regenerate_on_write Note that this does not address the core issue with why pymode_modified if unset in the first place, but this avoids that being a problem if g:python_rope_regenerate_on_write is not wanted anyway. --- autoload/pymode.vim | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autoload/pymode.vim b/autoload/pymode.vim index 1ce29c3f..6c5de80c 100644 --- a/autoload/pymode.vim +++ b/autoload/pymode.vim @@ -113,7 +113,7 @@ endfunction "}}} fun! 
pymode#buffer_post_write() "{{{ if g:pymode_rope - if b:pymode_modified && g:pymode_rope_regenerate_on_write + if g:pymode_rope_regenerate_on_write && b:pymode_modified call pymode#debug('regenerate') call pymode#rope#regenerate() endif From af4268183a9a005da6ea37874f720ae90e64ff3b Mon Sep 17 00:00:00 2001 From: Kirill Klenov Date: Mon, 7 Sep 2015 20:57:45 +0300 Subject: [PATCH 026/293] Update pylama --- AUTHORS | 3 + Changelog.rst | 6 + Makefile | 4 +- pymode/autopep8.py | 773 ++-- pymode/environment.py | 52 +- pymode/libs/_markerlib/__init__.py | 16 + pymode/libs/_markerlib/markers.py | 119 + .../pylama_pylint => }/astroid/__init__.py | 15 +- .../pylama_pylint => }/astroid/__pkginfo__.py | 16 +- .../pylama_pylint => }/astroid/as_string.py | 85 +- pymode/libs/astroid/astpeephole.py | 86 + .../lint/pylama_pylint => }/astroid/bases.py | 94 +- .../libs/astroid/brain/builtin_inference.py | 245 ++ .../pylama_pylint => }/astroid/brain/py2gi.py | 52 +- .../astroid/brain/py2mechanize.py | 10 +- pymode/libs/astroid/brain/py2pytest.py | 31 + pymode/libs/astroid/brain/py2qt4.py | 22 + pymode/libs/astroid/brain/py2stdlib.py | 334 ++ pymode/libs/astroid/brain/pynose.py | 79 + pymode/libs/astroid/brain/pysix_moves.py | 261 ++ .../pylama_pylint => }/astroid/builder.py | 54 +- .../pylama_pylint => }/astroid/exceptions.py | 0 .../pylama_pylint => }/astroid/inference.py | 80 +- pymode/libs/astroid/inspector.py | 273 ++ .../pylama_pylint => }/astroid/manager.py | 127 +- .../lint/pylama_pylint => }/astroid/mixins.py | 20 +- pymode/libs/astroid/modutils.py | 670 ++++ .../astroid/node_classes.py | 116 +- .../lint/pylama_pylint => }/astroid/nodes.py | 1 + .../pylama_pylint => }/astroid/protocols.py | 117 +- .../astroid/raw_building.py | 35 +- .../pylama_pylint => }/astroid/rebuilder.py | 182 +- .../astroid/scoped_nodes.py | 526 ++- pymode/libs/astroid/test_utils.py | 218 ++ .../lint/pylama_pylint => }/astroid/utils.py | 29 +- pymode/libs/easy_install.py | 5 + 
.../logilab/common/__init__.py | 17 +- pymode/libs/logilab/common/cache.py | 114 + .../logilab/common/changelog.py | 6 +- pymode/libs/logilab/common/clcommands.py | 334 ++ pymode/libs/logilab/common/compat.py | 78 + .../logilab/common/configuration.py | 93 +- pymode/libs/logilab/common/daemon.py | 101 + pymode/libs/logilab/common/date.py | 335 ++ pymode/libs/logilab/common/debugger.py | 214 ++ .../logilab/common/decorators.py | 18 +- .../logilab/common/deprecation.py | 5 +- pymode/libs/logilab/common/fileutils.py | 404 +++ .../logilab/common/graph.py | 22 +- .../logilab/common/interface.py | 0 pymode/libs/logilab/common/logging_ext.py | 195 ++ .../logilab/common/modutils.py | 58 +- .../logilab/common/optik_ext.py | 17 +- pymode/libs/logilab/common/optparser.py | 92 + pymode/libs/logilab/common/proc.py | 277 ++ pymode/libs/logilab/common/pytest.py | 1202 +++++++ pymode/libs/logilab/common/registry.py | 1125 ++++++ pymode/libs/logilab/common/shellutils.py | 462 +++ pymode/libs/logilab/common/sphinx_ext.py | 87 + pymode/libs/logilab/common/sphinxutils.py | 122 + pymode/libs/logilab/common/table.py | 929 +++++ pymode/libs/logilab/common/tasksqueue.py | 101 + pymode/libs/logilab/common/testlib.py | 1338 +++++++ .../logilab/common/textutils.py | 7 +- .../pylama_pylint => }/logilab/common/tree.py | 0 pymode/libs/logilab/common/umessage.py | 194 + .../logilab/common/ureports/__init__.py | 18 +- .../logilab/common/ureports/docbook_writer.py | 3 +- .../logilab/common/ureports/html_writer.py | 70 +- .../logilab/common/ureports/nodes.py | 4 +- .../logilab/common/ureports/text_writer.py | 37 +- pymode/libs/logilab/common/urllib2ext.py | 89 + pymode/libs/logilab/common/vcgutils.py | 216 ++ .../logilab/common/visitor.py | 8 +- pymode/libs/logilab/common/xmlutils.py | 61 + .../libs/logilab_common-1.0.2-py2.7-nspkg.pth | 1 + .../DESCRIPTION.rst | 153 + .../logilab_common-1.0.2.dist-info/METADATA | 169 + .../logilab_common-1.0.2.dist-info/RECORD | 87 + 
.../libs/logilab_common-1.0.2.dist-info/WHEEL | 5 + .../metadata.json | 1 + .../namespace_packages.txt | 1 + .../top_level.txt | 1 + .../{pylama/lint/pylama_mccabe => }/mccabe.py | 78 +- pymode/libs/pep257.py | 1187 +++++++ .../{pylama/lint/pylama_pep8 => }/pep8.py | 273 +- pymode/libs/pkg_resources/__init__.py | 3113 +++++++++++++++++ .../_vendor}/__init__.py | 0 .../_vendor/packaging/__about__.py | 31 + .../_vendor/packaging/__init__.py | 24 + .../_vendor/packaging/_compat.py | 40 + .../_vendor/packaging/_structures.py | 78 + .../_vendor/packaging/specifiers.py | 784 +++++ .../_vendor/packaging/version.py | 403 +++ pymode/libs/pyflakes/__init__.py | 1 + pymode/libs/pyflakes/__main__.py | 5 + pymode/libs/pyflakes/api.py | 175 + .../pylama_pyflakes => }/pyflakes/checker.py | 69 +- .../pylama_pyflakes => }/pyflakes/messages.py | 15 +- pymode/libs/pyflakes/reporter.py | 81 + pymode/libs/pylama/__init__.py | 6 +- pymode/libs/pylama/__main__.py | 6 + pymode/libs/pylama/{tasks.py => async.py} | 52 +- pymode/libs/pylama/config.py | 54 +- pymode/libs/pylama/core.py | 60 +- pymode/libs/pylama/errors.py | 45 +- pymode/libs/pylama/hook.py | 20 +- pymode/libs/pylama/lint/__init__.py | 13 +- pymode/libs/pylama/lint/extensions.py | 37 +- pymode/libs/pylama/lint/pylama_mccabe.py | 29 + .../pylama/lint/pylama_mccabe/__init__.py | 20 - pymode/libs/pylama/lint/pylama_pep257.py | 21 + .../pylama/lint/pylama_pep257/__init__.py | 26 - .../libs/pylama/lint/pylama_pep257/pep257.py | 728 ---- .../__init__.py => pylama_pep8.py} | 28 +- pymode/libs/pylama/lint/pylama_pyflakes.py | 49 + .../pylama/lint/pylama_pyflakes/__init__.py | 65 - .../lint/pylama_pyflakes/pyflakes/__init__.py | 2 - .../pylama/lint/pylama_pylint/__init__.py | 9 +- .../pylama_pylint/astroid/brain/py2qt4.py | 25 - .../pylama_pylint/astroid/brain/py2stdlib.py | 252 -- .../logilab/common/__pkginfo__.py | 53 - .../pylama_pylint/logilab/common/compat.py | 243 -- pymode/libs/pylama/lint/pylama_pylint/main.py | 2 - 
.../pylama_pylint/pylint/checkers/stdlib.py | 69 - .../pylama_pylint/pylint/checkers/strings.py | 304 -- pymode/libs/pylama/main.py | 100 +- pymode/libs/pylama/pytest.py | 17 +- .../pylama_pylint => }/pylint/__init__.py | 2 + pymode/libs/pylint/__main__.py | 3 + .../pylama_pylint => }/pylint/__pkginfo__.py | 12 +- .../pylint/checkers/__init__.py | 43 +- .../pylint/checkers/base.py | 687 ++-- .../pylint/checkers/classes.py | 474 ++- .../pylint/checkers/design_analysis.py | 144 +- .../pylint/checkers/exceptions.py | 284 +- .../pylint/checkers/format.py | 221 +- .../pylint/checkers/imports.py | 81 +- .../pylint/checkers/logging.py | 99 +- .../pylint/checkers/misc.py | 50 +- .../pylint/checkers/newstyle.py | 49 +- pymode/libs/pylint/checkers/python3.py | 581 +++ .../pylint/checkers/raw_metrics.py | 0 .../pylint/checkers/similar.py | 55 +- pymode/libs/pylint/checkers/spelling.py | 250 ++ pymode/libs/pylint/checkers/stdlib.py | 216 ++ pymode/libs/pylint/checkers/strings.py | 615 ++++ .../pylint/checkers/typecheck.py | 270 +- .../pylint/checkers/utils.py | 192 +- .../pylint/checkers/variables.py | 474 ++- .../lint/pylama_pylint => }/pylint/config.py | 17 +- pymode/libs/pylint/epylint.py | 177 + pymode/libs/pylint/gui.py | 531 +++ .../pylama_pylint => }/pylint/interfaces.py | 16 +- .../lint/pylama_pylint => }/pylint/lint.py | 885 +++-- pymode/libs/pylint/pyreverse/__init__.py | 5 + pymode/libs/pylint/pyreverse/diadefslib.py | 233 ++ pymode/libs/pylint/pyreverse/diagrams.py | 247 ++ pymode/libs/pylint/pyreverse/main.py | 124 + pymode/libs/pylint/pyreverse/utils.py | 132 + pymode/libs/pylint/pyreverse/writer.py | 199 ++ .../pylint/reporters/__init__.py | 55 +- .../pylint/reporters/guireporter.py | 7 +- .../pylint/reporters/html.py | 47 +- pymode/libs/pylint/reporters/json.py | 58 + .../pylint/reporters/text.py | 37 +- pymode/libs/pylint/testutils.py | 412 +++ .../lint/pylama_pylint => }/pylint/utils.py | 588 ++-- pymode/libs/six.py | 838 +++++ pymode/lint.py | 16 +- 
pymode/rope.py | 2 +- pymode/run.py | 1 + pymode/utils.py | 11 +- 173 files changed, 27150 insertions(+), 4609 deletions(-) create mode 100644 pymode/libs/_markerlib/__init__.py create mode 100644 pymode/libs/_markerlib/markers.py rename pymode/libs/{pylama/lint/pylama_pylint => }/astroid/__init__.py (89%) rename pymode/libs/{pylama/lint/pylama_pylint => }/astroid/__pkginfo__.py (78%) rename pymode/libs/{pylama/lint/pylama_pylint => }/astroid/as_string.py (85%) create mode 100644 pymode/libs/astroid/astpeephole.py rename pymode/libs/{pylama/lint/pylama_pylint => }/astroid/bases.py (89%) create mode 100644 pymode/libs/astroid/brain/builtin_inference.py rename pymode/libs/{pylama/lint/pylama_pylint => }/astroid/brain/py2gi.py (75%) rename pymode/libs/{pylama/lint/pylama_pylint => }/astroid/brain/py2mechanize.py (53%) create mode 100644 pymode/libs/astroid/brain/py2pytest.py create mode 100644 pymode/libs/astroid/brain/py2qt4.py create mode 100644 pymode/libs/astroid/brain/py2stdlib.py create mode 100644 pymode/libs/astroid/brain/pynose.py create mode 100644 pymode/libs/astroid/brain/pysix_moves.py rename pymode/libs/{pylama/lint/pylama_pylint => }/astroid/builder.py (85%) rename pymode/libs/{pylama/lint/pylama_pylint => }/astroid/exceptions.py (100%) rename pymode/libs/{pylama/lint/pylama_pylint => }/astroid/inference.py (87%) create mode 100644 pymode/libs/astroid/inspector.py rename pymode/libs/{pylama/lint/pylama_pylint => }/astroid/manager.py (73%) rename pymode/libs/{pylama/lint/pylama_pylint => }/astroid/mixins.py (92%) create mode 100644 pymode/libs/astroid/modutils.py rename pymode/libs/{pylama/lint/pylama_pylint => }/astroid/node_classes.py (88%) rename pymode/libs/{pylama/lint/pylama_pylint => }/astroid/nodes.py (98%) rename pymode/libs/{pylama/lint/pylama_pylint => }/astroid/protocols.py (75%) rename pymode/libs/{pylama/lint/pylama_pylint => }/astroid/raw_building.py (92%) rename pymode/libs/{pylama/lint/pylama_pylint => }/astroid/rebuilder.py (87%) rename 
pymode/libs/{pylama/lint/pylama_pylint => }/astroid/scoped_nodes.py (67%) create mode 100644 pymode/libs/astroid/test_utils.py rename pymode/libs/{pylama/lint/pylama_pylint => }/astroid/utils.py (89%) create mode 100644 pymode/libs/easy_install.py rename pymode/libs/{pylama/lint/pylama_pylint => }/logilab/common/__init__.py (92%) create mode 100644 pymode/libs/logilab/common/cache.py rename pymode/libs/{pylama/lint/pylama_pylint => }/logilab/common/changelog.py (98%) create mode 100644 pymode/libs/logilab/common/clcommands.py create mode 100644 pymode/libs/logilab/common/compat.py rename pymode/libs/{pylama/lint/pylama_pylint => }/logilab/common/configuration.py (94%) create mode 100644 pymode/libs/logilab/common/daemon.py create mode 100644 pymode/libs/logilab/common/date.py create mode 100644 pymode/libs/logilab/common/debugger.py rename pymode/libs/{pylama/lint/pylama_pylint => }/logilab/common/decorators.py (95%) rename pymode/libs/{pylama/lint/pylama_pylint => }/logilab/common/deprecation.py (98%) create mode 100644 pymode/libs/logilab/common/fileutils.py rename pymode/libs/{pylama/lint/pylama_pylint => }/logilab/common/graph.py (93%) rename pymode/libs/{pylama/lint/pylama_pylint => }/logilab/common/interface.py (100%) create mode 100644 pymode/libs/logilab/common/logging_ext.py rename pymode/libs/{pylama/lint/pylama_pylint => }/logilab/common/modutils.py (94%) rename pymode/libs/{pylama/lint/pylama_pylint => }/logilab/common/optik_ext.py (96%) create mode 100644 pymode/libs/logilab/common/optparser.py create mode 100644 pymode/libs/logilab/common/proc.py create mode 100644 pymode/libs/logilab/common/pytest.py create mode 100644 pymode/libs/logilab/common/registry.py create mode 100644 pymode/libs/logilab/common/shellutils.py create mode 100644 pymode/libs/logilab/common/sphinx_ext.py create mode 100644 pymode/libs/logilab/common/sphinxutils.py create mode 100644 pymode/libs/logilab/common/table.py create mode 100644 pymode/libs/logilab/common/tasksqueue.py 
create mode 100644 pymode/libs/logilab/common/testlib.py rename pymode/libs/{pylama/lint/pylama_pylint => }/logilab/common/textutils.py (99%) rename pymode/libs/{pylama/lint/pylama_pylint => }/logilab/common/tree.py (100%) create mode 100644 pymode/libs/logilab/common/umessage.py rename pymode/libs/{pylama/lint/pylama_pylint => }/logilab/common/ureports/__init__.py (93%) rename pymode/libs/{pylama/lint/pylama_pylint => }/logilab/common/ureports/docbook_writer.py (99%) rename pymode/libs/{pylama/lint/pylama_pylint => }/logilab/common/ureports/html_writer.py (66%) rename pymode/libs/{pylama/lint/pylama_pylint => }/logilab/common/ureports/nodes.py (98%) rename pymode/libs/{pylama/lint/pylama_pylint => }/logilab/common/ureports/text_writer.py (82%) create mode 100644 pymode/libs/logilab/common/urllib2ext.py create mode 100644 pymode/libs/logilab/common/vcgutils.py rename pymode/libs/{pylama/lint/pylama_pylint => }/logilab/common/visitor.py (97%) create mode 100644 pymode/libs/logilab/common/xmlutils.py create mode 100644 pymode/libs/logilab_common-1.0.2-py2.7-nspkg.pth create mode 100644 pymode/libs/logilab_common-1.0.2.dist-info/DESCRIPTION.rst create mode 100644 pymode/libs/logilab_common-1.0.2.dist-info/METADATA create mode 100644 pymode/libs/logilab_common-1.0.2.dist-info/RECORD create mode 100644 pymode/libs/logilab_common-1.0.2.dist-info/WHEEL create mode 100644 pymode/libs/logilab_common-1.0.2.dist-info/metadata.json create mode 100644 pymode/libs/logilab_common-1.0.2.dist-info/namespace_packages.txt create mode 100644 pymode/libs/logilab_common-1.0.2.dist-info/top_level.txt rename pymode/libs/{pylama/lint/pylama_mccabe => }/mccabe.py (86%) create mode 100644 pymode/libs/pep257.py rename pymode/libs/{pylama/lint/pylama_pep8 => }/pep8.py (89%) create mode 100644 pymode/libs/pkg_resources/__init__.py rename pymode/libs/{pylama/lint/pylama_pylint/logilab => pkg_resources/_vendor}/__init__.py (100%) create mode 100644 
pymode/libs/pkg_resources/_vendor/packaging/__about__.py create mode 100644 pymode/libs/pkg_resources/_vendor/packaging/__init__.py create mode 100644 pymode/libs/pkg_resources/_vendor/packaging/_compat.py create mode 100644 pymode/libs/pkg_resources/_vendor/packaging/_structures.py create mode 100644 pymode/libs/pkg_resources/_vendor/packaging/specifiers.py create mode 100644 pymode/libs/pkg_resources/_vendor/packaging/version.py create mode 100644 pymode/libs/pyflakes/__init__.py create mode 100644 pymode/libs/pyflakes/__main__.py create mode 100644 pymode/libs/pyflakes/api.py rename pymode/libs/{pylama/lint/pylama_pyflakes => }/pyflakes/checker.py (92%) rename pymode/libs/{pylama/lint/pylama_pyflakes => }/pyflakes/messages.py (94%) create mode 100644 pymode/libs/pyflakes/reporter.py create mode 100644 pymode/libs/pylama/__main__.py rename pymode/libs/pylama/{tasks.py => async.py} (55%) create mode 100644 pymode/libs/pylama/lint/pylama_mccabe.py delete mode 100644 pymode/libs/pylama/lint/pylama_mccabe/__init__.py create mode 100644 pymode/libs/pylama/lint/pylama_pep257.py delete mode 100644 pymode/libs/pylama/lint/pylama_pep257/__init__.py delete mode 100644 pymode/libs/pylama/lint/pylama_pep257/pep257.py rename pymode/libs/pylama/lint/{pylama_pep8/__init__.py => pylama_pep8.py} (65%) create mode 100644 pymode/libs/pylama/lint/pylama_pyflakes.py delete mode 100644 pymode/libs/pylama/lint/pylama_pyflakes/__init__.py delete mode 100644 pymode/libs/pylama/lint/pylama_pyflakes/pyflakes/__init__.py delete mode 100644 pymode/libs/pylama/lint/pylama_pylint/astroid/brain/py2qt4.py delete mode 100644 pymode/libs/pylama/lint/pylama_pylint/astroid/brain/py2stdlib.py delete mode 100644 pymode/libs/pylama/lint/pylama_pylint/logilab/common/__pkginfo__.py delete mode 100644 pymode/libs/pylama/lint/pylama_pylint/logilab/common/compat.py delete mode 100644 pymode/libs/pylama/lint/pylama_pylint/pylint/checkers/stdlib.py delete mode 100644 
pymode/libs/pylama/lint/pylama_pylint/pylint/checkers/strings.py rename pymode/libs/{pylama/lint/pylama_pylint => }/pylint/__init__.py (96%) create mode 100644 pymode/libs/pylint/__main__.py rename pymode/libs/{pylama/lint/pylama_pylint => }/pylint/__pkginfo__.py (90%) rename pymode/libs/{pylama/lint/pylama_pylint => }/pylint/checkers/__init__.py (75%) rename pymode/libs/{pylama/lint/pylama_pylint => }/pylint/checkers/base.py (64%) rename pymode/libs/{pylama/lint/pylama_pylint => }/pylint/checkers/classes.py (65%) rename pymode/libs/{pylama/lint/pylama_pylint => }/pylint/checkers/design_analysis.py (75%) rename pymode/libs/{pylama/lint/pylama_pylint => }/pylint/checkers/exceptions.py (52%) rename pymode/libs/{pylama/lint/pylama_pylint => }/pylint/checkers/format.py (84%) rename pymode/libs/{pylama/lint/pylama_pylint => }/pylint/checkers/imports.py (86%) rename pymode/libs/{pylama/lint/pylama_pylint => }/pylint/checkers/logging.py (69%) rename pymode/libs/{pylama/lint/pylama_pylint => }/pylint/checkers/misc.py (64%) rename pymode/libs/{pylama/lint/pylama_pylint => }/pylint/checkers/newstyle.py (76%) create mode 100644 pymode/libs/pylint/checkers/python3.py rename pymode/libs/{pylama/lint/pylama_pylint => }/pylint/checkers/raw_metrics.py (100%) rename pymode/libs/{pylama/lint/pylama_pylint => }/pylint/checkers/similar.py (91%) create mode 100644 pymode/libs/pylint/checkers/spelling.py create mode 100644 pymode/libs/pylint/checkers/stdlib.py create mode 100644 pymode/libs/pylint/checkers/strings.py rename pymode/libs/{pylama/lint/pylama_pylint => }/pylint/checkers/typecheck.py (65%) rename pymode/libs/{pylama/lint/pylama_pylint => }/pylint/checkers/utils.py (69%) rename pymode/libs/{pylama/lint/pylama_pylint => }/pylint/checkers/variables.py (60%) rename pymode/libs/{pylama/lint/pylama_pylint => }/pylint/config.py (91%) create mode 100644 pymode/libs/pylint/epylint.py create mode 100644 pymode/libs/pylint/gui.py rename pymode/libs/{pylama/lint/pylama_pylint => 
}/pylint/interfaces.py (76%) rename pymode/libs/{pylama/lint/pylama_pylint => }/pylint/lint.py (56%) create mode 100644 pymode/libs/pylint/pyreverse/__init__.py create mode 100644 pymode/libs/pylint/pyreverse/diadefslib.py create mode 100644 pymode/libs/pylint/pyreverse/diagrams.py create mode 100644 pymode/libs/pylint/pyreverse/main.py create mode 100644 pymode/libs/pylint/pyreverse/utils.py create mode 100644 pymode/libs/pylint/pyreverse/writer.py rename pymode/libs/{pylama/lint/pylama_pylint => }/pylint/reporters/__init__.py (76%) rename pymode/libs/{pylama/lint/pylama_pylint => }/pylint/reporters/guireporter.py (74%) rename pymode/libs/{pylama/lint/pylama_pylint => }/pylint/reporters/html.py (59%) create mode 100644 pymode/libs/pylint/reporters/json.py rename pymode/libs/{pylama/lint/pylama_pylint => }/pylint/reporters/text.py (84%) create mode 100644 pymode/libs/pylint/testutils.py rename pymode/libs/{pylama/lint/pylama_pylint => }/pylint/utils.py (63%) create mode 100644 pymode/libs/six.py diff --git a/AUTHORS b/AUTHORS index aad20bd1..4ffea0c1 100644 --- a/AUTHORS +++ b/AUTHORS @@ -33,10 +33,13 @@ Contributors: * Mel Boyce (http://github.com/syngin) * Mohammed (http://github.com/mbadran); * Naoya Inada (http://github.com/naoina); +* Nate Zhang (https://github.com/natezhang93) +* Paweł Korzeniewski (https://github.com/korzeniewskipl) * Pedro Algarvio (http://github.com/s0undt3ch); * Phillip Cloud (http://github.com/cpcloud); * Piet Delport (http://github.com/pjdelport); * Robert David Grant (http://github.com/bgrant); +* Robin Schneider (https://github.com/ypid) * Ronald Andreu Kaiser (http://github.com/cathoderay); * Samir Benmendil (https://github.com/Ram-Z) * Sorin Ionescu (sorin-ionescu); diff --git a/Changelog.rst b/Changelog.rst index 6e728832..e396eb69 100644 --- a/Changelog.rst +++ b/Changelog.rst @@ -3,11 +3,17 @@ Changelog * Pylama updated to version 5.0.5 * Rope libs updated +* Add wdb to debugger list in breakpoint cmd * Add 
'pymode_options_max_line_length' option * Add ability to set related checker options `:help pymode-lint-options` Options added: 'pymode_lint_options_pep8', 'pymode_lint_options_pep257', 'pymode_lint_options_mccabe', 'pymode_lint_options_pyflakes', 'pymode_lint_options_pylint' +* Highlight comments inside class/function arg lists +* Don't fold single line def +* Don't skip a line when the first docstring contains text +* Add Python documentation vertical display option +* Rope: correct refactoring function calls ## 2014-06-11 0.8.1 diff --git a/Makefile b/Makefile index 38c009c2..e27a8785 100644 --- a/Makefile +++ b/Makefile @@ -33,10 +33,10 @@ rope: @cp -r $(CURDIR)/_/rope/rope $(CURDIR)/pymode/libs/. $(PYLAMA): - cp -r ~/Dropbox/projects/pylama/pylama $(PYLAMA) + cp -r $$PRJDIR/pylama/pylama $(PYLAMA) $(PYLAMA)/lint/pylama_pylint: - cp -r ~/Dropbox/projects/pylama/plugins/pylama_pylint/pylama_pylint/ $(PYLAMA)/lint/pylama_pylint + cp -r $$PRJDIR/pylama/plugins/pylama_pylint/pylama_pylint/ $(PYLAMA)/lint/pylama_pylint $(CURDIR)/build: mkdir -p $(CURDIR)/build/usr/share/vim/addons diff --git a/pymode/autopep8.py b/pymode/autopep8.py index 5f3ccf0b..13308751 100644 --- a/pymode/autopep8.py +++ b/pymode/autopep8.py @@ -1,8 +1,8 @@ #!/usr/bin/env python -# + # Copyright (C) 2010-2011 Hideo Hattori # Copyright (C) 2011-2013 Hideo Hattori, Steven Myint -# Copyright (C) 2013-2014 Hideo Hattori, Steven Myint, Bill Wendling +# Copyright (C) 2013-2015 Hideo Hattori, Steven Myint, Bill Wendling # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -53,10 +53,11 @@ import re import signal import sys +import textwrap import token import tokenize -from pylama.lint.pylama_pep8 import pep8 +import pep8 try: @@ -65,7 +66,7 @@ unicode = str -__version__ = '1.0' +__version__ = '1.2.1a0' CR = '\r' @@ -93,6 +94,7 @@ # W602 is handled separately due to the need to avoid "with_traceback". 
CODE_TO_2TO3 = { + 'E231': ['ws_comma'], 'E721': ['idioms'], 'W601': ['has_key'], 'W603': ['ne'], @@ -100,7 +102,6 @@ 'W690': ['apply', 'except', 'exitfunc', - 'import', 'numliterals', 'operator', 'paren', @@ -113,6 +114,14 @@ 'xreadlines']} +if sys.platform == 'win32': # pragma: no cover + DEFAULT_CONFIG = os.path.expanduser(r'~\.pep8') +else: + DEFAULT_CONFIG = os.path.join(os.getenv('XDG_CONFIG_HOME') or + os.path.expanduser('~/.config'), 'pep8') +PROJECT_CONFIG = ('setup.cfg', 'tox.ini', '.pep8') + + def open_with_encoding(filename, encoding=None, mode='r'): """Return opened file with a specific encoding.""" if not encoding: @@ -146,15 +155,13 @@ def readlines_from_file(filename): def extended_blank_lines(logical_line, blank_lines, + blank_before, indent_level, previous_logical): """Check for missing blank lines after class declaration.""" if previous_logical.startswith('class '): - if ( - logical_line.startswith(('def ', 'class ', '@')) or - pep8.DOCSTRING_REGEX.match(logical_line) - ): - if indent_level and not blank_lines: + if logical_line.startswith(('def ', 'class ', '@')): + if indent_level and not blank_lines and not blank_before: yield (0, 'E309 expected 1 blank line after class declaration') elif previous_logical.startswith('def '): if blank_lines and pep8.DOCSTRING_REGEX.match(logical_line): @@ -164,6 +171,7 @@ def extended_blank_lines(logical_line, if ( indent_level and not blank_lines and + not blank_before and logical_line.startswith(('def ')) and '(self' in logical_line ): @@ -171,7 +179,8 @@ def extended_blank_lines(logical_line, pep8.register_check(extended_blank_lines) -def continued_indentation(logical_line, tokens, indent_level, noqa): +def continued_indentation(logical_line, tokens, indent_level, indent_char, + noqa): """Override pep8's function to provide indentation information.""" first_row = tokens[0][2][0] nrows = 1 + tokens[-1][2][0] - first_row @@ -185,6 +194,11 @@ def continued_indentation(logical_line, tokens, indent_level, noqa): 
indent_next = logical_line.endswith(':') row = depth = 0 + valid_hangs = ( + (DEFAULT_INDENT_SIZE,) + if indent_char != '\t' else (DEFAULT_INDENT_SIZE, + 2 * DEFAULT_INDENT_SIZE) + ) # Remember how many brackets were opened on each line. parens = [0] * nrows @@ -192,6 +206,11 @@ def continued_indentation(logical_line, tokens, indent_level, noqa): # Relative indents of physical lines. rel_indent = [0] * nrows + # For each depth, collect a list of opening rows. + open_rows = [[0]] + # For each depth, memorize the hanging indentation. + hangs = [None] + # Visual indents. indent_chances = {} last_indent = tokens[0][2] @@ -217,17 +236,18 @@ def continued_indentation(logical_line, tokens, indent_level, noqa): # Record the initial indent. rel_indent[row] = pep8.expand_indent(line) - indent_level - if depth: - # A bracket expression in a continuation line. - # Find the line that it was opened on. - for open_row in range(row - 1, -1, -1): - if parens[open_row]: - break - else: - # An unbracketed continuation line (ie, backslash). - open_row = 0 - hang = rel_indent[row] - rel_indent[open_row] + # Identify closing bracket. close_bracket = (token_type == tokenize.OP and text in ']})') + + # Is the indent relative to an opening bracket line? + for open_row in reversed(open_rows[depth]): + hang = rel_indent[row] - rel_indent[open_row] + hanging_indent = hang in valid_hangs + if hanging_indent: + break + if hangs[depth]: + hanging_indent = (hang == hangs[depth]) + visual_indent = (not close_bracket and hang > 0 and indent_chances.get(start[1])) @@ -237,23 +257,23 @@ def continued_indentation(logical_line, tokens, indent_level, noqa): yield (start, 'E124 {0}'.format(indent[depth])) elif close_bracket and not hang: pass - elif visual_indent is True: - # Visual indent is verified. - if not indent[depth]: - indent[depth] = start[1] - elif visual_indent in (text, unicode): - # Ignore token lined up with matching one from a previous line. 
- pass elif indent[depth] and start[1] < indent[depth]: # Visual indent is broken. yield (start, 'E128 {0}'.format(indent[depth])) - elif (hang == DEFAULT_INDENT_SIZE or + elif (hanging_indent or (indent_next and rel_indent[row] == 2 * DEFAULT_INDENT_SIZE)): # Hanging indent is verified. if close_bracket: yield (start, 'E123 {0}'.format(indent_level + rel_indent[open_row])) + hangs[depth] = hang + elif visual_indent is True: + # Visual indent is verified. + indent[depth] = start[1] + elif visual_indent in (text, unicode): + # Ignore token lined up with matching one from a previous line. + pass else: one_indented = (indent_level + rel_indent[open_row] + DEFAULT_INDENT_SIZE) @@ -262,16 +282,20 @@ def continued_indentation(logical_line, tokens, indent_level, noqa): error = ('E122', one_indented) elif indent[depth]: error = ('E127', indent[depth]) - elif hang % DEFAULT_INDENT_SIZE: - error = ('E121', one_indented) - else: + elif hang > DEFAULT_INDENT_SIZE: error = ('E126', one_indented) + else: + hangs[depth] = hang + error = ('E121', one_indented) yield (start, '{0} {1}'.format(*error)) # Look for visual indenting. - if (parens[row] and token_type not in (tokenize.NL, tokenize.COMMENT) - and not indent[depth]): + if ( + parens[row] and + token_type not in (tokenize.NL, tokenize.COMMENT) and + not indent[depth] + ): indent[depth] = start[1] indent_chances[start[1]] = True # Deal with implicit string concatenation. @@ -282,29 +306,36 @@ def continued_indentation(logical_line, tokens, indent_level, noqa): # 4. elif not indent_chances and not row and not depth and text == 'if': indent_chances[end[1] + 1] = True + elif text == ':' and line[end[1]:].isspace(): + open_rows[depth].append(row) # Keep track of bracket depth. 
if token_type == tokenize.OP: if text in '([{': depth += 1 indent.append(0) + hangs.append(None) + if len(open_rows) == depth: + open_rows.append([]) + open_rows[depth].append(row) parens[row] += 1 elif text in ')]}' and depth > 0: # Parent indents should not be more than this one. prev_indent = indent.pop() or last_indent[1] + hangs.pop() for d in range(depth): if indent[d] > prev_indent: indent[d] = 0 for ind in list(indent_chances): if ind >= prev_indent: del indent_chances[ind] + del open_rows[depth + 1:] depth -= 1 if depth: indent_chances[indent[depth]] = True for idx in range(row, -1, -1): if parens[idx]: parens[idx] -= 1 - rel_indent[row] = rel_indent[idx] break assert len(indent) == depth + 1 if ( @@ -316,6 +347,9 @@ def continued_indentation(logical_line, tokens, indent_level, noqa): indent_chances[start[1]] = text last_token_multiline = (start[0] != end[0]) + if last_token_multiline: + rel_indent[end[0] - first_row] = rel_indent[row] + last_line = line if ( @@ -323,8 +357,9 @@ def continued_indentation(logical_line, tokens, indent_level, noqa): not last_line_begins_with_multiline and pep8.expand_indent(line) == indent_level + DEFAULT_INDENT_SIZE ): - yield (last_indent, 'E125 {0}'.format(indent_level + - 2 * DEFAULT_INDENT_SIZE)) + pos = (start[0], indent[0] + 4) + yield (pos, 'E125 {0}'.format(indent_level + + 2 * DEFAULT_INDENT_SIZE)) del pep8._checks['logical_line'][pep8.continued_indentation] pep8.register_check(continued_indentation) @@ -382,7 +417,10 @@ def __init__(self, filename, set() if long_line_ignore_cache is None else long_line_ignore_cache) - # method definition + # Many fixers are the same even though pep8 categorizes them + # differently. 
+ self.fix_e115 = self.fix_e112 + self.fix_e116 = self.fix_e113 self.fix_e121 = self._fix_reindent self.fix_e122 = self._fix_reindent self.fix_e123 = self._fix_reindent @@ -412,8 +450,7 @@ def __init__(self, filename, options and (options.aggressive >= 2 or options.experimental) else self.fix_long_line_physically) self.fix_e703 = self.fix_e702 - - self._ws_comma_done = False + self.fix_w293 = self.fix_w291 def _fix_source(self, results): try: @@ -501,15 +538,20 @@ def fix(self): n=len(results), progress=progress), file=sys.stderr) if self.options.line_range: - results = [ - r for r in results - if self.options.line_range[0] <= r['line'] <= - self.options.line_range[1]] + start, end = self.options.line_range + results = [r for r in results + if start <= r['line'] <= end] self._fix_source(filter_results(source=''.join(self.source), results=results, - aggressive=self.options.aggressive, - indent_size=self.options.indent_size)) + aggressive=self.options.aggressive)) + + if self.options.line_range: + # If number of lines has changed then change line_range. + count = sum(sline.count('\n') + for sline in self.source[start - 1:end]) + self.options.line_range[1] = start + count - 1 + return ''.join(self.source) def _fix_reindent(self, result): @@ -524,6 +566,31 @@ def _fix_reindent(self, result): self.source[line_index] = ' ' * num_indent_spaces + target.lstrip() + def fix_e112(self, result): + """Fix under-indented comments.""" + line_index = result['line'] - 1 + target = self.source[line_index] + + if not target.lstrip().startswith('#'): + # Don't screw with invalid syntax. + return [] + + self.source[line_index] = self.indent_word + target + + def fix_e113(self, result): + """Fix over-indented comments.""" + line_index = result['line'] - 1 + target = self.source[line_index] + + indent = _get_indentation(target) + stripped = target.lstrip() + + if not stripped.startswith('#'): + # Don't screw with invalid syntax. 
+ return [] + + self.source[line_index] = indent[1:] + stripped + def fix_e125(self, result): """Fix indentation undistinguish from the next logical line.""" num_indent_spaces = int(result['info'].split()[1]) @@ -582,17 +649,6 @@ def fix_e225(self, result): def fix_e231(self, result): """Add missing whitespace.""" - # Optimize for comma case. This will fix all commas in the full source - # code in one pass. Don't do this more than once. If it fails the first - # time, there is no point in trying again. - if ',' in result['info'] and not self._ws_comma_done: - self._ws_comma_done = True - original = ''.join(self.source) - new = refactor(original, ['ws_comma']) - if original.strip() != new.strip(): - self.source = [new] - return range(1, 1 + len(original)) - line_index = result['line'] - 1 target = self.source[line_index] offset = result['column'] @@ -795,8 +851,8 @@ def fix_long_line(self, target, previous_line, def fix_e502(self, result): """Remove extraneous escape of newline.""" - line_index = result['line'] - 1 - target = self.source[line_index] + (line_index, _, target) = get_index_offset_contents(result, + self.source) self.source[line_index] = target.rstrip('\n\r \t\\') + '\n' def fix_e701(self, result): @@ -835,14 +891,21 @@ def fix_e702(self, result, logical): second = (_get_indentation(logical_lines[0]) + target[offset:].lstrip(';').lstrip()) - self.source[line_index] = first + '\n' + second + # find inline commnet + inline_comment = None + if '# ' == target[offset:].lstrip(';').lstrip()[:2]: + inline_comment = target[offset:].lstrip(';') + + if inline_comment: + self.source[line_index] = first + inline_comment + else: + self.source[line_index] = first + '\n' + second return [line_index + 1] def fix_e711(self, result): """Fix comparison with None.""" - line_index = result['line'] - 1 - target = self.source[line_index] - offset = result['column'] - 1 + (line_index, offset, target) = get_index_offset_contents(result, + self.source) right_offset = offset + 2 
if right_offset >= len(target): @@ -865,17 +928,16 @@ def fix_e711(self, result): self.source[line_index] = ' '.join([left, new_center, right]) def fix_e712(self, result): - """Fix comparison with boolean.""" - line_index = result['line'] - 1 - target = self.source[line_index] - offset = result['column'] - 1 + """Fix (trivial case of) comparison with boolean.""" + (line_index, offset, target) = get_index_offset_contents(result, + self.source) # Handle very easy "not" special cases. - if re.match(r'^\s*if \w+ == False:$', target): - self.source[line_index] = re.sub(r'if (\w+) == False:', + if re.match(r'^\s*if [\w.]+ == False:$', target): + self.source[line_index] = re.sub(r'if ([\w.]+) == False:', r'if not \1:', target, count=1) - elif re.match(r'^\s*if \w+ != True:$', target): - self.source[line_index] = re.sub(r'if (\w+) != True:', + elif re.match(r'^\s*if [\w.]+ != True:$', target): + self.source[line_index] = re.sub(r'if ([\w.]+) != True:', r'if not \1:', target, count=1) else: right_offset = offset + 2 @@ -903,15 +965,55 @@ def fix_e712(self, result): self.source[line_index] = left + new_right + def fix_e713(self, result): + """Fix (trivial case of) non-membership check.""" + (line_index, _, target) = get_index_offset_contents(result, + self.source) + + # Handle very easy case only. 
+ if re.match(r'^\s*if not [\w.]+ in [\w.]+:$', target): + self.source[line_index] = re.sub(r'if not ([\w.]+) in ([\w.]+):', + r'if \1 not in \2:', + target, + count=1) + def fix_w291(self, result): """Remove trailing whitespace.""" fixed_line = self.source[result['line'] - 1].rstrip() self.source[result['line'] - 1] = fixed_line + '\n' + def fix_w391(self, _): + """Remove trailing blank lines.""" + blank_count = 0 + for line in reversed(self.source): + line = line.rstrip() + if line: + break + else: + blank_count += 1 + + original_length = len(self.source) + self.source = self.source[:original_length - blank_count] + return range(1, 1 + original_length) + + +def get_index_offset_contents(result, source): + """Return (line_index, column_offset, line_contents).""" + line_index = result['line'] - 1 + return (line_index, + result['column'] - 1, + source[line_index]) + def get_fixed_long_line(target, previous_line, original, indent_word=' ', max_line_length=79, aggressive=False, experimental=False, verbose=False): + """Break up long line and return result. + + Do this by generating multiple reformatted candidates and then + ranking the candidates to heuristically select the best option. + + """ indent = _get_indentation(target) source = target[len(indent):] assert source.lstrip() == source @@ -930,19 +1032,28 @@ def get_fixed_long_line(target, previous_line, original, # Also sort alphabetically as a tie breaker (for determinism). 
candidates = sorted( sorted(set(candidates).union([target, original])), - key=lambda x: line_shortening_rank(x, - indent_word, - max_line_length)) + key=lambda x: line_shortening_rank( + x, + indent_word, + max_line_length, + experimental=experimental)) if verbose >= 4: print(('-' * 79 + '\n').join([''] + candidates + ['']), - file=codecs.getwriter('utf-8')(sys.stderr.buffer - if hasattr(sys.stderr, - 'buffer') - else sys.stderr)) + file=wrap_output(sys.stderr, 'utf-8')) if candidates: - return candidates[0] + best_candidate = candidates[0] + # Don't allow things to get longer. + if longest_line_length(best_candidate) > longest_line_length(original): + return None + else: + return best_candidate + + +def longest_line_length(code): + """Return length of longest line.""" + return max(len(line) for line in code.splitlines()) def join_logical_line(logical_line): @@ -978,11 +1089,11 @@ def untokenize_without_newlines(tokens): last_row = end_row last_column = end_column - return text + return text.rstrip() def _find_logical(source_lines): - # make a variable which is the index of all the starts of lines + # Make a variable which is the index of all the starts of lines. logical_start = [] logical_end = [] last_newline = True @@ -1071,7 +1182,7 @@ def split_and_strip_non_empty_lines(text): return [line.strip() for line in text.splitlines() if line.strip()] -def fix_e269(source, aggressive=False): +def fix_e265(source, aggressive=False): # pylint: disable=unused-argument """Format block comments.""" if '#' not in source: # Optimization. @@ -1093,11 +1204,13 @@ def fix_e269(source, aggressive=False): # Normalize beginning if not a shebang. if len(line) > 1: + pos = next((index for index, c in enumerate(line) + if c != '#')) if ( # Leave multiple spaces like '# ' alone. - (line.count('#') > 1 or line[1].isalnum()) + (line[:pos].count('#') > 1 or line[1].isalnum()) and # Leave stylistic outlined blocks alone. 
- and not line.rstrip().endswith('#') + not line.rstrip().endswith('#') ): line = '# ' + line.lstrip('# \t') @@ -1108,7 +1221,7 @@ def fix_e269(source, aggressive=False): return ''.join(fixed_lines) -def refactor(source, fixer_names, ignore=None): +def refactor(source, fixer_names, ignore=None, filename=''): """Return refactored code using lib2to3. Skip if ignore string is produced in the refactored code. @@ -1117,7 +1230,8 @@ def refactor(source, fixer_names, ignore=None): from lib2to3 import pgen2 try: new_text = refactor_with_2to3(source, - fixer_names=fixer_names) + fixer_names=fixer_names, + filename=filename) except (pgen2.parse.ParseError, SyntaxError, UnicodeDecodeError, @@ -1139,7 +1253,8 @@ def code_to_2to3(select, ignore): return fixes -def fix_2to3(source, aggressive=True, select=None, ignore=None): +def fix_2to3(source, + aggressive=True, select=None, ignore=None, filename=''): """Fix various deprecated code (via lib2to3).""" if not aggressive: return source @@ -1149,7 +1264,8 @@ def fix_2to3(source, aggressive=True, select=None, ignore=None): return refactor(source, code_to_2to3(select=select, - ignore=ignore)) + ignore=ignore), + filename=filename) def fix_w602(source, aggressive=True): @@ -1217,7 +1333,7 @@ def get_diff_text(old, new, filename): text += line # Work around missing newline (http://bugs.python.org/issue2142). 
- if not line.endswith(newline): + if text and not line.endswith(newline): text += newline + r'\ No newline at end of file' + newline return text @@ -1291,7 +1407,6 @@ def shorten_line(tokens, source, indentation, indent_word, max_line_length, tokens=tokens, source=source, indentation=indentation, - indent_word=indent_word, max_line_length=max_line_length): yield shortened @@ -1429,16 +1544,25 @@ def __repr__(self): ########################################################################### # Public Methods - def add(self, obj, indent_amt): + def add(self, obj, indent_amt, break_after_open_bracket): if isinstance(obj, Atom): self._add_item(obj, indent_amt) return - self._add_container(obj, indent_amt) + self._add_container(obj, indent_amt, break_after_open_bracket) def add_comment(self, item): - self._lines.append(self._Space()) - self._lines.append(self._Space()) + num_spaces = 2 + if len(self._lines) > 1: + if isinstance(self._lines[-1], self._Space): + num_spaces -= 1 + if len(self._lines) > 2: + if isinstance(self._lines[-2], self._Space): + num_spaces -= 1 + + while num_spaces > 0: + self._lines.append(self._Space()) + num_spaces -= 1 self._lines.append(item) def add_indent(self, indent_amt): @@ -1460,8 +1584,8 @@ def add_space_if_needed(self, curr_text, equal=False): return prev_text = unicode(self._prev_item) - prev_prev_text = \ - unicode(self._prev_prev_item) if self._prev_prev_item else '' + prev_prev_text = ( + unicode(self._prev_prev_item) if self._prev_prev_item else '') if ( # The previous item was a keyword or identifier and the current @@ -1494,10 +1618,14 @@ def add_space_if_needed(self, curr_text, equal=False): (self._prev_prev_item.is_name or self._prev_prev_item.is_number or self._prev_prev_item.is_string)) and - prev_text in ('+', '-', '%', '*', '/', '//', '**'))))) + prev_text in ('+', '-', '%', '*', '/', '//', '**', 'in'))))) ): self._lines.append(self._Space()) + def previous_item(self): + """Return the previous non-whitespace item.""" + 
return self._prev_item + def fits_on_current_line(self, item_extent): return self.current_size() + item_extent <= self._max_line_length @@ -1569,24 +1697,41 @@ def _add_item(self, item, indent_amt): self._bracket_depth -= 1 assert self._bracket_depth >= 0 - def _add_container(self, container, indent_amt): + def _add_container(self, container, indent_amt, break_after_open_bracket): + actual_indent = indent_amt + 1 + if ( unicode(self._prev_item) != '=' and not self.line_empty() and not self.fits_on_current_line( - container.size + self._bracket_depth + 2) and - - # Don't split before the opening bracket of a call. - (unicode(container)[0] != '(' or not self._prev_item.is_name) + container.size + self._bracket_depth + 2) ): - # If the container doesn't fit on the current line and the current - # line isn't empty, place the container on the next line. - self._lines.append(self._LineBreak()) - self._lines.append(self._Indent(indent_amt)) + + if unicode(container)[0] == '(' and self._prev_item.is_name: + # Don't split before the opening bracket of a call. + break_after_open_bracket = True + actual_indent = indent_amt + 4 + elif ( + break_after_open_bracket or + unicode(self._prev_item) not in '([{' + ): + # If the container doesn't fit on the current line and the + # current line isn't empty, place the container on the next + # line. + self._lines.append(self._LineBreak()) + self._lines.append(self._Indent(indent_amt)) + break_after_open_bracket = False + else: + actual_indent = self.current_size() + 1 + break_after_open_bracket = False + + if isinstance(container, (ListComprehension, IfExpression)): + actual_indent = indent_amt # Increase the continued indentation only if recursing on a # container. - container.reflow(self, ' ' * (indent_amt + 1)) + container.reflow(self, ' ' * actual_indent, + break_after_open_bracket=break_after_open_bracket) def _prevent_default_initializer_splitting(self, item, indent_amt): """Prevent splitting between a default initializer. 
@@ -1636,9 +1781,15 @@ def _split_after_delimiter(self, item, indent_amt): last_space = None for item in reversed(self._lines): + if ( + last_space and + (not isinstance(item, Atom) or not item.is_colon) + ): + break + else: + last_space = None if isinstance(item, self._Space): last_space = item - break if isinstance(item, (self._LineBreak, self._Indent)): return @@ -1693,8 +1844,12 @@ def __repr__(self): def __len__(self): return self.size - def reflow(self, reflowed_lines, continued_indent, extent, - break_after_open_bracket=False): + def reflow( + self, reflowed_lines, continued_indent, extent, + break_after_open_bracket=False, + is_list_comp_or_if_expr=False, + next_is_dot=False + ): if self._atom.token_type == tokenize.COMMENT: reflowed_lines.add_comment(self) return @@ -1705,9 +1860,16 @@ def reflow(self, reflowed_lines, continued_indent, extent, # Some atoms will need an extra 1-sized space token after them. total_size += 1 + prev_item = reflowed_lines.previous_item() if ( + not is_list_comp_or_if_expr and not reflowed_lines.fits_on_current_line(total_size) and - not reflowed_lines.line_empty() + not (next_is_dot and + reflowed_lines.fits_on_current_line(self.size + 1)) and + not reflowed_lines.line_empty() and + not self.is_colon and + not (prev_item and prev_item.is_name and + unicode(self) == '(') ): # Start a new line if there is already something on the line and # adding this atom would make it go over the max line length. 
@@ -1715,7 +1877,8 @@ def reflow(self, reflowed_lines, continued_indent, extent, else: reflowed_lines.add_space_if_needed(unicode(self)) - reflowed_lines.add(self, len(continued_indent)) + reflowed_lines.add(self, len(continued_indent), + break_after_open_bracket) def emit(self): return self.__repr__() @@ -1788,14 +1951,27 @@ def __getitem__(self, idx): def reflow(self, reflowed_lines, continued_indent, break_after_open_bracket=False): + last_was_container = False for (index, item) in enumerate(self._items): + next_item = get_item(self._items, index + 1) + if isinstance(item, Atom): + is_list_comp_or_if_expr = ( + isinstance(self, (ListComprehension, IfExpression))) item.reflow(reflowed_lines, continued_indent, - self._get_extent(index)) + self._get_extent(index), + is_list_comp_or_if_expr=is_list_comp_or_if_expr, + next_is_dot=(next_item and + unicode(next_item) == '.')) + if last_was_container and item.is_comma: + reflowed_lines.add_line_break(continued_indent) + last_was_container = False else: # isinstance(item, Container) - reflowed_lines.add(item, len(continued_indent)) + reflowed_lines.add(item, len(continued_indent), + break_after_open_bracket) + last_was_container = not isinstance(item, (ListComprehension, + IfExpression)) - next_item = get_item(self._items, index + 1) if ( break_after_open_bracket and index == 0 and # Prefer to keep empty containers together instead of @@ -1809,12 +1985,14 @@ def reflow(self, reflowed_lines, continued_indent, else: next_next_item = get_item(self._items, index + 2) if ( - unicode(item) not in '.%' and next_item and - next_next_item and unicode(next_item) != ':' and - not isinstance(next_next_item, Atom) and + unicode(item) not in ['.', '%', 'in'] and + next_item and not isinstance(next_item, Container) and + unicode(next_item) != ':' and + next_next_item and (not isinstance(next_next_item, Atom) or + unicode(next_item) == 'not') and not reflowed_lines.line_empty() and not reflowed_lines.fits_on_current_line( - 
next_item.size + next_next_item.size + 2) + self._get_extent(index + 1) + 2) ): reflowed_lines.add_line_break(continued_indent) @@ -1822,14 +2000,37 @@ def _get_extent(self, index): """The extent of the full element. E.g., the length of a function call or keyword. + """ extent = 0 + prev_item = get_item(self._items, index - 1) + seen_dot = prev_item and unicode(prev_item) == '.' while index < len(self._items): item = get_item(self._items, index) - if unicode(item) not in '.=' and not item.is_name: - break - extent += len(item) index += 1 + + if isinstance(item, (ListComprehension, IfExpression)): + break + + if isinstance(item, Container): + if prev_item and prev_item.is_name: + if seen_dot: + extent += 1 + else: + extent += item.size + + prev_item = item + continue + elif (unicode(item) not in ['.', '=', ':', 'not'] and + not item.is_name and not item.is_string): + break + + if unicode(item) == '.': + seen_dot = True + + extent += item.size + prev_item = item + return extent @property @@ -1908,6 +2109,15 @@ class ListComprehension(Container): """A high-level representation of a list comprehension.""" + @property + def size(self): + length = 0 + for item in self._items: + if isinstance(item, IfExpression): + break + length += item.size + return length + class IfExpression(Container): @@ -1970,6 +2180,8 @@ def _parse_container(tokens, index, for_or_if=None): index += 1 + return (None, None) + def _parse_tokens(tokens): """Parse the tokens. 
@@ -1993,6 +2205,8 @@ def _parse_tokens(tokens): if tok.token_string in '([{': (container, index) = _parse_container(tokens, index) + if not container: + return None parsed_tokens.append(container) else: parsed_tokens.append(Atom(tok)) @@ -2002,7 +2216,7 @@ def _parse_tokens(tokens): return parsed_tokens -def _reflow_lines(parsed_tokens, indentation, indent_word, max_line_length, +def _reflow_lines(parsed_tokens, indentation, max_line_length, start_on_prefix_line): """Reflow the lines so that it looks nice.""" @@ -2015,7 +2229,20 @@ def _reflow_lines(parsed_tokens, indentation, indent_word, max_line_length, break_after_open_bracket = not start_on_prefix_line lines = ReformattedLines(max_line_length) - lines.add_indent(len(indentation)) + lines.add_indent(len(indentation.lstrip('\r\n'))) + + if not start_on_prefix_line: + # If splitting after the opening bracket will cause the first element + # to be aligned weirdly, don't try it. + first_token = get_item(parsed_tokens, 0) + second_token = get_item(parsed_tokens, 1) + + if ( + first_token and second_token and + unicode(second_token)[0] == '(' and + len(indentation) + len(first_token) + 1 == len(continued_indent) + ): + return None for item in parsed_tokens: lines.add_space_if_needed(unicode(item), equal=True) @@ -2031,7 +2258,7 @@ def _reflow_lines(parsed_tokens, indentation, indent_word, max_line_length, return lines.emit() -def _shorten_line_at_tokens_new(tokens, source, indentation, indent_word, +def _shorten_line_at_tokens_new(tokens, source, indentation, max_line_length): """Shorten the line taking its length into account. @@ -2048,14 +2275,14 @@ def _shorten_line_at_tokens_new(tokens, source, indentation, indent_word, if parsed_tokens: # Perform two reflows. The first one starts on the same line as the # prefix. The second starts on the line after the prefix. 
- fixed = _reflow_lines(parsed_tokens, indentation, indent_word, - max_line_length, start_on_prefix_line=True) - if check_syntax(normalize_multiline(fixed.lstrip())): + fixed = _reflow_lines(parsed_tokens, indentation, max_line_length, + start_on_prefix_line=True) + if fixed and check_syntax(normalize_multiline(fixed.lstrip())): yield fixed - fixed = _reflow_lines(parsed_tokens, indentation, indent_word, - max_line_length, start_on_prefix_line=False) - if check_syntax(normalize_multiline(fixed.lstrip())): + fixed = _reflow_lines(parsed_tokens, indentation, max_line_length, + start_on_prefix_line=False) + if fixed and check_syntax(normalize_multiline(fixed.lstrip())): yield fixed @@ -2183,6 +2410,8 @@ def normalize_multiline(line): return line + 'def _(): pass' elif line.startswith('class '): return line + ' pass' + elif line.startswith(('if ', 'elif ', 'for ', 'while ')): + return line + ' pass' else: return line @@ -2208,9 +2437,12 @@ def __init__(self, options): super(QuietReport, self).__init__(options) self.__full_error_results = [] - def error(self, line_number, offset, text, _): + def error(self, line_number, offset, text, check): """Collect errors.""" - code = super(QuietReport, self).error(line_number, offset, text, _) + code = super(QuietReport, self).error(line_number, + offset, + text, + check) if code: self.__full_error_results.append( {'id': code, @@ -2283,8 +2515,6 @@ def run(self, indent_size=DEFAULT_INDENT_SIZE): return self.input_text # Remove trailing empty lines. lines = self.lines - while lines and lines[-1] == '\n': - lines.pop() # Sentinel. stats.append((len(lines), 0)) # Map count of leading spaces to # we want. 
@@ -2315,8 +2545,8 @@ def run(self, indent_size=DEFAULT_INDENT_SIZE): if have == _leading_space_count(lines[jline]): want = jlevel * indent_size break - if want < 0: # Maybe it's a hanging - # comment like this one, + if want < 0: # Maybe it's a hanging + # comment like this one, # in which case we should shift it like its base # line got shifted. for j in range(i - 1, -1, -1): @@ -2370,8 +2600,8 @@ def _reindent_stats(tokens): our headache! """ - find_stmt = 1 # next token begins a fresh stmt? - level = 0 # current indent level + find_stmt = 1 # Next token begins a fresh stmt? + level = 0 # Current indent level. stats = [] for t in tokens: @@ -2396,8 +2626,8 @@ def _reindent_stats(tokens): elif token_type == tokenize.COMMENT: if find_stmt: stats.append((sline, -1)) - # but we're still looking for a new stmt, so leave - # find_stmt alone + # But we're still looking for a new stmt, so leave + # find_stmt alone. elif token_type == tokenize.NL: pass @@ -2407,7 +2637,7 @@ def _reindent_stats(tokens): # must be the first token of the next program statement, or an # ENDMARKER. find_stmt = 0 - if line: # not endmarker + if line: # Not endmarker. stats.append((sline, level)) return stats @@ -2421,7 +2651,7 @@ def _leading_space_count(line): return i -def refactor_with_2to3(source_text, fixer_names): +def refactor_with_2to3(source_text, fixer_names, filename=''): """Use lib2to3 to refactor the source. Return the refactored source code. @@ -2433,7 +2663,8 @@ def refactor_with_2to3(source_text, fixer_names): from lib2to3.pgen2 import tokenize as lib2to3_tokenize try: - return unicode(tool.refactor_string(source_text, name='')) + # The name parameter is necessary particularly for the "import" fixer. 
+ return unicode(tool.refactor_string(source_text, name=filename)) except lib2to3_tokenize.TokenError: return source_text @@ -2446,7 +2677,7 @@ def check_syntax(code): return False -def filter_results(source, results, aggressive, indent_size): +def filter_results(source, results, aggressive): """Filter out spurious reports from pep8. If aggressive is True, we allow possibly unsafe fixes (E711, E712). @@ -2459,6 +2690,8 @@ def filter_results(source, results, aggressive, indent_size): commented_out_code_line_numbers = commented_out_code_lines(source) + has_e901 = any(result['id'].lower() == 'e901' for result in results) + for r in results: issue_id = r['id'].lower() @@ -2483,13 +2716,20 @@ def filter_results(source, results, aggressive, indent_size): continue if aggressive <= 1: - if issue_id.startswith(('e712', )): + if issue_id.startswith(('e712', 'e713')): continue if r['line'] in commented_out_code_line_numbers: if issue_id.startswith(('e26', 'e501')): continue + # Do not touch indentation if there is a token error caused by + # incomplete multi-line statement. Otherwise, we risk screwing up the + # indentation. + if has_e901: + if issue_id.startswith(('e1', 'e7')): + continue + yield r @@ -2582,7 +2822,6 @@ def shorten_comment(line, max_line_length, last_comment=False): # Trim comments that end with things like --------- return line[:max_line_length] + '\n' elif last_comment and re.match(r'\s*#+\s*\w+', line): - import textwrap split_lines = textwrap.wrap(line.lstrip(' \t#'), initial_indent=indentation, subsequent_indent=indentation, @@ -2624,18 +2863,45 @@ def code_match(code, select, ignore): return True -def fix_code(source, options=None): - """Return fixed source code.""" - if not options: - options = parse_args(['']) +def fix_code(source, options=None, encoding=None, apply_config=False): + """Return fixed source code. + + "encoding" will be used to decode "source" if it is a byte string. 
+ + """ + options = _get_options(options, apply_config) if not isinstance(source, unicode): - source = source.decode(locale.getpreferredencoding(False)) + source = source.decode(encoding or get_encoding()) sio = io.StringIO(source) return fix_lines(sio.readlines(), options=options) +def _get_options(raw_options, apply_config): + """Return parsed options.""" + if not raw_options: + return parse_args([''], apply_config=apply_config) + + if isinstance(raw_options, dict): + options = parse_args([''], apply_config=apply_config) + for name, value in raw_options.items(): + if not hasattr(options, name): + raise ValueError("No such option '{}'".format(name)) + + # Check for very basic type errors. + expected_type = type(getattr(options, name)) + if not isinstance(expected_type, (str, unicode)): + if isinstance(value, (str, unicode)): + raise ValueError( + "Option '{}' should not be a string".format(name)) + setattr(options, name, value) + else: + options = raw_options + + return options + + def fix_lines(source_lines, options, filename=''): """Return fixed source code.""" # Transform everything to line feed. Then change them back to original @@ -2647,10 +2913,13 @@ def fix_lines(source_lines, options, filename=''): previous_hashes = set() if options.line_range: + # Disable "apply_local_fixes()" for now due to issue #175. fixed_source = tmp_source else: # Apply global fixes only once (for efficiency). 
- fixed_source = apply_global_fixes(tmp_source, options) + fixed_source = apply_global_fixes(tmp_source, + options, + filename=filename) passes = 0 long_line_ignore_cache = set() @@ -2675,9 +2944,9 @@ def fix_lines(source_lines, options, filename=''): return ''.join(normalize_line_endings(sio.readlines(), original_newline)) -def fix_file(filename, options=None, output=None): +def fix_file(filename, options=None, output=None, apply_config=False): if not options: - options = parse_args([filename]) + options = parse_args([filename], apply_config=apply_config) original_source = readlines_from_file(filename) @@ -2687,11 +2956,7 @@ def fix_file(filename, options=None, output=None): encoding = detect_encoding(filename) if output: - output = codecs.getwriter(encoding)(output.buffer - if hasattr(output, 'buffer') - else output) - - output = LineEndingWrapper(output) + output = LineEndingWrapper(wrap_output(output, encoding=encoding)) fixed_source = fix_lines(fixed_source, options, filename=filename) @@ -2719,7 +2984,7 @@ def fix_file(filename, options=None, output=None): def global_fixes(): """Yield multiple (code, function) tuples.""" - for function in globals().values(): + for function in list(globals().values()): if inspect.isfunction(function): arguments = inspect.getargspec(function)[0] if arguments[:1] != ['source']: @@ -2730,21 +2995,23 @@ def global_fixes(): yield (code, function) -def apply_global_fixes(source, options): +def apply_global_fixes(source, options, where='global', filename=''): """Run global fixes on source code. These are fixes that only need be done once (unlike those in FixPEP8, which are dependent on pep8). 
""" - if code_match('E101', select=options.select, ignore=options.ignore): + if any(code_match(code, select=options.select, ignore=options.ignore) + for code in ['E101', 'E111']): source = reindent(source, indent_size=options.indent_size) for (code, function) in global_fixes(): if code_match(code, select=options.select, ignore=options.ignore): if options.verbose: - print('---> Applying global fix for {0}'.format(code.upper()), + print('---> Applying {0} fix for {1}'.format(where, + code.upper()), file=sys.stderr) source = function(source, aggressive=options.aggressive) @@ -2752,7 +3019,8 @@ def apply_global_fixes(source, options): source = fix_2to3(source, aggressive=options.aggressive, select=options.select, - ignore=options.ignore) + ignore=options.ignore, + filename=filename) return source @@ -2784,48 +3052,57 @@ def create_parser(): prog='autopep8') parser.add_argument('--version', action='version', version='%(prog)s ' + __version__) - parser.add_argument('-v', '--verbose', action='count', dest='verbose', + parser.add_argument('-v', '--verbose', action='count', default=0, help='print verbose messages; ' - 'multiple -v result in more verbose messages') - parser.add_argument('-d', '--diff', action='store_true', dest='diff', + 'multiple -v result in more verbose messages') + parser.add_argument('-d', '--diff', action='store_true', help='print the diff for the fixed source') parser.add_argument('-i', '--in-place', action='store_true', help='make changes to files in place') + parser.add_argument('--global-config', metavar='filename', + default=DEFAULT_CONFIG, + help='path to a global pep8 config file; if this file ' + 'does not exist then this is ignored ' + '(default: {0})'.format(DEFAULT_CONFIG)) + parser.add_argument('--ignore-local-config', action='store_true', + help="don't look for and apply local config files; " + 'if not passed, defaults are updated with any ' + "config files in the project's root directory") parser.add_argument('-r', '--recursive', 
action='store_true', help='run recursively over directories; ' - 'must be used with --in-place or --diff') + 'must be used with --in-place or --diff') parser.add_argument('-j', '--jobs', type=int, metavar='n', default=1, help='number of parallel jobs; ' - 'match CPU count if value is less than 1') + 'match CPU count if value is less than 1') parser.add_argument('-p', '--pep8-passes', metavar='n', default=-1, type=int, help='maximum number of additional pep8 passes ' - '(default: infinite)') + '(default: infinite)') parser.add_argument('-a', '--aggressive', action='count', default=0, help='enable non-whitespace changes; ' - 'multiple -a result in more aggressive changes') + 'multiple -a result in more aggressive changes') parser.add_argument('--experimental', action='store_true', help='enable experimental fixes') parser.add_argument('--exclude', metavar='globs', help='exclude file/directory names that match these ' - 'comma-separated globs') + 'comma-separated globs') parser.add_argument('--list-fixes', action='store_true', help='list codes for fixes; ' 'used by --ignore and --select') parser.add_argument('--ignore', metavar='errors', default='', help='do not fix these errors/warnings ' - '(default: {0})'.format(DEFAULT_IGNORE)) + '(default: {0})'.format(DEFAULT_IGNORE)) parser.add_argument('--select', metavar='errors', default='', help='fix only these errors/warnings (e.g. E4,W)') parser.add_argument('--max-line-length', metavar='n', default=79, type=int, help='set maximum allowed line length ' - '(default: %(default)s)') - parser.add_argument('--range', metavar='line', dest='line_range', + '(default: %(default)s)') + parser.add_argument('--line-range', '--range', metavar='line', default=None, type=int, nargs=2, help='only fix errors found within this inclusive ' - 'range of line numbers (e.g. 1 99); ' - 'line numbers are indexed at 1') + 'range of line numbers (e.g. 
1 99); ' + 'line numbers are indexed at 1') parser.add_argument('--indent-size', default=DEFAULT_INDENT_SIZE, type=int, metavar='n', help='number of spaces per indent level ' @@ -2836,7 +3113,7 @@ def create_parser(): return parser -def parse_args(arguments): +def parse_args(arguments, apply_config=False): """Parse command-line options.""" parser = create_parser() args = parser.parse_args(arguments) @@ -2846,6 +3123,11 @@ def parse_args(arguments): args.files = [decode_filename(name) for name in args.files] + if apply_config: + parser = read_config(args, parser) + args = parser.parse_args(arguments) + args.files = [decode_filename(name) for name in args.files] + if '-' in args.files: if len(args.files) > 1: parser.error('cannot mix stdin and regular files') @@ -2867,9 +3149,6 @@ def parse_args(arguments): if args.recursive and not (args.in_place or args.diff): parser.error('--recursive must be used with --in-place or --diff') - if args.exclude and not args.recursive: - parser.error('--exclude is only relevant when used with --recursive') - if args.in_place and args.diff: parser.error('--in-place and --diff are mutually exclusive') @@ -2877,19 +3156,19 @@ def parse_args(arguments): parser.error('--max-line-length must be greater than 0') if args.select: - args.select = args.select.split(',') + args.select = _split_comma_separated(args.select) if args.ignore: - args.ignore = args.ignore.split(',') + args.ignore = _split_comma_separated(args.ignore) elif not args.select: if args.aggressive: # Enable everything by default if aggressive. 
args.select = ['E', 'W'] else: - args.ignore = DEFAULT_IGNORE.split(',') + args.ignore = _split_comma_separated(DEFAULT_IGNORE) if args.exclude: - args.exclude = args.exclude.split(',') + args.exclude = _split_comma_separated(args.exclude) else: args.exclude = [] @@ -2902,9 +3181,54 @@ def parse_args(arguments): if args.jobs > 1 and not args.in_place: parser.error('parallel jobs requires --in-place') + if args.line_range: + if args.line_range[0] <= 0: + parser.error('--range must be positive numbers') + if args.line_range[0] > args.line_range[1]: + parser.error('First value of --range should be less than or equal ' + 'to the second') + return args +def read_config(args, parser): + """Read both user configuration and local configuration.""" + try: + from configparser import ConfigParser as SafeConfigParser + from configparser import Error + except ImportError: + from ConfigParser import SafeConfigParser + from ConfigParser import Error + + config = SafeConfigParser() + + try: + config.read(args.global_config) + + if not args.ignore_local_config: + parent = tail = args.files and os.path.abspath( + os.path.commonprefix(args.files)) + while tail: + if config.read([os.path.join(parent, fn) + for fn in PROJECT_CONFIG]): + break + (parent, tail) = os.path.split(parent) + + defaults = dict((k.lstrip('-').replace('-', '_'), v) + for k, v in config.items('pep8')) + parser.set_defaults(**defaults) + except Error: + # Ignore for now. + pass + + return parser + + +def _split_comma_separated(string): + """Return a set of strings.""" + return set(text.strip() for text in string.split(',') if text.strip()) + + def decode_filename(filename): """Return Unicode filename.""" if isinstance(filename, unicode): @@ -2946,7 +3270,8 @@ def docstring_summary(docstring): return docstring.split('\n')[0] -def line_shortening_rank(candidate, indent_word, max_line_length): +def line_shortening_rank(candidate, indent_word, max_line_length, + experimental=False): """Return rank of candidate. 
This is for sorting candidates. @@ -2956,19 +3281,25 @@ def line_shortening_rank(candidate, indent_word, max_line_length): return 0 rank = 0 - lines = candidate.split('\n') + lines = candidate.rstrip().split('\n') offset = 0 if ( not lines[0].lstrip().startswith('#') and lines[0].rstrip()[-1] not in '([{' ): - for symbol in '([{': - offset = max(offset, 1 + lines[0].find(symbol)) + for (opening, closing) in ('()', '[]', '{}'): + # Don't penalize empty containers that aren't split up. Things like + # this "foo(\n )" aren't particularly good. + opening_loc = lines[0].find(opening) + closing_loc = lines[0].find(closing) + if opening_loc >= 0: + if closing_loc < 0 or closing_loc != opening_loc + 1: + offset = max(offset, 1 + opening_loc) current_longest = max(offset + len(x.strip()) for x in lines) - rank += 2 * max(0, current_longest - max_line_length) + rank += 4 * max(0, current_longest - max_line_length) rank += len(lines) @@ -3001,18 +3332,38 @@ def line_shortening_rank(candidate, indent_word, max_line_length): if current_line == bad_start: rank += 1000 - if current_line.endswith(('(', '[', '{')): + if ( + current_line.endswith(('.', '%', '+', '-', '/')) and + "': " in current_line + ): + rank += 1000 + + if current_line.endswith(('(', '[', '{', '.')): # Avoid lonely opening. They result in longer lines. if len(current_line) <= len(indent_word): rank += 100 - # Avoid ugliness of ", (\n". - if current_line.endswith(','): + # Avoid the ugliness of ", (\n". + if ( + current_line.endswith('(') and + current_line[:-1].rstrip().endswith(',') + ): + rank += 100 + + # Also avoid the ugliness of "foo.\nbar" + if current_line.endswith('.'): rank += 100 if has_arithmetic_operator(current_line): rank += 100 + # Avoid breaking at unary operators. 
+ if re.match(r'.*[(\[{]\s*[\-\+~]$', current_line.rstrip('\\ ')): + rank += 1000 + + if re.match(r'.*lambda\s*\*$', current_line.rstrip('\\ ')): + rank += 1000 + if current_line.endswith(('%', '(', '[', '{')): rank -= 20 @@ -3044,12 +3395,16 @@ def line_shortening_rank(candidate, indent_word, max_line_length): if total_len < max_line_length: rank += 10 else: - rank += 1 + rank += 100 if experimental else 1 # Prefer breaking at commas rather than colon. if ',' in current_line and current_line.endswith(':'): rank += 10 + # Avoid splitting dictionaries between key and value. + if current_line.endswith(':'): + rank += 100 + rank += 10 * count_unbalanced_brackets(current_line) return max(0, rank) @@ -3132,6 +3487,8 @@ def match_file(filename, exclude): for pattern in exclude: if fnmatch.fnmatch(base_name, pattern): return False + if fnmatch.fnmatch(filename, pattern): + return False if not os.path.isdir(filename) and not is_python_file(filename): return False @@ -3213,8 +3570,23 @@ def is_probably_part_of_multiline(line): ) -def main(): - """Tool main.""" +def wrap_output(output, encoding): + """Return output with specified encoding.""" + return codecs.getwriter(encoding)(output.buffer + if hasattr(output, 'buffer') + else output) + + +def get_encoding(): + """Return preferred encoding.""" + return locale.getpreferredencoding() or sys.getdefaultencoding() + + +def main(argv=None, apply_config=True): + """Command-line entry.""" + if argv is None: + argv = sys.argv + try: # Exit on broken pipe. 
signal.signal(signal.SIGPIPE, signal.SIG_DFL) @@ -3223,7 +3595,7 @@ def main(): pass try: - args = parse_args(sys.argv[1:]) + args = parse_args(argv[1:], apply_config=apply_config) if args.list_fixes: for code, description in sorted(supported_fixes()): @@ -3234,9 +3606,12 @@ def main(): if args.files == ['-']: assert not args.in_place + encoding = sys.stdin.encoding or get_encoding() + # LineEndingWrapper is unnecessary here due to the symmetry between # standard in and standard out. - sys.stdout.write(fix_code(sys.stdin.read(), args)) + wrap_output(sys.stdout, encoding=encoding).write( + fix_code(sys.stdin.read(), args, encoding=encoding)) else: if args.in_place or args.diff: args.files = list(set(args.files)) diff --git a/pymode/environment.py b/pymode/environment.py index 43246cea..c146ea6e 100644 --- a/pymode/environment.py +++ b/pymode/environment.py @@ -1,40 +1,40 @@ -""" Define interfaces. """ +"""Define interfaces.""" from __future__ import print_function -import vim import json -import time import os.path +import time +import vim # noqa from ._compat import PY2 class VimPymodeEnviroment(object): - """ Vim User interface. """ + """Vim User interface.""" prefix = '[Pymode]' def __init__(self): - """ Init VIM environment. """ + """Init VIM environment.""" self.current = vim.current self.options = dict(encoding=vim.eval('&enc')) self.options['debug'] = self.var('g:pymode_debug', True) @property def curdir(self): - """ Return current working directory. """ + """Return current working directory.""" return self.var('getcwd()') @property def curbuf(self): - """ Return current buffer. """ + """Return current buffer.""" return self.current.buffer @property def cursor(self): - """ Return current window position. + """Return current window position. :return tuple: (row, col) @@ -43,12 +43,12 @@ def cursor(self): @property def source(self): - """ Return source of current buffer. 
""" + """Return source of current buffer.""" return "\n".join(self.lines) @property def lines(self): - """ Iterate by lines in current file. + """Iterate by lines in current file. :return list: @@ -60,7 +60,7 @@ def lines(self): @staticmethod def var(name, to_bool=False, silence=False): - """ Get vim variable. + """Get vim variable. :return vimobj: @@ -81,7 +81,7 @@ def var(name, to_bool=False, silence=False): @staticmethod def message(msg, history=False): - """ Show message to user. + """Show message to user. :return: :None @@ -92,7 +92,7 @@ def message(msg, history=False): return vim.command('call pymode#wide_message("%s")' % str(msg)) def user_input(self, msg, default=''): - """ Return user input or default. + """Return user input or default. :return str: @@ -112,7 +112,7 @@ def user_input(self, msg, default=''): return input_str or default def user_confirm(self, msg, yes=False): - """ Get user confirmation. + """Get user confirmation. :return bool: @@ -122,7 +122,7 @@ def user_confirm(self, msg, yes=False): return action and 'yes'.startswith(action) def user_input_choices(self, msg, *options): - """ Get one of many options. + """Get one of many options. :return str: A choosen option @@ -148,24 +148,24 @@ def user_input_choices(self, msg, *options): @staticmethod def error(msg): - """ Show error to user. """ + """Show error to user.""" vim.command('call pymode#error("%s")' % str(msg)) def debug(self, msg, *args): - """ Print debug information. """ + """Print debug information.""" if self.options.get('debug'): print("%s %s [%s]" % ( int(time.time()), msg, ', '.join([str(a) for a in args]))) def stop(self, value=None): - """ Break Vim function. """ + """Break Vim function.""" cmd = 'return' if value is not None: cmd += ' ' + self.prepare_value(value) vim.command(cmd) def catch_exceptions(self, func): - """ Decorator. Make execution more silence. + """Decorator. Make execution more silence. 
:return func: @@ -181,19 +181,19 @@ def _wrapper(*args, **kwargs): return _wrapper def run(self, name, *args): - """ Run vim function. """ + """Run vim function.""" vim.command('call %s(%s)' % (name, ", ".join([ self.prepare_value(a) for a in args ]))) def let(self, name, value): - """ Set variable. """ + """Set variable.""" cmd = 'let %s = %s' % (name, self.prepare_value(value)) self.debug(cmd) vim.command(cmd) def prepare_value(self, value, dumps=True): - """ Decode bstr to vim encoding. + """Decode bstr to vim encoding. :return unicode string: @@ -207,7 +207,7 @@ def prepare_value(self, value, dumps=True): return value def get_offset_params(self, cursor=None, base=""): - """ Calculate current offset. + """Calculate current offset. :return tuple: (source, offset) @@ -228,11 +228,11 @@ def get_offset_params(self, cursor=None, base=""): @staticmethod def goto_line(line): - """ Go to line. """ + """Go to line.""" vim.command('normal %sggzz' % line) def goto_file(self, path, cmd='e', force=False): - """ Function description. """ + """Open file by path.""" if force or os.path.abspath(path) != self.curbuf.name: self.debug('read', path) if ' ' in path and os.name == 'posix': @@ -241,7 +241,7 @@ def goto_file(self, path, cmd='e', force=False): @staticmethod def goto_buffer(bufnr): - """ Open buffer. """ + """Open buffer.""" if str(bufnr) != '-1': vim.command('buffer %s' % bufnr) diff --git a/pymode/libs/_markerlib/__init__.py b/pymode/libs/_markerlib/__init__.py new file mode 100644 index 00000000..e2b237b1 --- /dev/null +++ b/pymode/libs/_markerlib/__init__.py @@ -0,0 +1,16 @@ +try: + import ast + from _markerlib.markers import default_environment, compile, interpret +except ImportError: + if 'ast' in globals(): + raise + def default_environment(): + return {} + def compile(marker): + def marker_fn(environment=None, override=None): + # 'empty markers are True' heuristic won't install extra deps. 
+ return not marker.strip() + marker_fn.__doc__ = marker + return marker_fn + def interpret(marker, environment=None, override=None): + return compile(marker)() diff --git a/pymode/libs/_markerlib/markers.py b/pymode/libs/_markerlib/markers.py new file mode 100644 index 00000000..fa837061 --- /dev/null +++ b/pymode/libs/_markerlib/markers.py @@ -0,0 +1,119 @@ +# -*- coding: utf-8 -*- +"""Interpret PEP 345 environment markers. + +EXPR [in|==|!=|not in] EXPR [or|and] ... + +where EXPR belongs to any of those: + + python_version = '%s.%s' % (sys.version_info[0], sys.version_info[1]) + python_full_version = sys.version.split()[0] + os.name = os.name + sys.platform = sys.platform + platform.version = platform.version() + platform.machine = platform.machine() + platform.python_implementation = platform.python_implementation() + a free string, like '2.6', or 'win32' +""" + +__all__ = ['default_environment', 'compile', 'interpret'] + +import ast +import os +import platform +import sys +import weakref + +_builtin_compile = compile + +try: + from platform import python_implementation +except ImportError: + if os.name == "java": + # Jython 2.5 has ast module, but not platform.python_implementation() function. + def python_implementation(): + return "Jython" + else: + raise + + +# restricted set of variables +_VARS = {'sys.platform': sys.platform, + 'python_version': '%s.%s' % sys.version_info[:2], + # FIXME parsing sys.platform is not reliable, but there is no other + # way to get e.g. 2.7.2+, and the PEP is defined with sys.version + 'python_full_version': sys.version.split(' ', 1)[0], + 'os.name': os.name, + 'platform.version': platform.version(), + 'platform.machine': platform.machine(), + 'platform.python_implementation': python_implementation(), + 'extra': None # wheel extension + } + +for var in list(_VARS.keys()): + if '.' 
in var: + _VARS[var.replace('.', '_')] = _VARS[var] + +def default_environment(): + """Return copy of default PEP 385 globals dictionary.""" + return dict(_VARS) + +class ASTWhitelist(ast.NodeTransformer): + def __init__(self, statement): + self.statement = statement # for error messages + + ALLOWED = (ast.Compare, ast.BoolOp, ast.Attribute, ast.Name, ast.Load, ast.Str) + # Bool operations + ALLOWED += (ast.And, ast.Or) + # Comparison operations + ALLOWED += (ast.Eq, ast.Gt, ast.GtE, ast.In, ast.Is, ast.IsNot, ast.Lt, ast.LtE, ast.NotEq, ast.NotIn) + + def visit(self, node): + """Ensure statement only contains allowed nodes.""" + if not isinstance(node, self.ALLOWED): + raise SyntaxError('Not allowed in environment markers.\n%s\n%s' % + (self.statement, + (' ' * node.col_offset) + '^')) + return ast.NodeTransformer.visit(self, node) + + def visit_Attribute(self, node): + """Flatten one level of attribute access.""" + new_node = ast.Name("%s.%s" % (node.value.id, node.attr), node.ctx) + return ast.copy_location(new_node, node) + +def parse_marker(marker): + tree = ast.parse(marker, mode='eval') + new_tree = ASTWhitelist(marker).generic_visit(tree) + return new_tree + +def compile_marker(parsed_marker): + return _builtin_compile(parsed_marker, '', 'eval', + dont_inherit=True) + +_cache = weakref.WeakValueDictionary() + +def compile(marker): + """Return compiled marker as a function accepting an environment dict.""" + try: + return _cache[marker] + except KeyError: + pass + if not marker.strip(): + def marker_fn(environment=None, override=None): + """""" + return True + else: + compiled_marker = compile_marker(parse_marker(marker)) + def marker_fn(environment=None, override=None): + """override updates environment""" + if override is None: + override = {} + if environment is None: + environment = default_environment() + environment.update(override) + return eval(compiled_marker, environment) + marker_fn.__doc__ = marker + _cache[marker] = marker_fn + return 
_cache[marker] + +def interpret(marker, environment=None): + return compile(marker)(environment) diff --git a/pymode/libs/pylama/lint/pylama_pylint/astroid/__init__.py b/pymode/libs/astroid/__init__.py similarity index 89% rename from pymode/libs/pylama/lint/pylama_pylint/astroid/__init__.py rename to pymode/libs/astroid/__init__.py index 19c80902..d4fd12c5 100644 --- a/pymode/libs/pylama/lint/pylama_pylint/astroid/__init__.py +++ b/pymode/libs/astroid/__init__.py @@ -79,6 +79,9 @@ class AsStringRegexpPredicate(object): If specified, the second argument is an `attrgetter` expression that will be applied on the node first to get the actual node on which `as_string` should be called. + + WARNING: This can be fairly slow, as it has to convert every AST node back + to Python code; you should consider examining the AST directly instead. """ def __init__(self, regexp, expression=None): self.regexp = re.compile(regexp) @@ -98,13 +101,23 @@ def inference_tip(infer_function): .. sourcecode:: python MANAGER.register_transform(CallFunc, inference_tip(infer_named_tuple), - AsStringRegexpPredicate('namedtuple', 'func')) + predicate) """ def transform(node, infer_function=infer_function): node._explicit_inference = infer_function return node return transform + +def register_module_extender(manager, module_name, get_extension_mod): + def transform(node): + extension_module = get_extension_mod() + for name, obj in extension_module.locals.items(): + node.locals[name] = obj + + manager.register_transform(Module, transform, lambda n: n.name == module_name) + + # load brain plugins from os import listdir from os.path import join, dirname diff --git a/pymode/libs/pylama/lint/pylama_pylint/astroid/__pkginfo__.py b/pymode/libs/astroid/__pkginfo__.py similarity index 78% rename from pymode/libs/pylama/lint/pylama_pylint/astroid/__pkginfo__.py rename to pymode/libs/astroid/__pkginfo__.py index 85398ff1..3fb45aa4 100644 --- a/pymode/libs/pylama/lint/pylama_pylint/astroid/__pkginfo__.py +++ 
b/pymode/libs/astroid/__pkginfo__.py @@ -16,33 +16,27 @@ # You should have received a copy of the GNU Lesser General Public License along # with astroid. If not, see . """astroid packaging information""" - distname = 'astroid' modname = 'astroid' -numversion = (1, 1, 1) +numversion = (1, 3, 8) version = '.'.join([str(num) for num in numversion]) -install_requires = ['logilab-common >= 0.60.0'] +install_requires = ['logilab-common>=0.63.0', 'six'] license = 'LGPL' author = 'Logilab' -author_email = 'python-projects@lists.logilab.org' +author_email = 'pylint-dev@lists.logilab.org' mailinglist = "mailto://%s" % author_email web = 'http://bitbucket.org/logilab/astroid' -description = "rebuild a new abstract syntax tree from Python's ast" - -from os.path import join -include_dirs = ['brain', - join('test', 'regrtest_data'), - join('test', 'data'), join('test', 'data2')] +description = "A abstract syntax tree for Python with inference support." classifiers = ["Topic :: Software Development :: Libraries :: Python Modules", "Topic :: Software Development :: Quality Assurance", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 3", - ] + ] diff --git a/pymode/libs/pylama/lint/pylama_pylint/astroid/as_string.py b/pymode/libs/astroid/as_string.py similarity index 85% rename from pymode/libs/pylama/lint/pylama_pylint/astroid/as_string.py rename to pymode/libs/astroid/as_string.py index ace1c4e3..f627f9e8 100644 --- a/pymode/libs/pylama/lint/pylama_pylint/astroid/as_string.py +++ b/pymode/libs/astroid/as_string.py @@ -44,29 +44,29 @@ def _repr_tree(node, result, indent='', _done=None, ids=False): if not hasattr(node, '_astroid_fields'): # not a astroid node return if node in _done: - result.append( indent + 'loop in tree: %s' % node ) + result.append(indent + 'loop in tree: %s' % node) return _done.add(node) node_str = str(node) if ids: node_str += ' . 
\t%x' % id(node) - result.append( indent + node_str ) + result.append(indent + node_str) indent += INDENT for field in node._astroid_fields: value = getattr(node, field) - if isinstance(value, (list, tuple) ): - result.append( indent + field + " = [" ) + if isinstance(value, (list, tuple)): + result.append(indent + field + " = [") for child in value: - if isinstance(child, (list, tuple) ): + if isinstance(child, (list, tuple)): # special case for Dict # FIXME _repr_tree(child[0], result, indent, _done, ids) _repr_tree(child[1], result, indent, _done, ids) result.append(indent + ',') else: _repr_tree(child, result, indent, _done, ids) - result.append( indent + "]" ) + result.append(indent + "]") else: - result.append( indent + field + " = " ) + result.append(indent + field + " = ") _repr_tree(value, result, indent, _done, ids) @@ -97,7 +97,7 @@ def visit_assert(self, node): """return an astroid.Assert node as string""" if node.fail: return 'assert %s, %s' % (node.test.accept(self), - node.fail.accept(self)) + node.fail.accept(self)) return 'assert %s' % node.test.accept(self) def visit_assname(self, node): @@ -124,7 +124,7 @@ def visit_binop(self, node): def visit_boolop(self, node): """return an astroid.BoolOp node as string""" return (' %s ' % node.op).join(['(%s)' % n.accept(self) - for n in node.values]) + for n in node.values]) def visit_break(self, node): """return an astroid.Break node as string""" @@ -135,20 +135,20 @@ def visit_callfunc(self, node): expr_str = node.func.accept(self) args = [arg.accept(self) for arg in node.args] if node.starargs: - args.append( '*' + node.starargs.accept(self)) + args.append('*' + node.starargs.accept(self)) if node.kwargs: - args.append( '**' + node.kwargs.accept(self)) + args.append('**' + node.kwargs.accept(self)) return '%s(%s)' % (expr_str, ', '.join(args)) def visit_class(self, node): """return an astroid.Class node as string""" decorate = node.decorators and node.decorators.accept(self) or '' - bases = ', 
'.join([n.accept(self) for n in node.bases]) + bases = ', '.join([n.accept(self) for n in node.bases]) if sys.version_info[0] == 2: bases = bases and '(%s)' % bases or '' else: metaclass = node.metaclass() - if metaclass: + if metaclass and not node.has_metaclass_hack(): if bases: bases = '(%s, metaclass=%s)' % (bases, metaclass.name) else: @@ -157,7 +157,7 @@ def visit_class(self, node): bases = bases and '(%s)' % bases or '' docs = node.doc and '\n%s"""%s"""' % (INDENT, node.doc) or '' return '\n\n%sclass %s%s:%s\n%s\n' % (decorate, node.name, bases, docs, - self._stmt_list( node.body)) + self._stmt_list(node.body)) def visit_compare(self, node): """return an astroid.Compare node as string""" @@ -167,9 +167,9 @@ def visit_compare(self, node): def visit_comprehension(self, node): """return an astroid.Comprehension node as string""" - ifs = ''.join([ ' if %s' % n.accept(self) for n in node.ifs]) + ifs = ''.join([' if %s' % n.accept(self) for n in node.ifs]) return 'for %s in %s%s' % (node.target.accept(self), - node.iter.accept(self), ifs ) + node.iter.accept(self), ifs) def visit_const(self, node): """return an astroid.Const node as string""" @@ -182,7 +182,7 @@ def visit_continue(self, node): def visit_delete(self, node): # XXX check if correct """return an astroid.Delete node as string""" return 'del %s' % ', '.join([child.accept(self) - for child in node.targets]) + for child in node.targets]) def visit_delattr(self, node): """return an astroid.DelAttr node as string""" @@ -199,12 +199,13 @@ def visit_decorators(self, node): def visit_dict(self, node): """return an astroid.Dict node as string""" return '{%s}' % ', '.join(['%s: %s' % (key.accept(self), - value.accept(self)) for key, value in node.items]) + value.accept(self)) + for key, value in node.items]) def visit_dictcomp(self, node): """return an astroid.DictComp node as string""" return '{%s: %s %s}' % (node.key.accept(self), node.value.accept(self), - ' '.join([n.accept(self) for n in node.generators])) 
+ ' '.join([n.accept(self) for n in node.generators])) def visit_discard(self, node): """return an astroid.Discard node as string""" @@ -218,7 +219,7 @@ def visit_excepthandler(self, node): if node.type: if node.name: excs = 'except %s, %s' % (node.type.accept(self), - node.name.accept(self)) + node.name.accept(self)) else: excs = 'except %s' % node.type.accept(self) else: @@ -246,13 +247,13 @@ def visit_exec(self, node): def visit_extslice(self, node): """return an astroid.ExtSlice node as string""" - return ','.join( [dim.accept(self) for dim in node.dims] ) + return ','.join([dim.accept(self) for dim in node.dims]) def visit_for(self, node): """return an astroid.For node as string""" fors = 'for %s in %s:\n%s' % (node.target.accept(self), - node.iter.accept(self), - self._stmt_list( node.body)) + node.iter.accept(self), + self._stmt_list(node.body)) if node.orelse: fors = '%s\nelse:\n%s' % (fors, self._stmt_list(node.orelse)) return fors @@ -267,12 +268,12 @@ def visit_function(self, node): decorate = node.decorators and node.decorators.accept(self) or '' docs = node.doc and '\n%s"""%s"""' % (INDENT, node.doc) or '' return '\n%sdef %s(%s):%s\n%s' % (decorate, node.name, node.args.accept(self), - docs, self._stmt_list(node.body)) + docs, self._stmt_list(node.body)) def visit_genexpr(self, node): """return an astroid.GenExpr node as string""" - return '(%s %s)' % (node.elt.accept(self), ' '.join([n.accept(self) - for n in node.generators])) + return '(%s %s)' % (node.elt.accept(self), + ' '.join([n.accept(self) for n in node.generators])) def visit_getattr(self, node): """return an astroid.Getattr node as string""" @@ -292,7 +293,8 @@ def visit_if(self, node): def visit_ifexp(self, node): """return an astroid.IfExp node as string""" return '%s if %s else %s' % (node.body.accept(self), - node.test.accept(self), node.orelse.accept(self)) + node.test.accept(self), + node.orelse.accept(self)) def visit_import(self, node): """return an astroid.Import node as string""" 
@@ -304,7 +306,8 @@ def visit_keyword(self, node): def visit_lambda(self, node): """return an astroid.Lambda node as string""" - return 'lambda %s: %s' % (node.args.accept(self), node.body.accept(self)) + return 'lambda %s: %s' % (node.args.accept(self), + node.body.accept(self)) def visit_list(self, node): """return an astroid.List node as string""" @@ -312,8 +315,8 @@ def visit_list(self, node): def visit_listcomp(self, node): """return an astroid.ListComp node as string""" - return '[%s %s]' % (node.elt.accept(self), ' '.join([n.accept(self) - for n in node.generators])) + return '[%s %s]' % (node.elt.accept(self), + ' '.join([n.accept(self) for n in node.generators])) def visit_module(self, node): """return an astroid.Module node as string""" @@ -343,10 +346,10 @@ def visit_raise(self, node): if node.inst: if node.tback: return 'raise %s, %s, %s' % (node.exc.accept(self), - node.inst.accept(self), - node.tback.accept(self)) + node.inst.accept(self), + node.tback.accept(self)) return 'raise %s, %s' % (node.exc.accept(self), - node.inst.accept(self)) + node.inst.accept(self)) return 'raise %s' % node.exc.accept(self) return 'raise' @@ -367,8 +370,8 @@ def visit_set(self, node): def visit_setcomp(self, node): """return an astroid.SetComp node as string""" - return '{%s %s}' % (node.elt.accept(self), ' '.join([n.accept(self) - for n in node.generators])) + return '{%s %s}' % (node.elt.accept(self), + ' '.join([n.accept(self) for n in node.generators])) def visit_slice(self, node): """return a astroid.Slice node as string""" @@ -385,7 +388,7 @@ def visit_subscript(self, node): def visit_tryexcept(self, node): """return an astroid.TryExcept node as string""" - trys = ['try:\n%s' % self._stmt_list( node.body)] + trys = ['try:\n%s' % self._stmt_list(node.body)] for handler in node.handlers: trys.append(handler.accept(self)) if node.orelse: @@ -394,13 +397,13 @@ def visit_tryexcept(self, node): def visit_tryfinally(self, node): """return an astroid.TryFinally node as 
string""" - return 'try:\n%s\nfinally:\n%s' % (self._stmt_list( node.body), - self._stmt_list(node.finalbody)) + return 'try:\n%s\nfinally:\n%s' % (self._stmt_list(node.body), + self._stmt_list(node.finalbody)) def visit_tuple(self, node): """return an astroid.Tuple node as string""" if len(node.elts) == 1: - return '(%s, )' % node.elts[0].accept(self) + return '(%s, )' % node.elts[0].accept(self) return '(%s)' % ', '.join([child.accept(self) for child in node.elts]) def visit_unaryop(self, node): @@ -424,7 +427,7 @@ def visit_with(self, node): # 'with' without 'as' is possible items = ', '.join(('(%s)' % expr.accept(self)) + (vars and ' as (%s)' % (vars.accept(self)) or '') for expr, vars in node.items) - return 'with %s:\n%s' % (items, self._stmt_list( node.body)) + return 'with %s:\n%s' % (items, self._stmt_list(node.body)) def visit_yield(self, node): """yield an ast.Yield node as string""" @@ -443,7 +446,7 @@ def visit_excepthandler(self, node): if node.type: if node.name: excs = 'except %s as %s' % (node.type.accept(self), - node.name.accept(self)) + node.name.accept(self)) else: excs = 'except %s' % node.type.accept(self) else: diff --git a/pymode/libs/astroid/astpeephole.py b/pymode/libs/astroid/astpeephole.py new file mode 100644 index 00000000..af03462a --- /dev/null +++ b/pymode/libs/astroid/astpeephole.py @@ -0,0 +1,86 @@ +# copyright 2003-2015 LOGILAB S.A. (Paris, FRANCE), all rights reserved. +# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr +# +# This file is part of astroid. +# +# astroid is free software: you can redistribute it and/or modify it +# under the terms of the GNU Lesser General Public License as published by the +# Free Software Foundation, either version 2.1 of the License, or (at your +# option) any later version. +# +# astroid is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU Lesser General Public License +# for more details. +# +# You should have received a copy of the GNU Lesser General Public License along +# with astroid. If not, see . +"""Small AST optimizations.""" + +import _ast + +from astroid import nodes + + +__all__ = ('ASTPeepholeOptimizer', ) + + +try: + _TYPES = (_ast.Str, _ast.Bytes) +except AttributeError: + _TYPES = (_ast.Str, ) + + +class ASTPeepholeOptimizer(object): + """Class for applying small optimizations to generate new AST.""" + + def optimize_binop(self, node): + """Optimize BinOps with string Const nodes on the lhs. + + This fixes an infinite recursion crash, where multiple + strings are joined using the addition operator. With a + sufficient number of such strings, astroid will fail + with a maximum recursion limit exceeded. The + function will return a Const node with all the strings + already joined. + Return ``None`` if no AST node can be obtained + through optimization. + """ + ast_nodes = [] + current = node + while isinstance(current, _ast.BinOp): + # lhs must be a BinOp with the addition operand. + if not isinstance(current.left, _ast.BinOp): + return + if (not isinstance(current.left.op, _ast.Add) + or not isinstance(current.op, _ast.Add)): + return + + # rhs must a str / bytes. + if not isinstance(current.right, _TYPES): + return + + ast_nodes.append(current.right.s) + current = current.left + + if (isinstance(current, _ast.BinOp) + and isinstance(current.left, _TYPES) + and isinstance(current.right, _TYPES)): + # Stop early if we are at the last BinOp in + # the operation + ast_nodes.append(current.right.s) + ast_nodes.append(current.left.s) + break + + if not ast_nodes: + return + + # If we have inconsistent types, bail out. 
+ known = type(ast_nodes[0]) + if any(type(element) is not known + for element in ast_nodes[1:]): + return + + value = known().join(reversed(ast_nodes)) + newnode = nodes.Const(value) + return newnode diff --git a/pymode/libs/pylama/lint/pylama_pylint/astroid/bases.py b/pymode/libs/astroid/bases.py similarity index 89% rename from pymode/libs/pylama/lint/pylama_pylint/astroid/bases.py rename to pymode/libs/astroid/bases.py index 5ee11b3b..ee8ee1c3 100644 --- a/pymode/libs/pylama/lint/pylama_pylint/astroid/bases.py +++ b/pymode/libs/astroid/bases.py @@ -24,6 +24,8 @@ import sys from contextlib import contextmanager +from logilab.common.decorators import cachedproperty + from astroid.exceptions import (InferenceError, AstroidError, NotFoundError, UnresolvableName, UseInferenceDefault) @@ -57,30 +59,37 @@ def infer(self, context=None): # Inference ################################################################## class InferenceContext(object): - __slots__ = ('path', 'lookupname', 'callcontext', 'boundnode') + __slots__ = ('path', 'lookupname', 'callcontext', 'boundnode', 'infered') - def __init__(self, path=None): - if path is None: - self.path = set() - else: - self.path = path + def __init__(self, path=None, infered=None): + self.path = path or set() self.lookupname = None self.callcontext = None self.boundnode = None + self.infered = infered or {} def push(self, node): name = self.lookupname if (node, name) in self.path: raise StopIteration() - self.path.add( (node, name) ) + self.path.add((node, name)) def clone(self): # XXX copy lookupname/callcontext ? 
- clone = InferenceContext(self.path) + clone = InferenceContext(self.path, infered=self.infered) clone.callcontext = self.callcontext clone.boundnode = self.boundnode return clone + def cache_generator(self, key, generator): + results = [] + for result in generator: + results.append(result) + yield result + + self.infered[key] = tuple(results) + return + @contextmanager def restore_path(self): path = set(self.path) @@ -170,14 +179,19 @@ def getattr(self, name, context=None, lookupclass=True): def igetattr(self, name, context=None): """inferred getattr""" + if not context: + context = InferenceContext() try: # avoid recursively inferring the same attr on the same class - if context: - context.push((self._proxied, name)) + + context.push((self._proxied, name)) # XXX frame should be self._proxied, or not ? get_attr = self.getattr(name, context, lookupclass=False) - return _infer_stmts(self._wrap_attr(get_attr, context), context, - frame=self) + return _infer_stmts( + self._wrap_attr(get_attr, context), + context, + frame=self, + ) except NotFoundError: try: # fallback to class'igetattr since it has some logic to handle @@ -259,14 +273,14 @@ def infer_call_result(self, caller, context): # instance of the class given as first argument. 
if (self._proxied.name == '__new__' and self._proxied.parent.frame().qname() == '%s.object' % BUILTINS): - return ((x is YES and x or Instance(x)) - for x in caller.args[0].infer()) + infer = caller.args[0].infer() if caller.args else [] + return ((x is YES and x or Instance(x)) for x in infer) return self._proxied.infer_call_result(caller, context) class BoundMethod(UnboundMethod): """a special node representing a method bound to an instance""" - def __init__(self, proxy, bound): + def __init__(self, proxy, bound): UnboundMethod.__init__(self, proxy) self.bound = bound @@ -377,7 +391,16 @@ def infer(self, context=None, **kwargs): return self._explicit_inference(self, context, **kwargs) except UseInferenceDefault: pass - return self._infer(context, **kwargs) + + if not context: + return self._infer(context, **kwargs) + + key = (self, context.lookupname, + context.callcontext, context.boundnode) + if key in context.infered: + return iter(context.infered[key]) + + return context.cache_generator(key, self._infer(context, **kwargs)) def _repr_name(self): """return self.name or self.attrname or '' for nice representation""" @@ -387,15 +410,14 @@ def __str__(self): return '%s(%s)' % (self.__class__.__name__, self._repr_name()) def __repr__(self): - return '<%s(%s) l.%s [%s] at Ox%x>' % (self.__class__.__name__, - self._repr_name(), - self.fromlineno, - self.root().name, - id(self)) + return '<%s(%s) l.%s [%s] at 0x%x>' % (self.__class__.__name__, + self._repr_name(), + self.fromlineno, + self.root().name, + id(self)) def accept(self, visitor): - klass = self.__class__.__name__ func = getattr(visitor, "visit_" + self.__class__.__name__.lower()) return func(self) @@ -416,7 +438,7 @@ def last_child(self): attr = getattr(self, field) if not attr: # None or empty listy / tuple continue - if isinstance(attr, (list, tuple)): + if attr.__class__ in (list, tuple): return attr[-1] else: return attr @@ -507,16 +529,28 @@ def nearest(self, nodes): # FIXME: raise an exception if 
nearest is None ? return nearest[0] - def set_line_info(self, lastchild): + # these are lazy because they're relatively expensive to compute for every + # single node, and they rarely get looked at + + @cachedproperty + def fromlineno(self): if self.lineno is None: - self.fromlineno = self._fixed_source_line() + return self._fixed_source_line() else: - self.fromlineno = self.lineno + return self.lineno + + @cachedproperty + def tolineno(self): + if not self._astroid_fields: + # can't have children + lastchild = None + else: + lastchild = self.last_child() if lastchild is None: - self.tolineno = self.fromlineno + return self.fromlineno else: - self.tolineno = lastchild.tolineno - return + return lastchild.tolineno + # TODO / FIXME: assert self.fromlineno is not None, self assert self.tolineno is not None, self @@ -531,7 +565,7 @@ def _fixed_source_line(self): _node = self try: while line is None: - _node = _node.get_children().next() + _node = next(_node.get_children()) line = _node.lineno except StopIteration: _node = self.parent diff --git a/pymode/libs/astroid/brain/builtin_inference.py b/pymode/libs/astroid/brain/builtin_inference.py new file mode 100644 index 00000000..f60e7913 --- /dev/null +++ b/pymode/libs/astroid/brain/builtin_inference.py @@ -0,0 +1,245 @@ +"""Astroid hooks for various builtins.""" + +import sys +from functools import partial +from textwrap import dedent + +import six +from astroid import (MANAGER, UseInferenceDefault, + inference_tip, YES, InferenceError, UnresolvableName) +from astroid import nodes +from astroid.builder import AstroidBuilder + + +def _extend_str(class_node, rvalue): + """function to extend builtin str/unicode class""" + # TODO(cpopa): this approach will make astroid to believe + # that some arguments can be passed by keyword, but + # unfortunately, strings and bytes don't accept keyword arguments. 
+ code = dedent(''' + class whatever(object): + def join(self, iterable): + return {rvalue} + def replace(self, old, new, count=None): + return {rvalue} + def format(self, *args, **kwargs): + return {rvalue} + def encode(self, encoding='ascii', errors=None): + return '' + def decode(self, encoding='ascii', errors=None): + return u'' + def capitalize(self): + return {rvalue} + def title(self): + return {rvalue} + def lower(self): + return {rvalue} + def upper(self): + return {rvalue} + def swapcase(self): + return {rvalue} + def index(self, sub, start=None, end=None): + return 0 + def find(self, sub, start=None, end=None): + return 0 + def count(self, sub, start=None, end=None): + return 0 + def strip(self, chars=None): + return {rvalue} + def lstrip(self, chars=None): + return {rvalue} + def rstrip(self, chars=None): + return {rvalue} + def rjust(self, width, fillchar=None): + return {rvalue} + def center(self, width, fillchar=None): + return {rvalue} + def ljust(self, width, fillchar=None): + return {rvalue} + ''') + code = code.format(rvalue=rvalue) + fake = AstroidBuilder(MANAGER).string_build(code)['whatever'] + for method in fake.mymethods(): + class_node.locals[method.name] = [method] + method.parent = class_node + +def extend_builtins(class_transforms): + from astroid.bases import BUILTINS + builtin_ast = MANAGER.astroid_cache[BUILTINS] + for class_name, transform in class_transforms.items(): + transform(builtin_ast[class_name]) + +if sys.version_info > (3, 0): + extend_builtins({'bytes': partial(_extend_str, rvalue="b''"), + 'str': partial(_extend_str, rvalue="''")}) +else: + extend_builtins({'str': partial(_extend_str, rvalue="''"), + 'unicode': partial(_extend_str, rvalue="u''")}) + + +def register_builtin_transform(transform, builtin_name): + """Register a new transform function for the given *builtin_name*. + + The transform function must accept two parameters, a node and + an optional context. 
+ """ + def _transform_wrapper(node, context=None): + result = transform(node, context=context) + if result: + result.parent = node + result.lineno = node.lineno + result.col_offset = node.col_offset + return iter([result]) + + MANAGER.register_transform(nodes.CallFunc, + inference_tip(_transform_wrapper), + lambda n: (isinstance(n.func, nodes.Name) and + n.func.name == builtin_name)) + + +def _generic_inference(node, context, node_type, transform): + args = node.args + if not args: + return node_type() + if len(node.args) > 1: + raise UseInferenceDefault() + + arg, = args + transformed = transform(arg) + if not transformed: + try: + infered = next(arg.infer(context=context)) + except (InferenceError, StopIteration): + raise UseInferenceDefault() + if infered is YES: + raise UseInferenceDefault() + transformed = transform(infered) + if not transformed or transformed is YES: + raise UseInferenceDefault() + return transformed + + +def _generic_transform(arg, klass, iterables, build_elts): + if isinstance(arg, klass): + return arg + elif isinstance(arg, iterables): + if not all(isinstance(elt, nodes.Const) + for elt in arg.elts): + # TODO(cpopa): Don't support heterogenous elements. + # Not yet, though. 
+ raise UseInferenceDefault() + elts = [elt.value for elt in arg.elts] + elif isinstance(arg, nodes.Dict): + if not all(isinstance(elt[0], nodes.Const) + for elt in arg.items): + raise UseInferenceDefault() + elts = [item[0].value for item in arg.items] + elif (isinstance(arg, nodes.Const) and + isinstance(arg.value, (six.string_types, six.binary_type))): + elts = arg.value + else: + return + return klass(elts=build_elts(elts)) + + +def _infer_builtin(node, context, + klass=None, iterables=None, + build_elts=None): + transform_func = partial( + _generic_transform, + klass=klass, + iterables=iterables, + build_elts=build_elts) + + return _generic_inference(node, context, klass, transform_func) + +# pylint: disable=invalid-name +infer_tuple = partial( + _infer_builtin, + klass=nodes.Tuple, + iterables=(nodes.List, nodes.Set), + build_elts=tuple) + +infer_list = partial( + _infer_builtin, + klass=nodes.List, + iterables=(nodes.Tuple, nodes.Set), + build_elts=list) + +infer_set = partial( + _infer_builtin, + klass=nodes.Set, + iterables=(nodes.List, nodes.Tuple), + build_elts=set) + + +def _get_elts(arg, context): + is_iterable = lambda n: isinstance(n, + (nodes.List, nodes.Tuple, nodes.Set)) + try: + infered = next(arg.infer(context)) + except (InferenceError, UnresolvableName): + raise UseInferenceDefault() + if isinstance(infered, nodes.Dict): + items = infered.items + elif is_iterable(infered): + items = [] + for elt in infered.elts: + # If an item is not a pair of two items, + # then fallback to the default inference. + # Also, take in consideration only hashable items, + # tuples and consts. We are choosing Names as well. 
+ if not is_iterable(elt): + raise UseInferenceDefault() + if len(elt.elts) != 2: + raise UseInferenceDefault() + if not isinstance(elt.elts[0], + (nodes.Tuple, nodes.Const, nodes.Name)): + raise UseInferenceDefault() + items.append(tuple(elt.elts)) + else: + raise UseInferenceDefault() + return items + +def infer_dict(node, context=None): + """Try to infer a dict call to a Dict node. + + The function treats the following cases: + + * dict() + * dict(mapping) + * dict(iterable) + * dict(iterable, **kwargs) + * dict(mapping, **kwargs) + * dict(**kwargs) + + If a case can't be infered, we'll fallback to default inference. + """ + has_keywords = lambda args: all(isinstance(arg, nodes.Keyword) + for arg in args) + if not node.args and not node.kwargs: + # dict() + return nodes.Dict() + elif has_keywords(node.args) and node.args: + # dict(a=1, b=2, c=4) + items = [(nodes.Const(arg.arg), arg.value) for arg in node.args] + elif (len(node.args) >= 2 and + has_keywords(node.args[1:])): + # dict(some_iterable, b=2, c=4) + elts = _get_elts(node.args[0], context) + keys = [(nodes.Const(arg.arg), arg.value) for arg in node.args[1:]] + items = elts + keys + elif len(node.args) == 1: + items = _get_elts(node.args[0], context) + else: + raise UseInferenceDefault() + + empty = nodes.Dict() + empty.items = items + return empty + +# Builtins inference +register_builtin_transform(infer_tuple, 'tuple') +register_builtin_transform(infer_set, 'set') +register_builtin_transform(infer_list, 'list') +register_builtin_transform(infer_dict, 'dict') diff --git a/pymode/libs/pylama/lint/pylama_pylint/astroid/brain/py2gi.py b/pymode/libs/astroid/brain/py2gi.py similarity index 75% rename from pymode/libs/pylama/lint/pylama_pylint/astroid/brain/py2gi.py rename to pymode/libs/astroid/brain/py2gi.py index dd9868db..6747898d 100644 --- a/pymode/libs/pylama/lint/pylama_pylint/astroid/brain/py2gi.py +++ b/pymode/libs/astroid/brain/py2gi.py @@ -4,6 +4,7 @@ """ import inspect +import itertools import 
sys import re @@ -111,40 +112,33 @@ def _gi_build_stub(parent): return ret -# Overwrite Module.module_import to _actually_ import the introspected module if -# it's a gi module, then build stub code by examining its info and get an astng -# from that - -from astroid.scoped_nodes import Module -_orig_import_module = Module.import_module - -def _new_import_module(self, modname, relative_only=False, level=None): - # Could be a static piece of gi.repository or whatever unrelated module, - # let that fall through - try: - return _orig_import_module(self, modname, relative_only, level) - except AstroidBuildingException: - # we only consider gi.repository submodules - if not modname.startswith('gi.repository.'): - if relative_only and level is None: - level = 0 - modname = self.relative_to_absolute_name(modname, level) - if not modname.startswith('gi.repository.'): - raise +def _import_gi_module(modname): + # we only consider gi.repository submodules + if not modname.startswith('gi.repository.'): + raise AstroidBuildingException() # build astroid representation unless we already tried so if modname not in _inspected_modules: modnames = [modname] - # GLib and GObject have some special case handling - # in pygobject that we need to cope with + optional_modnames = [] + + # GLib and GObject may have some special case handling + # in pygobject that we need to cope with. However at + # least as of pygobject3-3.13.91 the _glib module doesn't + # exist anymore, so if treat these modules as optional. 
if modname == 'gi.repository.GLib': - modnames.append('gi._glib') + optional_modnames.append('gi._glib') elif modname == 'gi.repository.GObject': - modnames.append('gi._gobject') + optional_modnames.append('gi._gobject') + try: modcode = '' - for m in modnames: - __import__(m) - modcode += _gi_build_stub(sys.modules[m]) + for m in itertools.chain(modnames, optional_modnames): + try: + __import__(m) + modcode += _gi_build_stub(sys.modules[m]) + except ImportError: + if m not in optional_modnames: + raise except ImportError: astng = _inspected_modules[modname] = None else: @@ -156,4 +150,6 @@ def _new_import_module(self, modname, relative_only=False, level=None): raise AstroidBuildingException('Failed to import module %r' % modname) return astng -Module.import_module = _new_import_module + +MANAGER.register_failed_import_hook(_import_gi_module) + diff --git a/pymode/libs/pylama/lint/pylama_pylint/astroid/brain/py2mechanize.py b/pymode/libs/astroid/brain/py2mechanize.py similarity index 53% rename from pymode/libs/pylama/lint/pylama_pylint/astroid/brain/py2mechanize.py rename to pymode/libs/astroid/brain/py2mechanize.py index 1e0b102d..20a253a4 100644 --- a/pymode/libs/pylama/lint/pylama_pylint/astroid/brain/py2mechanize.py +++ b/pymode/libs/astroid/brain/py2mechanize.py @@ -1,8 +1,8 @@ -from astroid import MANAGER +from astroid import MANAGER, register_module_extender from astroid.builder import AstroidBuilder -def mechanize_transform(module): - fake = AstroidBuilder(MANAGER).string_build(''' +def mechanize_transform(): + return AstroidBuilder(MANAGER).string_build(''' class Browser(object): def open(self, url, data=None, timeout=None): @@ -13,8 +13,6 @@ def open_local_file(self, filename): return None ''') - module.locals['Browser'] = fake.locals['Browser'] -import py2stdlib -py2stdlib.MODULE_TRANSFORMS['mechanize'] = mechanize_transform +register_module_extender(MANAGER, 'mechanize', mechanize_transform) diff --git a/pymode/libs/astroid/brain/py2pytest.py 
b/pymode/libs/astroid/brain/py2pytest.py new file mode 100644 index 00000000..e24d449c --- /dev/null +++ b/pymode/libs/astroid/brain/py2pytest.py @@ -0,0 +1,31 @@ +"""Astroid hooks for pytest.""" + +from astroid import MANAGER, register_module_extender +from astroid.builder import AstroidBuilder + + +def pytest_transform(): + return AstroidBuilder(MANAGER).string_build(''' + +try: + import _pytest.mark + import _pytest.recwarn + import _pytest.runner + import _pytest.python +except ImportError: + pass +else: + deprecated_call = _pytest.recwarn.deprecated_call + exit = _pytest.runner.exit + fail = _pytest.runner.fail + fixture = _pytest.python.fixture + importorskip = _pytest.runner.importorskip + mark = _pytest.mark.MarkGenerator() + raises = _pytest.python.raises + skip = _pytest.runner.skip + yield_fixture = _pytest.python.yield_fixture + +''') + +register_module_extender(MANAGER, 'pytest', pytest_transform) +register_module_extender(MANAGER, 'py.test', pytest_transform) diff --git a/pymode/libs/astroid/brain/py2qt4.py b/pymode/libs/astroid/brain/py2qt4.py new file mode 100644 index 00000000..d5578097 --- /dev/null +++ b/pymode/libs/astroid/brain/py2qt4.py @@ -0,0 +1,22 @@ +"""Astroid hooks for the Python 2 qt4 module. + +Currently help understanding of : + +* PyQT4.QtCore +""" + +from astroid import MANAGER, register_module_extender +from astroid.builder import AstroidBuilder + + +def pyqt4_qtcore_transform(): + return AstroidBuilder(MANAGER).string_build(''' + +def SIGNAL(signal_name): pass + +class QObject(object): + def emit(self, signal): pass +''') + + +register_module_extender(MANAGER, 'PyQt4.QtCore', pyqt4_qtcore_transform) diff --git a/pymode/libs/astroid/brain/py2stdlib.py b/pymode/libs/astroid/brain/py2stdlib.py new file mode 100644 index 00000000..2bfcbcd3 --- /dev/null +++ b/pymode/libs/astroid/brain/py2stdlib.py @@ -0,0 +1,334 @@ + +"""Astroid hooks for the Python 2 standard library. 
+ +Currently help understanding of : + +* hashlib.md5 and hashlib.sha1 +""" + +import sys +from functools import partial +from textwrap import dedent + +from astroid import ( + MANAGER, AsStringRegexpPredicate, + UseInferenceDefault, inference_tip, + YES, InferenceError, register_module_extender) +from astroid import exceptions +from astroid import nodes +from astroid.builder import AstroidBuilder + +PY3K = sys.version_info > (3, 0) +PY33 = sys.version_info >= (3, 3) + +# general function + +def infer_func_form(node, base_type, context=None, enum=False): + """Specific inference function for namedtuple or Python 3 enum. """ + def infer_first(node): + try: + value = next(node.infer(context=context)) + if value is YES: + raise UseInferenceDefault() + else: + return value + except StopIteration: + raise InferenceError() + + # node is a CallFunc node, class name as first argument and generated class + # attributes as second argument + if len(node.args) != 2: + # something weird here, go back to class implementation + raise UseInferenceDefault() + # namedtuple or enums list of attributes can be a list of strings or a + # whitespace-separate string + try: + name = infer_first(node.args[0]).value + names = infer_first(node.args[1]) + try: + attributes = names.value.replace(',', ' ').split() + except AttributeError: + if not enum: + attributes = [infer_first(const).value for const in names.elts] + else: + # Enums supports either iterator of (name, value) pairs + # or mappings. + # TODO: support only list, tuples and mappings. + if hasattr(names, 'items') and isinstance(names.items, list): + attributes = [infer_first(const[0]).value + for const in names.items + if isinstance(const[0], nodes.Const)] + elif hasattr(names, 'elts'): + # Enums can support either ["a", "b", "c"] + # or [("a", 1), ("b", 2), ...], but they can't + # be mixed. 
+ if all(isinstance(const, nodes.Tuple) + for const in names.elts): + attributes = [infer_first(const.elts[0]).value + for const in names.elts + if isinstance(const, nodes.Tuple)] + else: + attributes = [infer_first(const).value + for const in names.elts] + else: + raise AttributeError + if not attributes: + raise AttributeError + except (AttributeError, exceptions.InferenceError) as exc: + raise UseInferenceDefault() + # we want to return a Class node instance with proper attributes set + class_node = nodes.Class(name, 'docstring') + class_node.parent = node.parent + # set base class=tuple + class_node.bases.append(base_type) + # XXX add __init__(*attributes) method + for attr in attributes: + fake_node = nodes.EmptyNode() + fake_node.parent = class_node + class_node.instance_attrs[attr] = [fake_node] + return class_node, name, attributes + + +# module specific transformation functions ##################################### + +def hashlib_transform(): + template = ''' + +class %(name)s(object): + def __init__(self, value=''): pass + def digest(self): + return %(digest)s + def copy(self): + return self + def update(self, value): pass + def hexdigest(self): + return '' + @property + def name(self): + return %(name)r + @property + def block_size(self): + return 1 + @property + def digest_size(self): + return 1 +''' + algorithms = ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512') + classes = "".join( + template % {'name': hashfunc, 'digest': 'b""' if PY3K else '""'} + for hashfunc in algorithms) + return AstroidBuilder(MANAGER).string_build(classes) + + +def collections_transform(): + return AstroidBuilder(MANAGER).string_build(''' + +class defaultdict(dict): + default_factory = None + def __missing__(self, key): pass + +class deque(object): + maxlen = 0 + def __init__(self, iterable=None, maxlen=None): pass + def append(self, x): pass + def appendleft(self, x): pass + def clear(self): pass + def count(self, x): return 0 + def extend(self, iterable): pass + def 
extendleft(self, iterable): pass + def pop(self): pass + def popleft(self): pass + def remove(self, value): pass + def reverse(self): pass + def rotate(self, n): pass + def __iter__(self): return self + +''') + + +def pkg_resources_transform(): + return AstroidBuilder(MANAGER).string_build(''' + +def resource_exists(package_or_requirement, resource_name): + pass + +def resource_isdir(package_or_requirement, resource_name): + pass + +def resource_filename(package_or_requirement, resource_name): + pass + +def resource_stream(package_or_requirement, resource_name): + pass + +def resource_string(package_or_requirement, resource_name): + pass + +def resource_listdir(package_or_requirement, resource_name): + pass + +def extraction_error(): + pass + +def get_cache_path(archive_name, names=()): + pass + +def postprocess(tempname, filename): + pass + +def set_extraction_path(path): + pass + +def cleanup_resources(force=False): + pass + +''') + + +def subprocess_transform(): + if PY3K: + communicate = (bytes('string', 'ascii'), bytes('string', 'ascii')) + init = """ + def __init__(self, args, bufsize=0, executable=None, + stdin=None, stdout=None, stderr=None, + preexec_fn=None, close_fds=False, shell=False, + cwd=None, env=None, universal_newlines=False, + startupinfo=None, creationflags=0, restore_signals=True, + start_new_session=False, pass_fds=()): + pass + """ + else: + communicate = ('string', 'string') + init = """ + def __init__(self, args, bufsize=0, executable=None, + stdin=None, stdout=None, stderr=None, + preexec_fn=None, close_fds=False, shell=False, + cwd=None, env=None, universal_newlines=False, + startupinfo=None, creationflags=0): + pass + """ + if PY33: + wait_signature = 'def wait(self, timeout=None)' + else: + wait_signature = 'def wait(self)' + return AstroidBuilder(MANAGER).string_build(''' + +class Popen(object): + returncode = pid = 0 + stdin = stdout = stderr = file() + + %(init)s + + def communicate(self, input=None): + return %(communicate)r + 
%(wait_signature)s: + return self.returncode + def poll(self): + return self.returncode + def send_signal(self, signal): + pass + def terminate(self): + pass + def kill(self): + pass + ''' % {'init': init, + 'communicate': communicate, + 'wait_signature': wait_signature}) + + +# namedtuple support ########################################################### + +def looks_like_namedtuple(node): + func = node.func + if type(func) is nodes.Getattr: + return func.attrname == 'namedtuple' + if type(func) is nodes.Name: + return func.name == 'namedtuple' + return False + +def infer_named_tuple(node, context=None): + """Specific inference function for namedtuple CallFunc node""" + class_node, name, attributes = infer_func_form(node, nodes.Tuple._proxied, + context=context) + fake = AstroidBuilder(MANAGER).string_build(''' +class %(name)s(tuple): + _fields = %(fields)r + def _asdict(self): + return self.__dict__ + @classmethod + def _make(cls, iterable, new=tuple.__new__, len=len): + return new(cls, iterable) + def _replace(_self, **kwds): + result = _self._make(map(kwds.pop, %(fields)r, _self)) + if kwds: + raise ValueError('Got unexpected field names: %%r' %% list(kwds)) + return result + ''' % {'name': name, 'fields': attributes}) + class_node.locals['_asdict'] = fake.body[0].locals['_asdict'] + class_node.locals['_make'] = fake.body[0].locals['_make'] + class_node.locals['_replace'] = fake.body[0].locals['_replace'] + class_node.locals['_fields'] = fake.body[0].locals['_fields'] + # we use UseInferenceDefault, we can't be a generator so return an iterator + return iter([class_node]) + +def infer_enum(node, context=None): + """ Specific inference function for enum CallFunc node. """ + enum_meta = nodes.Class("EnumMeta", 'docstring') + class_node = infer_func_form(node, enum_meta, + context=context, enum=True)[0] + return iter([class_node.instanciate_class()]) + +def infer_enum_class(node): + """ Specific inference for enums. 
""" + names = set(('Enum', 'IntEnum', 'enum.Enum', 'enum.IntEnum')) + for basename in node.basenames: + # TODO: doesn't handle subclasses yet. This implementation + # is a hack to support enums. + if basename not in names: + continue + if node.root().name == 'enum': + # Skip if the class is directly from enum module. + break + for local, values in node.locals.items(): + if any(not isinstance(value, nodes.AssName) + for value in values): + continue + + stmt = values[0].statement() + if isinstance(stmt.targets[0], nodes.Tuple): + targets = stmt.targets[0].itered() + else: + targets = stmt.targets + + new_targets = [] + for target in targets: + # Replace all the assignments with our mocked class. + classdef = dedent(''' + class %(name)s(object): + @property + def value(self): + # Not the best return. + return None + @property + def name(self): + return %(name)r + ''' % {'name': target.name}) + fake = AstroidBuilder(MANAGER).string_build(classdef)[target.name] + fake.parent = target.parent + for method in node.mymethods(): + fake.locals[method.name] = [method] + new_targets.append(fake.instanciate_class()) + node.locals[local] = new_targets + break + return node + + +MANAGER.register_transform(nodes.CallFunc, inference_tip(infer_named_tuple), + looks_like_namedtuple) +MANAGER.register_transform(nodes.CallFunc, inference_tip(infer_enum), + AsStringRegexpPredicate('Enum', 'func')) +MANAGER.register_transform(nodes.Class, infer_enum_class) +register_module_extender(MANAGER, 'hashlib', hashlib_transform) +register_module_extender(MANAGER, 'collections', collections_transform) +register_module_extender(MANAGER, 'pkg_resources', pkg_resources_transform) +register_module_extender(MANAGER, 'subprocess', subprocess_transform) diff --git a/pymode/libs/astroid/brain/pynose.py b/pymode/libs/astroid/brain/pynose.py new file mode 100644 index 00000000..67a6fb8f --- /dev/null +++ b/pymode/libs/astroid/brain/pynose.py @@ -0,0 +1,79 @@ +# copyright 2003-2015 LOGILAB S.A. 
(Paris, FRANCE), all rights reserved. +# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr +# +# This file is part of astroid. +# +# astroid is free software: you can redistribute it and/or modify it +# under the terms of the GNU Lesser General Public License as published by the +# Free Software Foundation, either version 2.1 of the License, or (at your +# option) any later version. +# +# astroid is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License +# for more details. +# +# You should have received a copy of the GNU Lesser General Public License along +# with astroid. If not, see . + +"""Hooks for nose library.""" + +import re +import textwrap + +import astroid +import astroid.builder + +_BUILDER = astroid.builder.AstroidBuilder(astroid.MANAGER) + + +def _pep8(name, caps=re.compile('([A-Z])')): + return caps.sub(lambda m: '_' + m.groups()[0].lower(), name) + + +def _nose_tools_functions(): + """Get an iterator of names and bound methods.""" + module = _BUILDER.string_build(textwrap.dedent(''' + import unittest + + class Test(unittest.TestCase): + pass + a = Test() + ''')) + try: + case = next(module['a'].infer()) + except astroid.InferenceError: + return + for method in case.methods(): + if method.name.startswith('assert') and '_' not in method.name: + pep8_name = _pep8(method.name) + yield pep8_name, astroid.BoundMethod(method, case) + + +def _nose_tools_transform(node): + for method_name, method in _nose_tools_functions(): + node.locals[method_name] = [method] + + +def _nose_tools_trivial_transform(): + """Custom transform for the nose.tools module.""" + stub = _BUILDER.string_build('''__all__ = []''') + all_entries = ['ok_', 'eq_'] + + for pep8_name, method in _nose_tools_functions(): + all_entries.append(pep8_name) + stub[pep8_name] = method + + # Update the __all__ variable, since 
nose.tools + # does this manually with .append. + all_assign = stub['__all__'].parent + all_object = astroid.List(all_entries) + all_object.parent = all_assign + all_assign.value = all_object + return stub + + +astroid.register_module_extender(astroid.MANAGER, 'nose.tools.trivial', + _nose_tools_trivial_transform) +astroid.MANAGER.register_transform(astroid.Module, _nose_tools_transform, + lambda n: n.name == 'nose.tools') diff --git a/pymode/libs/astroid/brain/pysix_moves.py b/pymode/libs/astroid/brain/pysix_moves.py new file mode 100644 index 00000000..548d9761 --- /dev/null +++ b/pymode/libs/astroid/brain/pysix_moves.py @@ -0,0 +1,261 @@ +# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved. +# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr +# +# This file is part of astroid. +# +# astroid is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 2.1 of the License, or (at your option) any +# later version. +# +# astroid is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License along +# with astroid. If not, see . + +"""Astroid hooks for six.moves.""" + +import sys +from textwrap import dedent + +from astroid import MANAGER, register_module_extender +from astroid.builder import AstroidBuilder +from astroid.exceptions import AstroidBuildingException + +def _indent(text, prefix, predicate=None): + """Adds 'prefix' to the beginning of selected lines in 'text'. + + If 'predicate' is provided, 'prefix' will only be added to the lines + where 'predicate(line)' is True. 
If 'predicate' is not provided, + it will default to adding 'prefix' to all non-empty lines that do not + consist solely of whitespace characters. + """ + if predicate is None: + predicate = lambda line: line.strip() + + def prefixed_lines(): + for line in text.splitlines(True): + yield prefix + line if predicate(line) else line + return ''.join(prefixed_lines()) + + +if sys.version_info[0] == 2: + _IMPORTS_2 = """ + import BaseHTTPServer + import CGIHTTPServer + import SimpleHTTPServer + + from StringIO import StringIO + from cStringIO import StringIO as cStringIO + from UserDict import UserDict + from UserList import UserList + from UserString import UserString + + import __builtin__ as builtins + import thread as _thread + import dummy_thread as _dummy_thread + import ConfigParser as configparser + import copy_reg as copyreg + from itertools import (imap as map, + ifilter as filter, + ifilterfalse as filterfalse, + izip_longest as zip_longest, + izip as zip) + import htmlentitydefs as html_entities + import HTMLParser as html_parser + import httplib as http_client + import cookielib as http_cookiejar + import Cookie as http_cookies + import Queue as queue + import repr as reprlib + from pipes import quote as shlex_quote + import SocketServer as socketserver + import SimpleXMLRPCServer as xmlrpc_server + import xmlrpclib as xmlrpc_client + import _winreg as winreg + import robotparser as urllib_robotparser + import Tkinter as tkinter + import tkFileDialog as tkinter_tkfiledialog + + input = raw_input + intern = intern + range = xrange + xrange = xrange + reduce = reduce + reload_module = reload + + class UrllibParse(object): + import urlparse as _urlparse + import urllib as _urllib + ParseResult = _urlparse.ParseResult + SplitResult = _urlparse.SplitResult + parse_qs = _urlparse.parse_qs + parse_qsl = _urlparse.parse_qsl + urldefrag = _urlparse.urldefrag + urljoin = _urlparse.urljoin + urlparse = _urlparse.urlparse + urlsplit = _urlparse.urlsplit + urlunparse = 
_urlparse.urlunparse + urlunsplit = _urlparse.urlunsplit + quote = _urllib.quote + quote_plus = _urllib.quote_plus + unquote = _urllib.unquote + unquote_plus = _urllib.unquote_plus + urlencode = _urllib.urlencode + splitquery = _urllib.splitquery + splittag = _urllib.splittag + splituser = _urllib.splituser + uses_fragment = _urlparse.uses_fragment + uses_netloc = _urlparse.uses_netloc + uses_params = _urlparse.uses_params + uses_query = _urlparse.uses_query + uses_relative = _urlparse.uses_relative + + class UrllibError(object): + import urllib2 as _urllib2 + import urllib as _urllib + URLError = _urllib2.URLError + HTTPError = _urllib2.HTTPError + ContentTooShortError = _urllib.ContentTooShortError + + class DummyModule(object): + pass + + class UrllibRequest(object): + import urlparse as _urlparse + import urllib2 as _urllib2 + import urllib as _urllib + urlopen = _urllib2.urlopen + install_opener = _urllib2.install_opener + build_opener = _urllib2.build_opener + pathname2url = _urllib.pathname2url + url2pathname = _urllib.url2pathname + getproxies = _urllib.getproxies + Request = _urllib2.Request + OpenerDirector = _urllib2.OpenerDirector + HTTPDefaultErrorHandler = _urllib2.HTTPDefaultErrorHandler + HTTPRedirectHandler = _urllib2.HTTPRedirectHandler + HTTPCookieProcessor = _urllib2.HTTPCookieProcessor + ProxyHandler = _urllib2.ProxyHandler + BaseHandler = _urllib2.BaseHandler + HTTPPasswordMgr = _urllib2.HTTPPasswordMgr + HTTPPasswordMgrWithDefaultRealm = _urllib2.HTTPPasswordMgrWithDefaultRealm + AbstractBasicAuthHandler = _urllib2.AbstractBasicAuthHandler + HTTPBasicAuthHandler = _urllib2.HTTPBasicAuthHandler + ProxyBasicAuthHandler = _urllib2.ProxyBasicAuthHandler + AbstractDigestAuthHandler = _urllib2.AbstractDigestAuthHandler + HTTPDigestAuthHandler = _urllib2.HTTPDigestAuthHandler + ProxyDigestAuthHandler = _urllib2.ProxyDigestAuthHandler + HTTPHandler = _urllib2.HTTPHandler + HTTPSHandler = _urllib2.HTTPSHandler + FileHandler = _urllib2.FileHandler + 
FTPHandler = _urllib2.FTPHandler + CacheFTPHandler = _urllib2.CacheFTPHandler + UnknownHandler = _urllib2.UnknownHandler + HTTPErrorProcessor = _urllib2.HTTPErrorProcessor + urlretrieve = _urllib.urlretrieve + urlcleanup = _urllib.urlcleanup + proxy_bypass = _urllib.proxy_bypass + + urllib_parse = UrllibParse() + urllib_error = UrllibError() + urllib = DummyModule() + urllib.request = UrllibRequest() + urllib.parse = UrllibParse() + urllib.error = UrllibError() + """ +else: + _IMPORTS_3 = """ + import _io + cStringIO = _io.StringIO + filter = filter + from itertools import filterfalse + input = input + from sys import intern + map = map + range = range + from imp import reload as reload_module + from functools import reduce + from shlex import quote as shlex_quote + from io import StringIO + from collections import UserDict, UserList, UserString + xrange = range + zip = zip + from itertools import zip_longest + import builtins + import configparser + import copyreg + import _dummy_thread + import http.cookiejar as http_cookiejar + import http.cookies as http_cookies + import html.entities as html_entities + import html.parser as html_parser + import http.client as http_client + import http.server + BaseHTTPServer = CGIHTTPServer = SimpleHTTPServer = http.server + import pickle as cPickle + import queue + import reprlib + import socketserver + import _thread + import winreg + import xmlrpc.server as xmlrpc_server + import xmlrpc.client as xmlrpc_client + import urllib.robotparser as urllib_robotparser + import email.mime.multipart as email_mime_multipart + import email.mime.nonmultipart as email_mime_nonmultipart + import email.mime.text as email_mime_text + import email.mime.base as email_mime_base + import urllib.parse as urllib_parse + import urllib.error as urllib_error + import tkinter + import tkinter.dialog as tkinter_dialog + import tkinter.filedialog as tkinter_filedialog + import tkinter.scrolledtext as tkinter_scrolledtext + import tkinter.simpledialog as 
tkinder_simpledialog + import tkinter.tix as tkinter_tix + import tkinter.ttk as tkinter_ttk + import tkinter.constants as tkinter_constants + import tkinter.dnd as tkinter_dnd + import tkinter.colorchooser as tkinter_colorchooser + import tkinter.commondialog as tkinter_commondialog + import tkinter.filedialog as tkinter_tkfiledialog + import tkinter.font as tkinter_font + import tkinter.messagebox as tkinter_messagebox + import urllib.request + import urllib.robotparser as urllib_robotparser + import urllib.parse as urllib_parse + import urllib.error as urllib_error + """ +if sys.version_info[0] == 2: + _IMPORTS = dedent(_IMPORTS_2) +else: + _IMPORTS = dedent(_IMPORTS_3) + + +def six_moves_transform(): + code = dedent(''' + class Moves(object): + {} + moves = Moves() + ''').format(_indent(_IMPORTS, " ")) + module = AstroidBuilder(MANAGER).string_build(code) + module.name = 'six.moves' + return module + + +def _six_fail_hook(modname): + if modname != 'six.moves': + raise AstroidBuildingException + module = AstroidBuilder(MANAGER).string_build(_IMPORTS) + module.name = 'six.moves' + return module + + +register_module_extender(MANAGER, 'six', six_moves_transform) +register_module_extender(MANAGER, 'requests.packages.urllib3.packages.six', + six_moves_transform) +MANAGER.register_failed_import_hook(_six_fail_hook) diff --git a/pymode/libs/pylama/lint/pylama_pylint/astroid/builder.py b/pymode/libs/astroid/builder.py similarity index 85% rename from pymode/libs/pylama/lint/pylama_pylint/astroid/builder.py rename to pymode/libs/astroid/builder.py index b6ceff82..1fe7a36d 100644 --- a/pymode/libs/pylama/lint/pylama_pylint/astroid/builder.py +++ b/pymode/libs/astroid/builder.py @@ -1,4 +1,4 @@ -# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved. +# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved. # contact http://www.logilab.fr/ -- mailto:contact@logilab.fr # # This file is part of astroid. 
@@ -20,19 +20,19 @@ The builder is not thread safe and can't be used to parse different sources at the same time. """ +from __future__ import with_statement __docformat__ = "restructuredtext en" import sys from os.path import splitext, basename, exists, abspath -from logilab.common.modutils import modpath_from_file - from astroid.exceptions import AstroidBuildingException, InferenceError from astroid.raw_building import InspectBuilder from astroid.rebuilder import TreeRebuilder from astroid.manager import AstroidManager from astroid.bases import YES, Instance +from astroid.modutils import modpath_from_file from _ast import PyCF_ONLY_AST def parse(string): @@ -42,13 +42,12 @@ def parse(string): from tokenize import detect_encoding def open_source_file(filename): - byte_stream = open(filename, 'bU') - encoding = detect_encoding(byte_stream.readline)[0] - byte_stream.close() - stream = open(filename, 'U', encoding=encoding) + with open(filename, 'rb') as byte_stream: + encoding = detect_encoding(byte_stream.readline)[0] + stream = open(filename, 'r', newline=None, encoding=encoding) try: data = stream.read() - except UnicodeError, uex: # wrong encodingg + except UnicodeError: # wrong encodingg # detect_encoding returns utf-8 if no encoding specified msg = 'Wrong (%s) or no encoding specified' % encoding raise AstroidBuildingException(msg) @@ -57,7 +56,7 @@ def open_source_file(filename): else: import re - _ENCODING_RGX = re.compile("\s*#+.*coding[:=]\s*([-\w.]+)") + _ENCODING_RGX = re.compile(r"\s*#+.*coding[:=]\s*([-\w.]+)") def _guess_encoding(string): """get encoding from a python file as string or return None if not found @@ -117,22 +116,23 @@ def file_build(self, path, modname=None): """ try: stream, encoding, data = open_source_file(path) - except IOError, exc: + except IOError as exc: msg = 'Unable to load file %r (%s)' % (path, exc) raise AstroidBuildingException(msg) - except SyntaxError, exc: # py3k encoding specification error + except SyntaxError as exc: # 
py3k encoding specification error raise AstroidBuildingException(exc) - except LookupError, exc: # unknown encoding + except LookupError as exc: # unknown encoding raise AstroidBuildingException(exc) - # get module name if necessary - if modname is None: - try: - modname = '.'.join(modpath_from_file(path)) - except ImportError: - modname = splitext(basename(path))[0] - # build astroid representation - module = self._data_build(data, modname, path) - return self._post_build(module, encoding) + with stream: + # get module name if necessary + if modname is None: + try: + modname = '.'.join(modpath_from_file(path)) + except ImportError: + modname = splitext(basename(path))[0] + # build astroid representation + module = self._data_build(data, modname, path) + return self._post_build(module, encoding) def string_build(self, data, modname='', path=None): """build astroid from source code string and return rebuilded astroid""" @@ -160,7 +160,10 @@ def _post_build(self, module, encoding): def _data_build(self, data, modname, path): """build tree node from data and add some informations""" # this method could be wrapped with a pickle/cache function - node = parse(data + '\n') + try: + node = parse(data + '\n') + except TypeError as exc: + raise AstroidBuildingException(exc) if path is not None: node_file = abspath(path) else: @@ -171,8 +174,7 @@ def _data_build(self, data, modname, path): else: package = path and path.find('__init__.py') > -1 or False rebuilder = TreeRebuilder(self._manager) - module = rebuilder.visit_module(node, modname, package) - module.file = module.path = node_file + module = rebuilder.visit_module(node, modname, node_file, package) module._from_nodes = rebuilder._from_nodes module._delayed_assattr = rebuilder._delayed_assattr return module @@ -188,8 +190,8 @@ def sort_locals(my_list): for (name, asname) in node.names: if name == '*': try: - imported = node.root().import_module(node.modname) - except AstroidBuildingException: + imported = 
node.do_import_module() + except InferenceError: continue for name in imported.wildcard_import_names(): node.parent.set_local(name, node) diff --git a/pymode/libs/pylama/lint/pylama_pylint/astroid/exceptions.py b/pymode/libs/astroid/exceptions.py similarity index 100% rename from pymode/libs/pylama/lint/pylama_pylint/astroid/exceptions.py rename to pymode/libs/astroid/exceptions.py diff --git a/pymode/libs/pylama/lint/pylama_pylint/astroid/inference.py b/pymode/libs/astroid/inference.py similarity index 87% rename from pymode/libs/pylama/lint/pylama_pylint/astroid/inference.py rename to pymode/libs/astroid/inference.py index 35cce332..22807049 100644 --- a/pymode/libs/pylama/lint/pylama_pylint/astroid/inference.py +++ b/pymode/libs/astroid/inference.py @@ -25,16 +25,19 @@ from astroid import nodes from astroid.manager import AstroidManager -from astroid.exceptions import (AstroidError, - InferenceError, NoDefault, NotFoundError, UnresolvableName) -from astroid.bases import YES, Instance, InferenceContext, \ - _infer_stmts, copy_context, path_wrapper, raise_if_nothing_infered -from astroid.protocols import _arguments_infer_argname +from astroid.exceptions import (AstroidError, InferenceError, NoDefault, + NotFoundError, UnresolvableName) +from astroid.bases import (YES, Instance, InferenceContext, + _infer_stmts, copy_context, path_wrapper, + raise_if_nothing_infered) +from astroid.protocols import ( + _arguments_infer_argname, + BIN_OP_METHOD, UNARY_OP_METHOD) MANAGER = AstroidManager() -class CallContext: +class CallContext(object): """when inferring a function call, this class is used to remember values given as argument """ @@ -141,11 +144,37 @@ def infer_end(self, context=None): nodes.Dict._infer = infer_end nodes.Set._infer = infer_end +def _higher_function_scope(node): + """ Search for the first function which encloses the given + scope. 
This can be used for looking up in that function's + scope, in case looking up in a lower scope for a particular + name fails. + + :param node: A scope node. + :returns: + ``None``, if no parent function scope was found, + otherwise an instance of :class:`astroid.scoped_nodes.Function`, + which encloses the given node. + """ + current = node + while current.parent and not isinstance(current.parent, nodes.Function): + current = current.parent + if current and current.parent: + return current.parent + def infer_name(self, context=None): """infer a Name: use name lookup rules""" frame, stmts = self.lookup(self.name) if not stmts: - raise UnresolvableName(self.name) + # Try to see if the name is enclosed in a nested function + # and use the higher (first function) scope for searching. + # TODO: should this be promoted to other nodes as well? + parent_function = _higher_function_scope(self.scope()) + if parent_function: + _, stmts = parent_function.lookup(self.name) + + if not stmts: + raise UnresolvableName(self.name) context = context.clone() context.lookupname = self.name return _infer_stmts(stmts, context, frame) @@ -197,7 +226,7 @@ def infer_from(self, context=None, asname=True): raise InferenceError() if asname: name = self.real_name(name) - module = self.do_import_module(self.modname) + module = self.do_import_module() try: context = copy_context(context) context.lookupname = name @@ -209,7 +238,6 @@ def infer_from(self, context=None, asname=True): def infer_getattr(self, context=None): """infer a Getattr node by using getattr on the associated object""" - #context = context.clone() for owner in self.expr.infer(context): if owner is YES: yield owner @@ -240,12 +268,12 @@ def infer_global(self, context=None): def infer_subscript(self, context=None): """infer simple subscription such as [1,2,3][0] or (1,2,3)[-1]""" - value = self.value.infer(context).next() + value = next(self.value.infer(context)) if value is YES: yield YES return - index = 
self.slice.infer(context).next() + index = next(self.slice.infer(context)) if index is YES: yield YES return @@ -258,6 +286,12 @@ def infer_subscript(self, context=None): except (IndexError, TypeError): yield YES return + + # Prevent inferring if the infered subscript + # is the same as the original subscripted object. + if self is assigned: + yield YES + return for infered in assigned.infer(context): yield infered else: @@ -265,13 +299,6 @@ def infer_subscript(self, context=None): nodes.Subscript._infer = path_wrapper(infer_subscript) nodes.Subscript.infer_lhs = raise_if_nothing_infered(infer_subscript) - -UNARY_OP_METHOD = {'+': '__pos__', - '-': '__neg__', - '~': '__invert__', - 'not': None, # XXX not '__nonzero__' - } - def infer_unaryop(self, context=None): for operand in self.operand.infer(context): try: @@ -294,21 +321,6 @@ def infer_unaryop(self, context=None): yield YES nodes.UnaryOp._infer = path_wrapper(infer_unaryop) - -BIN_OP_METHOD = {'+': '__add__', - '-': '__sub__', - '/': '__div__', - '//': '__floordiv__', - '*': '__mul__', - '**': '__power__', - '%': '__mod__', - '&': '__and__', - '|': '__or__', - '^': '__xor__', - '<<': '__lshift__', - '>>': '__rshift__', - } - def _infer_binop(operator, operand1, operand2, context, failures=None): if operand1 is YES: yield operand1 @@ -381,7 +393,7 @@ def infer_empty_node(self, context=None): else: try: for infered in MANAGER.infer_ast_from_something(self.object, - context=context): + context=context): yield infered except AstroidError: yield YES diff --git a/pymode/libs/astroid/inspector.py b/pymode/libs/astroid/inspector.py new file mode 100644 index 00000000..1fc31926 --- /dev/null +++ b/pymode/libs/astroid/inspector.py @@ -0,0 +1,273 @@ +# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved. +# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr +# +# This file is part of astroid. 
+# +# astroid is free software: you can redistribute it and/or modify it +# under the terms of the GNU Lesser General Public License as published by the +# Free Software Foundation, either version 2.1 of the License, or (at your +# option) any later version. +# +# astroid is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License +# for more details. +# +# You should have received a copy of the GNU Lesser General Public License along +# with astroid. If not, see . +"""visitor doing some postprocessing on the astroid tree. +Try to resolve definitions (namespace) dictionary, relationship... + +This module has been imported from pyreverse +""" + +__docformat__ = "restructuredtext en" + +from os.path import dirname + +import astroid +from astroid.exceptions import InferenceError +from astroid.utils import LocalsVisitor +from astroid.modutils import get_module_part, is_relative, is_standard_module + +class IdGeneratorMixIn(object): + """ + Mixin adding the ability to generate integer uid + """ + def __init__(self, start_value=0): + self.id_count = start_value + + def init_counter(self, start_value=0): + """init the id counter + """ + self.id_count = start_value + + def generate_id(self): + """generate a new identifier + """ + self.id_count += 1 + return self.id_count + + +class Linker(IdGeneratorMixIn, LocalsVisitor): + """ + walk on the project tree and resolve relationships. + + According to options the following attributes may be added to visited nodes: + + * uid, + a unique identifier for the node (on astroid.Project, astroid.Module, + astroid.Class and astroid.locals_type). Only if the linker has been instantiated + with tag=True parameter (False by default). 
+ + * Function + a mapping from locals names to their bounded value, which may be a + constant like a string or an integer, or an astroid node (on astroid.Module, + astroid.Class and astroid.Function). + + * instance_attrs_type + as locals_type but for klass member attributes (only on astroid.Class) + + * implements, + list of implemented interface _objects_ (only on astroid.Class nodes) + """ + + def __init__(self, project, inherited_interfaces=0, tag=False): + IdGeneratorMixIn.__init__(self) + LocalsVisitor.__init__(self) + # take inherited interface in consideration or not + self.inherited_interfaces = inherited_interfaces + # tag nodes or not + self.tag = tag + # visited project + self.project = project + + + def visit_project(self, node): + """visit an astroid.Project node + + * optionally tag the node with a unique id + """ + if self.tag: + node.uid = self.generate_id() + for module in node.modules: + self.visit(module) + + def visit_package(self, node): + """visit an astroid.Package node + + * optionally tag the node with a unique id + """ + if self.tag: + node.uid = self.generate_id() + for subelmt in node.values(): + self.visit(subelmt) + + def visit_module(self, node): + """visit an astroid.Module node + + * set the locals_type mapping + * set the depends mapping + * optionally tag the node with a unique id + """ + if hasattr(node, 'locals_type'): + return + node.locals_type = {} + node.depends = [] + if self.tag: + node.uid = self.generate_id() + + def visit_class(self, node): + """visit an astroid.Class node + + * set the locals_type and instance_attrs_type mappings + * set the implements list and build it + * optionally tag the node with a unique id + """ + if hasattr(node, 'locals_type'): + return + node.locals_type = {} + if self.tag: + node.uid = self.generate_id() + # resolve ancestors + for baseobj in node.ancestors(recurs=False): + specializations = getattr(baseobj, 'specializations', []) + specializations.append(node) + baseobj.specializations = 
specializations + # resolve instance attributes + node.instance_attrs_type = {} + for assattrs in node.instance_attrs.values(): + for assattr in assattrs: + self.handle_assattr_type(assattr, node) + # resolve implemented interface + try: + node.implements = list(node.interfaces(self.inherited_interfaces)) + except InferenceError: + node.implements = () + + def visit_function(self, node): + """visit an astroid.Function node + + * set the locals_type mapping + * optionally tag the node with a unique id + """ + if hasattr(node, 'locals_type'): + return + node.locals_type = {} + if self.tag: + node.uid = self.generate_id() + + link_project = visit_project + link_module = visit_module + link_class = visit_class + link_function = visit_function + + def visit_assname(self, node): + """visit an astroid.AssName node + + handle locals_type + """ + # avoid double parsing done by different Linkers.visit + # running over the same project: + if hasattr(node, '_handled'): + return + node._handled = True + if node.name in node.frame(): + frame = node.frame() + else: + # the name has been defined as 'global' in the frame and belongs + # there. 
Btw the frame is not yet visited as the name is in the + # root locals; the frame hence has no locals_type attribute + frame = node.root() + try: + values = node.infered() + try: + already_infered = frame.locals_type[node.name] + for valnode in values: + if not valnode in already_infered: + already_infered.append(valnode) + except KeyError: + frame.locals_type[node.name] = values + except astroid.InferenceError: + pass + + def handle_assattr_type(self, node, parent): + """handle an astroid.AssAttr node + + handle instance_attrs_type + """ + try: + values = list(node.infer()) + try: + already_infered = parent.instance_attrs_type[node.attrname] + for valnode in values: + if not valnode in already_infered: + already_infered.append(valnode) + except KeyError: + parent.instance_attrs_type[node.attrname] = values + except astroid.InferenceError: + pass + + def visit_import(self, node): + """visit an astroid.Import node + + resolve module dependencies + """ + context_file = node.root().file + for name in node.names: + relative = is_relative(name[0], context_file) + self._imported_module(node, name[0], relative) + + + def visit_from(self, node): + """visit an astroid.From node + + resolve module dependencies + """ + basename = node.modname + context_file = node.root().file + if context_file is not None: + relative = is_relative(basename, context_file) + else: + relative = False + for name in node.names: + if name[0] == '*': + continue + # analyze dependencies + fullname = '%s.%s' % (basename, name[0]) + if fullname.find('.') > -1: + try: + # XXX: don't use get_module_part, missing package precedence + fullname = get_module_part(fullname, context_file) + except ImportError: + continue + if fullname != basename: + self._imported_module(node, fullname, relative) + + + def compute_module(self, context_name, mod_path): + """return true if the module should be added to dependencies""" + package_dir = dirname(self.project.path) + if context_name == mod_path: + return 0 + elif 
is_standard_module(mod_path, (package_dir,)): + return 1 + return 0 + + # protected methods ######################################################## + + def _imported_module(self, node, mod_path, relative): + """notify an imported module, used to analyze dependencies + """ + module = node.root() + context_name = module.name + if relative: + mod_path = '%s.%s' % ('.'.join(context_name.split('.')[:-1]), + mod_path) + if self.compute_module(context_name, mod_path): + # handle dependencies + if not hasattr(module, 'depends'): + module.depends = [] + mod_paths = module.depends + if not mod_path in mod_paths: + mod_paths.append(mod_path) diff --git a/pymode/libs/pylama/lint/pylama_pylint/astroid/manager.py b/pymode/libs/astroid/manager.py similarity index 73% rename from pymode/libs/pylama/lint/pylama_pylint/astroid/manager.py rename to pymode/libs/astroid/manager.py index 058e845e..b1fb3058 100644 --- a/pymode/libs/pylama/lint/pylama_pylint/astroid/manager.py +++ b/pymode/libs/astroid/manager.py @@ -19,27 +19,31 @@ possible by providing a class responsible to get astroid representation from various source and using a cache of built modules) """ +from __future__ import print_function __docformat__ = "restructuredtext en" +import collections +import imp import os from os.path import dirname, join, isdir, exists +from warnings import warn +import zipimport -from logilab.common.modutils import NoSourceFile, is_python_source, \ - file_from_modpath, load_module_from_name, modpath_from_file, \ - get_module_files, get_source_file, zipimport from logilab.common.configuration import OptionsProviderMixIn from astroid.exceptions import AstroidBuildingException +from astroid import modutils + def astroid_wrapper(func, modname): """wrapper to give to AstroidManager.project_from_files""" - print 'parsing %s...' % modname + print('parsing %s...' 
% modname) try: return func(modname) - except AstroidBuildingException, exc: - print exc - except Exception, exc: + except AstroidBuildingException as exc: + print(exc) + except Exception as exc: import traceback traceback.print_exc() @@ -73,7 +77,7 @@ class AstroidManager(OptionsProviderMixIn): {'default': "No Name", 'type' : 'string', 'short': 'p', 'metavar' : '', 'help' : 'set the project name.'}), - ) + ) brain = {} def __init__(self): self.__dict__ = AstroidManager.brain @@ -83,18 +87,22 @@ def __init__(self): # NOTE: cache entries are added by the [re]builder self.astroid_cache = {} self._mod_file_cache = {} - self.transforms = {} + self.transforms = collections.defaultdict(list) + self._failed_import_hooks = [] + self.always_load_extensions = False + self.optimize_ast = False + self.extension_package_whitelist = set() def ast_from_file(self, filepath, modname=None, fallback=True, source=False): """given a module name, return the astroid object""" try: - filepath = get_source_file(filepath, include_no_ext=True) + filepath = modutils.get_source_file(filepath, include_no_ext=True) source = True - except NoSourceFile: + except modutils.NoSourceFile: pass if modname is None: try: - modname = '.'.join(modpath_from_file(filepath)) + modname = '.'.join(modutils.modpath_from_file(filepath)) except ImportError: modname = filepath if modname in self.astroid_cache and self.astroid_cache[modname].file == filepath: @@ -105,32 +113,58 @@ def ast_from_file(self, filepath, modname=None, fallback=True, source=False): elif fallback and modname: return self.ast_from_module_name(modname) raise AstroidBuildingException('unable to get astroid for file %s' % - filepath) + filepath) + + def _build_stub_module(self, modname): + from astroid.builder import AstroidBuilder + return AstroidBuilder(self).string_build('', modname) + + def _can_load_extension(self, modname): + if self.always_load_extensions: + return True + if modutils.is_standard_module(modname): + return True + parts = 
modname.split('.') + return any( + '.'.join(parts[:x]) in self.extension_package_whitelist + for x in range(1, len(parts) + 1)) def ast_from_module_name(self, modname, context_file=None): """given a module name, return the astroid object""" if modname in self.astroid_cache: return self.astroid_cache[modname] if modname == '__main__': - from astroid.builder import AstroidBuilder - return AstroidBuilder(self).string_build('', modname) + return self._build_stub_module(modname) old_cwd = os.getcwd() if context_file: os.chdir(dirname(context_file)) try: - filepath = self.file_from_module_name(modname, context_file) - if filepath is not None and not is_python_source(filepath): + filepath, mp_type = self.file_from_module_name(modname, context_file) + if mp_type == modutils.PY_ZIPMODULE: module = self.zip_import_data(filepath) if module is not None: return module - if filepath is None or not is_python_source(filepath): + elif mp_type in (imp.C_BUILTIN, imp.C_EXTENSION): + if mp_type == imp.C_EXTENSION and not self._can_load_extension(modname): + return self._build_stub_module(modname) try: - module = load_module_from_name(modname) - except Exception, ex: + module = modutils.load_module_from_name(modname) + except Exception as ex: msg = 'Unable to load module %s (%s)' % (modname, ex) raise AstroidBuildingException(msg) return self.ast_from_module(module, modname) + elif mp_type == imp.PY_COMPILED: + raise AstroidBuildingException("Unable to load compiled module %s" % (modname,)) + if filepath is None: + raise AstroidBuildingException("Unable to load module %s" % (modname,)) return self.ast_from_file(filepath, modname, fallback=False) + except AstroidBuildingException as e: + for hook in self._failed_import_hooks: + try: + return hook(modname) + except AstroidBuildingException: + pass + raise e finally: os.chdir(old_cwd) @@ -141,14 +175,14 @@ def zip_import_data(self, filepath): builder = AstroidBuilder(self) for ext in ('.zip', '.egg'): try: - eggpath, resource = 
filepath.rsplit(ext + '/', 1) + eggpath, resource = filepath.rsplit(ext + os.path.sep, 1) except ValueError: continue try: importer = zipimport.zipimporter(eggpath + ext) - zmodname = resource.replace('/', '.') + zmodname = resource.replace(os.path.sep, '.') if importer.is_package(resource): - zmodname = zmodname + '.__init__' + zmodname = zmodname + '.__init__' module = builder.string_build(importer.get_source(resource), zmodname, filepath) return module @@ -161,9 +195,9 @@ def file_from_module_name(self, modname, contextfile): value = self._mod_file_cache[(modname, contextfile)] except KeyError: try: - value = file_from_modpath(modname.split('.'), - context_file=contextfile) - except ImportError, ex: + value = modutils.file_info_from_modpath( + modname.split('.'), context_file=contextfile) + except ImportError as ex: msg = 'Unable to load module %s (%s)' % (modname, ex) value = AstroidBuildingException(msg) self._mod_file_cache[(modname, contextfile)] = value @@ -179,7 +213,7 @@ def ast_from_module(self, module, modname=None): try: # some builtin modules don't have __file__ attribute filepath = module.__file__ - if is_python_source(filepath): + if modutils.is_python_source(filepath): return self.ast_from_file(filepath, modname) except AttributeError: pass @@ -209,7 +243,7 @@ def infer_ast_from_something(self, obj, context=None): except AttributeError: raise AstroidBuildingException( 'Unable to get module for %s' % safe_repr(klass)) - except Exception, ex: + except Exception as ex: raise AstroidBuildingException( 'Unexpected error while retrieving module for %s: %s' % (safe_repr(klass), ex)) @@ -218,7 +252,7 @@ def infer_ast_from_something(self, obj, context=None): except AttributeError: raise AstroidBuildingException( 'Unable to get name for %s' % safe_repr(klass)) - except Exception, ex: + except Exception as ex: raise AstroidBuildingException( 'Unexpected error while retrieving name for %s: %s' % (safe_repr(klass), ex)) @@ -240,7 +274,7 @@ def 
project_from_files(self, files, func_wrapper=astroid_wrapper, project = Project(project_name) for something in files: if not exists(something): - fpath = file_from_modpath(something.split('.')) + fpath = modutils.file_from_modpath(something.split('.')) elif isdir(something): fpath = join(something, '__init__.py') else: @@ -255,8 +289,8 @@ def project_from_files(self, files, func_wrapper=astroid_wrapper, # recurse in package except if __init__ was explicitly given if astroid.package and something.find('__init__') == -1: # recurse on others packages / modules if this is a package - for fpath in get_module_files(dirname(astroid.file), - black_list): + for fpath in modutils.get_module_files(dirname(astroid.file), + black_list): astroid = func_wrapper(self.ast_from_file, fpath) if astroid is None or astroid.name == base_name: continue @@ -265,17 +299,27 @@ def project_from_files(self, files, func_wrapper=astroid_wrapper, def register_transform(self, node_class, transform, predicate=None): """Register `transform(node)` function to be applied on the given - Astroid's `node_class` if `predicate` is None or return a true value + Astroid's `node_class` if `predicate` is None or returns true when called with the node as argument. The transform function may return a value which is then used to substitute the original node in the tree. """ - self.transforms.setdefault(node_class, []).append( (transform, predicate) ) + self.transforms[node_class].append((transform, predicate)) def unregister_transform(self, node_class, transform, predicate=None): """Unregister the given transform.""" - self.transforms[node_class].remove( (transform, predicate) ) + self.transforms[node_class].remove((transform, predicate)) + + def register_failed_import_hook(self, hook): + """Registers a hook to resolve imports that cannot be found otherwise. + + `hook` must be a function that accepts a single argument `modname` which + contains the name of the module or package that could not be imported. 
+ If `hook` can resolve the import, must return a node of type `astroid.Module`, + otherwise, it must raise `AstroidBuildingException`. + """ + self._failed_import_hooks.append(hook) def transform(self, node): """Call matching transforms for the given node if any and return the @@ -297,7 +341,7 @@ def transform(self, node): if node is not orig_node: # node has already be modified by some previous # transformation, warn about it - warn('node %s substitued multiple times' % node) + warn('node %s substituted multiple times' % node) node = ret return node @@ -305,6 +349,17 @@ def cache_module(self, module): """Cache a module if no module with the same name is known yet.""" self.astroid_cache.setdefault(module.name, module) + def clear_cache(self, astroid_builtin=None): + # XXX clear transforms + self.astroid_cache.clear() + # force bootstrap again, else we may ends up with cache inconsistency + # between the manager and CONST_PROXY, making + # unittest_lookup.LookupTC.test_builtin_lookup fail depending on the + # test order + import astroid.raw_building + astroid.raw_building._astroid_bootstrapping( + astroid_builtin=astroid_builtin) + class Project(object): """a project handle a set of modules / packages""" diff --git a/pymode/libs/pylama/lint/pylama_pylint/astroid/mixins.py b/pymode/libs/astroid/mixins.py similarity index 92% rename from pymode/libs/pylama/lint/pylama_pylint/astroid/mixins.py rename to pymode/libs/astroid/mixins.py index 5e7b7878..dbf1673a 100644 --- a/pymode/libs/pylama/lint/pylama_pylint/astroid/mixins.py +++ b/pymode/libs/astroid/mixins.py @@ -18,16 +18,18 @@ """This module contains some mixins for the different nodes. 
""" +from logilab.common.decorators import cachedproperty + from astroid.exceptions import (AstroidBuildingException, InferenceError, - NotFoundError) + NotFoundError) class BlockRangeMixIn(object): """override block range """ - def set_line_info(self, lastchild): - self.fromlineno = self.lineno - self.tolineno = lastchild.tolineno - self.blockstart_tolineno = self._blockstart_toline() + + @cachedproperty + def blockstart_tolineno(self): + return self.lineno def _elsed_block_range(self, lineno, orelse, last=None): """handle block line numbers range for try/finally, for, if and while @@ -85,7 +87,7 @@ class FromImportMixIn(FilterStmtsMixin): def _infer_name(self, frame, name): return name - def do_import_module(self, modname): + def do_import_module(self, modname=None): """return the ast for a module whose name is imported by """ # handle special case where we are on a package node importing a module @@ -94,6 +96,8 @@ def do_import_module(self, modname): # XXX: no more needed ? mymodule = self.root() level = getattr(self, 'level', None) # Import as no level + if modname is None: + modname = self.modname # XXX we should investigate deeper if we really want to check # importing itself: modname and mymodule.name be relative or absolute if mymodule.relative_to_absolute_name(modname, level) == mymodule.name: @@ -103,7 +107,7 @@ def do_import_module(self, modname): return mymodule.import_module(modname, level=level) except AstroidBuildingException: raise InferenceError(modname) - except SyntaxError, ex: + except SyntaxError as ex: raise InferenceError(str(ex)) def real_name(self, asname): @@ -118,5 +122,3 @@ def real_name(self, asname): return name raise NotFoundError(asname) - - diff --git a/pymode/libs/astroid/modutils.py b/pymode/libs/astroid/modutils.py new file mode 100644 index 00000000..c547f3e6 --- /dev/null +++ b/pymode/libs/astroid/modutils.py @@ -0,0 +1,670 @@ +# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved. 
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr +# +# This file is part of astroid. +# +# astroid is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 2.1 of the License, or (at your option) any +# later version. +# +# astroid is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License along +# with astroid. If not, see . +"""Python modules manipulation utility functions. + +:type PY_SOURCE_EXTS: tuple(str) +:var PY_SOURCE_EXTS: list of possible python source file extension + +:type STD_LIB_DIRS: set of str +:var STD_LIB_DIRS: directories where standard modules are located + +:type BUILTIN_MODULES: dict +:var BUILTIN_MODULES: dictionary with builtin module names has key +""" +from __future__ import with_statement + +__docformat__ = "restructuredtext en" + +import imp +import os +import sys +from distutils.sysconfig import get_python_lib +from distutils.errors import DistutilsPlatformError +import zipimport + +try: + import pkg_resources +except ImportError: + pkg_resources = None + +from logilab.common import _handle_blacklist + +PY_ZIPMODULE = object() + +if sys.platform.startswith('win'): + PY_SOURCE_EXTS = ('py', 'pyw') + PY_COMPILED_EXTS = ('dll', 'pyd') +else: + PY_SOURCE_EXTS = ('py',) + PY_COMPILED_EXTS = ('so',) + +# Notes about STD_LIB_DIRS +# Consider arch-specific installation for STD_LIB_DIRS definition +# :mod:`distutils.sysconfig` contains to much hardcoded values to rely on +# +# :see: `Problems with /usr/lib64 builds `_ +# :see: `FHS `_ +try: + # The explicit sys.prefix is to work around a patch in virtualenv that + # replaces the 'real' sys.prefix (i.e. 
the location of the binary) + # with the prefix from which the virtualenv was created. This throws + # off the detection logic for standard library modules, thus the + # workaround. + STD_LIB_DIRS = set([ + get_python_lib(standard_lib=True, prefix=sys.prefix), + # Take care of installations where exec_prefix != prefix. + get_python_lib(standard_lib=True, prefix=sys.exec_prefix), + get_python_lib(standard_lib=True)]) + if os.name == 'nt': + STD_LIB_DIRS.add(os.path.join(sys.prefix, 'dlls')) + try: + # real_prefix is defined when running inside virtualenv. + STD_LIB_DIRS.add(os.path.join(sys.real_prefix, 'dlls')) + except AttributeError: + pass +# get_python_lib(standard_lib=1) is not available on pypy, set STD_LIB_DIR to +# non-valid path, see https://bugs.pypy.org/issue1164 +except DistutilsPlatformError: + STD_LIB_DIRS = set() + +EXT_LIB_DIR = get_python_lib() + +BUILTIN_MODULES = dict(zip(sys.builtin_module_names, + [1]*len(sys.builtin_module_names))) + + +class NoSourceFile(Exception): + """exception raised when we are not able to get a python + source file for a precompiled file + """ + +def _normalize_path(path): + return os.path.normcase(os.path.abspath(path)) + + +_NORM_PATH_CACHE = {} + +def _cache_normalize_path(path): + """abspath with caching""" + # _module_file calls abspath on every path in sys.path every time it's + # called; on a larger codebase this easily adds up to half a second just + # assembling path components. This cache alleviates that. + try: + return _NORM_PATH_CACHE[path] + except KeyError: + if not path: # don't cache result for '' + return _normalize_path(path) + result = _NORM_PATH_CACHE[path] = _normalize_path(path) + return result + +def load_module_from_name(dotted_name, path=None, use_sys=1): + """Load a Python module from its name. 
+ + :type dotted_name: str + :param dotted_name: python name of a module or package + + :type path: list or None + :param path: + optional list of path where the module or package should be + searched (use sys.path if nothing or None is given) + + :type use_sys: bool + :param use_sys: + boolean indicating whether the sys.modules dictionary should be + used or not + + + :raise ImportError: if the module or package is not found + + :rtype: module + :return: the loaded module + """ + return load_module_from_modpath(dotted_name.split('.'), path, use_sys) + + +def load_module_from_modpath(parts, path=None, use_sys=1): + """Load a python module from its splitted name. + + :type parts: list(str) or tuple(str) + :param parts: + python name of a module or package splitted on '.' + + :type path: list or None + :param path: + optional list of path where the module or package should be + searched (use sys.path if nothing or None is given) + + :type use_sys: bool + :param use_sys: + boolean indicating whether the sys.modules dictionary should be used or not + + :raise ImportError: if the module or package is not found + + :rtype: module + :return: the loaded module + """ + if use_sys: + try: + return sys.modules['.'.join(parts)] + except KeyError: + pass + modpath = [] + prevmodule = None + for part in parts: + modpath.append(part) + curname = '.'.join(modpath) + module = None + if len(modpath) != len(parts): + # even with use_sys=False, should try to get outer packages from sys.modules + module = sys.modules.get(curname) + elif use_sys: + # because it may have been indirectly loaded through a parent + module = sys.modules.get(curname) + if module is None: + mp_file, mp_filename, mp_desc = imp.find_module(part, path) + module = imp.load_module(curname, mp_file, mp_filename, mp_desc) + # mp_file still needs to be closed. 
+ if mp_file: + mp_file.close() + if prevmodule: + setattr(prevmodule, part, module) + _file = getattr(module, '__file__', '') + if not _file and len(modpath) != len(parts): + raise ImportError('no module in %s' % '.'.join(parts[len(modpath):])) + path = [os.path.dirname(_file)] + prevmodule = module + return module + + +def load_module_from_file(filepath, path=None, use_sys=1, extrapath=None): + """Load a Python module from it's path. + + :type filepath: str + :param filepath: path to the python module or package + + :type path: list or None + :param path: + optional list of path where the module or package should be + searched (use sys.path if nothing or None is given) + + :type use_sys: bool + :param use_sys: + boolean indicating whether the sys.modules dictionary should be + used or not + + + :raise ImportError: if the module or package is not found + + :rtype: module + :return: the loaded module + """ + modpath = modpath_from_file(filepath, extrapath) + return load_module_from_modpath(modpath, path, use_sys) + + +def _check_init(path, mod_path): + """check there are some __init__.py all along the way""" + for part in mod_path: + path = os.path.join(path, part) + if not _has_init(path): + return False + return True + + +def modpath_from_file(filename, extrapath=None): + """given a file path return the corresponding splitted module's name + (i.e name of a module or package splitted on '.') + + :type filename: str + :param filename: file's path for which we want the module's name + + :type extrapath: dict + :param extrapath: + optional extra search path, with path as key and package name for the path + as value. This is usually useful to handle package splitted in multiple + directories using __path__ trick. 
+ + + :raise ImportError: + if the corresponding module's name has not been found + + :rtype: list(str) + :return: the corresponding splitted module's name + """ + base = os.path.splitext(os.path.abspath(filename))[0] + if extrapath is not None: + for path_ in extrapath: + path = os.path.abspath(path_) + if path and os.path.normcase(base[:len(path)]) == os.path.normcase(path): + submodpath = [pkg for pkg in base[len(path):].split(os.sep) + if pkg] + if _check_init(path, submodpath[:-1]): + return extrapath[path_].split('.') + submodpath + for path in sys.path: + path = _cache_normalize_path(path) + if path and os.path.normcase(base).startswith(path): + modpath = [pkg for pkg in base[len(path):].split(os.sep) if pkg] + if _check_init(path, modpath[:-1]): + return modpath + raise ImportError('Unable to find module for %s in %s' % ( + filename, ', \n'.join(sys.path))) + + +def file_from_modpath(modpath, path=None, context_file=None): + return file_info_from_modpath(modpath, path, context_file)[0] + +def file_info_from_modpath(modpath, path=None, context_file=None): + """given a mod path (i.e. splitted module / package name), return the + corresponding file, giving priority to source file over precompiled + file if it exists + + :type modpath: list or tuple + :param modpath: + splitted module's name (i.e name of a module or package splitted + on '.') + (this means explicit relative imports that start with dots have + empty strings in this list!) + + :type path: list or None + :param path: + optional list of path where the module or package should be + searched (use sys.path if nothing or None is given) + + :type context_file: str or None + :param context_file: + context file to consider, necessary if the identifier has been + introduced using a relative import unresolvable in the actual + context (i.e. 
modutils) + + :raise ImportError: if there is no such module in the directory + + :rtype: (str or None, import type) + :return: + the path to the module's file or None if it's an integrated + builtin module such as 'sys' + """ + if context_file is not None: + context = os.path.dirname(context_file) + else: + context = context_file + if modpath[0] == 'xml': + # handle _xmlplus + try: + return _file_from_modpath(['_xmlplus'] + modpath[1:], path, context) + except ImportError: + return _file_from_modpath(modpath, path, context) + elif modpath == ['os', 'path']: + # FIXME: currently ignoring search_path... + return os.path.__file__, imp.PY_SOURCE + return _file_from_modpath(modpath, path, context) + + +def get_module_part(dotted_name, context_file=None): + """given a dotted name return the module part of the name : + + >>> get_module_part('logilab.common.modutils.get_module_part') + 'logilab.common.modutils' + + :type dotted_name: str + :param dotted_name: full name of the identifier we are interested in + + :type context_file: str or None + :param context_file: + context file to consider, necessary if the identifier has been + introduced using a relative import unresolvable in the actual + context (i.e. modutils) + + + :raise ImportError: if there is no such module in the directory + + :rtype: str or None + :return: + the module part of the name or None if we have not been able at + all to import the given name + + XXX: deprecated, since it doesn't handle package precedence over module + (see #10066) + """ + # os.path trick + if dotted_name.startswith('os.path'): + return 'os.path' + parts = dotted_name.split('.') + if context_file is not None: + # first check for builtin module which won't be considered latter + # in that case (path != None) + if parts[0] in BUILTIN_MODULES: + if len(parts) > 2: + raise ImportError(dotted_name) + return parts[0] + # don't use += or insert, we want a new list to be created ! 
+ path = None + starti = 0 + if parts[0] == '': + assert context_file is not None, \ + 'explicit relative import, but no context_file?' + path = [] # prevent resolving the import non-relatively + starti = 1 + while parts[starti] == '': # for all further dots: change context + starti += 1 + context_file = os.path.dirname(context_file) + for i in range(starti, len(parts)): + try: + file_from_modpath(parts[starti:i+1], path=path, + context_file=context_file) + except ImportError: + if not i >= max(1, len(parts) - 2): + raise + return '.'.join(parts[:i]) + return dotted_name + + +def get_module_files(src_directory, blacklist): + """given a package directory return a list of all available python + module's files in the package and its subpackages + + :type src_directory: str + :param src_directory: + path of the directory corresponding to the package + + :type blacklist: list or tuple + :param blacklist: + optional list of files or directory to ignore, default to the value of + `logilab.common.STD_BLACKLIST` + + :rtype: list + :return: + the list of all available python module's files in the package and + its subpackages + """ + files = [] + for directory, dirnames, filenames in os.walk(src_directory): + _handle_blacklist(blacklist, dirnames, filenames) + # check for __init__.py + if not '__init__.py' in filenames: + dirnames[:] = () + continue + for filename in filenames: + if _is_python_file(filename): + src = os.path.join(directory, filename) + files.append(src) + return files + + +def get_source_file(filename, include_no_ext=False): + """given a python module's file name return the matching source file + name (the filename will be returned identically if it's a already an + absolute path to a python source file...) 
+ + :type filename: str + :param filename: python module's file name + + + :raise NoSourceFile: if no source file exists on the file system + + :rtype: str + :return: the absolute path of the source file if it exists + """ + base, orig_ext = os.path.splitext(os.path.abspath(filename)) + for ext in PY_SOURCE_EXTS: + source_path = '%s.%s' % (base, ext) + if os.path.exists(source_path): + return source_path + if include_no_ext and not orig_ext and os.path.exists(base): + return base + raise NoSourceFile(filename) + + +def is_python_source(filename): + """ + rtype: bool + return: True if the filename is a python source file + """ + return os.path.splitext(filename)[1][1:] in PY_SOURCE_EXTS + + +def is_standard_module(modname, std_path=None): + """try to guess if a module is a standard python module (by default, + see `std_path` parameter's description) + + :type modname: str + :param modname: name of the module we are interested in + + :type std_path: list(str) or tuple(str) + :param std_path: list of path considered has standard + + + :rtype: bool + :return: + true if the module: + - is located on the path listed in one of the directory in `std_path` + - is a built-in module + """ + modname = modname.split('.')[0] + try: + filename = file_from_modpath([modname]) + except ImportError: + # import failed, i'm probably not so wrong by supposing it's + # not standard... 
+ return False + # modules which are not living in a file are considered standard + # (sys and __builtin__ for instance) + if filename is None: + return True + filename = _normalize_path(filename) + if filename.startswith(_cache_normalize_path(EXT_LIB_DIR)): + return False + if std_path is None: + std_path = STD_LIB_DIRS + for path in std_path: + if filename.startswith(_cache_normalize_path(path)): + return True + return False + + + +def is_relative(modname, from_file): + """return true if the given module name is relative to the given + file name + + :type modname: str + :param modname: name of the module we are interested in + + :type from_file: str + :param from_file: + path of the module from which modname has been imported + + :rtype: bool + :return: + true if the module has been imported relatively to `from_file` + """ + if not os.path.isdir(from_file): + from_file = os.path.dirname(from_file) + if from_file in sys.path: + return False + try: + stream, _, _ = imp.find_module(modname.split('.')[0], [from_file]) + + # Close the stream to avoid ResourceWarnings. + if stream: + stream.close() + return True + except ImportError: + return False + + +# internal only functions ##################################################### + +def _file_from_modpath(modpath, path=None, context=None): + """given a mod path (i.e. 
splitted module / package name), return the + corresponding file + + this function is used internally, see `file_from_modpath`'s + documentation for more information + """ + assert len(modpath) > 0 + if context is not None: + try: + mtype, mp_filename = _module_file(modpath, [context]) + except ImportError: + mtype, mp_filename = _module_file(modpath, path) + else: + mtype, mp_filename = _module_file(modpath, path) + if mtype == imp.PY_COMPILED: + try: + return get_source_file(mp_filename), imp.PY_SOURCE + except NoSourceFile: + return mp_filename, imp.PY_COMPILED + elif mtype == imp.C_BUILTIN: + # integrated builtin module + return None, imp.C_BUILTIN + elif mtype == imp.PKG_DIRECTORY: + mp_filename = _has_init(mp_filename) + mtype = imp.PY_SOURCE + return mp_filename, mtype + +def _search_zip(modpath, pic): + for filepath, importer in pic.items(): + if importer is not None: + if importer.find_module(modpath[0]): + if not importer.find_module(os.path.sep.join(modpath)): + raise ImportError('No module named %s in %s/%s' % ( + '.'.join(modpath[1:]), filepath, modpath)) + return PY_ZIPMODULE, os.path.abspath(filepath) + os.path.sep + os.path.sep.join(modpath), filepath + raise ImportError('No module named %s' % '.'.join(modpath)) + + +def _module_file(modpath, path=None): + """get a module type / file path + + :type modpath: list or tuple + :param modpath: + splitted module's name (i.e name of a module or package splitted + on '.'), with leading empty strings for explicit relative import + + :type path: list or None + :param path: + optional list of path where the module or package should be + searched (use sys.path if nothing or None is given) + + + :rtype: tuple(int, str) + :return: the module type flag and the file path for a module + """ + # egg support compat + try: + pic = sys.path_importer_cache + _path = (path is None and sys.path or path) + for __path in _path: + if not __path in pic: + try: + pic[__path] = zipimport.zipimporter(__path) + except 
zipimport.ZipImportError: + pic[__path] = None + checkeggs = True + except AttributeError: + checkeggs = False + # pkg_resources support (aka setuptools namespace packages) + if (pkg_resources is not None + and modpath[0] in pkg_resources._namespace_packages + and modpath[0] in sys.modules + and len(modpath) > 1): + # setuptools has added into sys.modules a module object with proper + # __path__, get back information from there + module = sys.modules[modpath.pop(0)] + path = module.__path__ + imported = [] + while modpath: + modname = modpath[0] + # take care to changes in find_module implementation wrt builtin modules + # + # Python 2.6.6 (r266:84292, Sep 11 2012, 08:34:23) + # >>> imp.find_module('posix') + # (None, 'posix', ('', '', 6)) + # + # Python 3.3.1 (default, Apr 26 2013, 12:08:46) + # >>> imp.find_module('posix') + # (None, None, ('', '', 6)) + try: + stream, mp_filename, mp_desc = imp.find_module(modname, path) + except ImportError: + if checkeggs: + return _search_zip(modpath, pic)[:2] + raise + else: + # Don't forget to close the stream to avoid + # spurious ResourceWarnings. 
+ if stream: + stream.close() + + if checkeggs and mp_filename: + fullabspath = [_cache_normalize_path(x) for x in _path] + try: + pathindex = fullabspath.index(os.path.dirname(_normalize_path(mp_filename))) + emtype, emp_filename, zippath = _search_zip(modpath, pic) + if pathindex > _path.index(zippath): + # an egg takes priority + return emtype, emp_filename + except ValueError: + # XXX not in _path + pass + except ImportError: + pass + checkeggs = False + imported.append(modpath.pop(0)) + mtype = mp_desc[2] + if modpath: + if mtype != imp.PKG_DIRECTORY: + raise ImportError('No module %s in %s' % ('.'.join(modpath), + '.'.join(imported))) + # XXX guess if package is using pkgutil.extend_path by looking for + # those keywords in the first four Kbytes + try: + with open(os.path.join(mp_filename, '__init__.py'), 'rb') as stream: + data = stream.read(4096) + except IOError: + path = [mp_filename] + else: + if b'pkgutil' in data and b'extend_path' in data: + # extend_path is called, search sys.path for module/packages + # of this name see pkgutil.extend_path documentation + path = [os.path.join(p, *imported) for p in sys.path + if os.path.isdir(os.path.join(p, *imported))] + else: + path = [mp_filename] + return mtype, mp_filename + +def _is_python_file(filename): + """return true if the given filename should be considered as a python file + + .pyc and .pyo are ignored + """ + for ext in ('.py', '.so', '.pyd', '.pyw'): + if filename.endswith(ext): + return True + return False + + +def _has_init(directory): + """if the given directory has a valid __init__ file, return its path, + else return None + """ + mod_or_pack = os.path.join(directory, '__init__') + for ext in PY_SOURCE_EXTS + ('pyc', 'pyo'): + if os.path.exists(mod_or_pack + '.' + ext): + return mod_or_pack + '.' 
+ ext + return None diff --git a/pymode/libs/pylama/lint/pylama_pylint/astroid/node_classes.py b/pymode/libs/astroid/node_classes.py similarity index 88% rename from pymode/libs/pylama/lint/pylama_pylint/astroid/node_classes.py rename to pymode/libs/astroid/node_classes.py index 01dc8d92..4b413ef8 100644 --- a/pymode/libs/pylama/lint/pylama_pylint/astroid/node_classes.py +++ b/pymode/libs/astroid/node_classes.py @@ -20,11 +20,16 @@ import sys +import six +from logilab.common.decorators import cachedproperty + from astroid.exceptions import NoDefault from astroid.bases import (NodeNG, Statement, Instance, InferenceContext, - _infer_stmts, YES, BUILTINS) -from astroid.mixins import BlockRangeMixIn, AssignTypeMixin, \ - ParentAssignTypeMixin, FromImportMixIn + _infer_stmts, YES, BUILTINS) +from astroid.mixins import (BlockRangeMixIn, AssignTypeMixin, + ParentAssignTypeMixin, FromImportMixIn) + +PY3K = sys.version_info >= (3, 0) def unpack_infer(stmt, context=None): @@ -37,7 +42,7 @@ def unpack_infer(stmt, context=None): yield infered_elt return # if infered is a final node, return it and stop - infered = stmt.infer(context).next() + infered = next(stmt.infer(context)) if infered is stmt: yield infered return @@ -82,16 +87,16 @@ def are_exclusive(stmt1, stmt2, exceptions=None): # nodes are in exclusive branches if isinstance(node, If) and exceptions is None: if (node.locate_child(previous)[1] - is not node.locate_child(children[node])[1]): + is not node.locate_child(children[node])[1]): return True elif isinstance(node, TryExcept): c2attr, c2node = node.locate_child(previous) c1attr, c1node = node.locate_child(children[node]) if c1node is not c2node: if ((c2attr == 'body' and c1attr == 'handlers' and children[node].catch(exceptions)) or - (c2attr == 'handlers' and c1attr == 'body' and previous.catch(exceptions)) or - (c2attr == 'handlers' and c1attr == 'orelse') or - (c2attr == 'orelse' and c1attr == 'handlers')): + (c2attr == 'handlers' and c1attr == 'body' and 
previous.catch(exceptions)) or + (c2attr == 'handlers' and c1attr == 'orelse') or + (c2attr == 'orelse' and c1attr == 'handlers')): return True elif c2attr == 'handlers' and c1attr == 'handlers': return previous is not children[node] @@ -108,13 +113,13 @@ class LookupMixIn(object): def lookup(self, name): """lookup a variable name - return the scope node and the list of assignments associated to the given - name according to the scope where it has been found (locals, globals or - builtin) + return the scope node and the list of assignments associated to the + given name according to the scope where it has been found (locals, + globals or builtin) - The lookup is starting from self's scope. If self is not a frame itself and - the name is found in the inner frame locals, statements will be filtered - to remove ignorable statements according to self's location + The lookup is starting from self's scope. If self is not a frame itself + and the name is found in the inner frame locals, statements will be + filtered to remove ignorable statements according to self's location """ return self.scope().scope_lookup(self, name) @@ -144,6 +149,20 @@ def _filter_stmts(self, stmts, frame, offset): myframe = self.frame().parent.frame() else: myframe = self.frame() + # If the frame of this node is the same as the statement + # of this node, then the node is part of a class or + # a function definition and the frame of this node should be the + # the upper frame, not the frame of the definition. + # For more information why this is important, + # see Pylint issue #295. + # For example, for 'b', the statement is the same + # as the frame / scope: + # + # def test(b=1): + # ... 
+ + if self.statement() is myframe and myframe.parent: + myframe = myframe.parent.frame() if not myframe is frame or self is frame: return stmts mystmt = self.statement() @@ -253,7 +272,26 @@ class Name(LookupMixIn, NodeNG): class Arguments(NodeNG, AssignTypeMixin): """class representing an Arguments node""" - _astroid_fields = ('args', 'defaults', 'kwonlyargs', 'kw_defaults') + if PY3K: + # Python 3.4+ uses a different approach regarding annotations, + # each argument is a new class, _ast.arg, which exposes an + # 'annotation' attribute. In astroid though, arguments are exposed + # as is in the Arguments node and the only way to expose annotations + # is by using something similar with Python 3.3: + # - we expose 'varargannotation' and 'kwargannotation' of annotations + # of varargs and kwargs. + # - we expose 'annotation', a list with annotations for + # for each normal argument. If an argument doesn't have an + # annotation, its value will be None. + + _astroid_fields = ('args', 'defaults', 'kwonlyargs', + 'kw_defaults', 'annotations', + 'varargannotation', 'kwargannotation') + annotations = None + varargannotation = None + kwargannotation = None + else: + _astroid_fields = ('args', 'defaults', 'kwonlyargs', 'kw_defaults') args = None defaults = None kwonlyargs = None @@ -268,6 +306,11 @@ def _infer_name(self, frame, name): return name return None + @cachedproperty + def fromlineno(self): + lineno = super(Arguments, self).fromlineno + return max(lineno, self.parent.fromlineno or 0) + def format_args(self): """return arguments formatted as string""" result = [] @@ -423,7 +466,7 @@ def last_child(self): class Comprehension(NodeNG): """class representing a Comprehension node""" - _astroid_fields = ('target', 'iter' ,'ifs') + _astroid_fields = ('target', 'iter', 'ifs') target = None iter = None ifs = None @@ -454,7 +497,7 @@ def __init__(self, value=None): self.value = value def getitem(self, index, context=None): - if isinstance(self.value, basestring): + if 
isinstance(self.value, six.string_types): return Const(self.value[index]) raise TypeError('%r (value=%s)' % (self, self.value)) @@ -462,7 +505,7 @@ def has_dynamic_getattr(self): return False def itered(self): - if isinstance(self.value, basestring): + if isinstance(self.value, six.string_types): return self.value raise TypeError() @@ -507,7 +550,7 @@ def __init__(self, items=None): self.items = [] else: self.items = [(const_factory(k), const_factory(v)) - for k,v in items.iteritems()] + for k, v in items.items()] def pytype(self): return '%s.dict' % BUILTINS @@ -533,7 +576,8 @@ def getitem(self, lookup_key, context=None): for inferedkey in key.infer(context): if inferedkey is YES: continue - if isinstance(inferedkey, Const) and inferedkey.value == lookup_key: + if isinstance(inferedkey, Const) \ + and inferedkey.value == lookup_key: return value # This should raise KeyError, but all call sites only catch # IndexError. Let's leave it like that for now. @@ -561,7 +605,8 @@ class ExceptHandler(Statement, AssignTypeMixin): name = None body = None - def _blockstart_toline(self): + @cachedproperty + def blockstart_tolineno(self): if self.name: return self.name.tolineno elif self.type: @@ -569,11 +614,6 @@ def _blockstart_toline(self): else: return self.lineno - def set_line_info(self, lastchild): - self.fromlineno = self.lineno - self.tolineno = lastchild.tolineno - self.blockstart_tolineno = self._blockstart_toline() - def catch(self, exceptions): if self.type is None or exceptions is None: return True @@ -604,14 +644,15 @@ class For(BlockRangeMixIn, AssignTypeMixin, Statement): orelse = None optional_assign = True - def _blockstart_toline(self): + @cachedproperty + def blockstart_tolineno(self): return self.iter.tolineno class From(FromImportMixIn, Statement): """class representing a From node""" - def __init__(self, fromname, names, level=0): + def __init__(self, fromname, names, level=0): self.modname = fromname self.names = names self.level = level @@ -639,7 +680,8 
@@ class If(BlockRangeMixIn, Statement): body = None orelse = None - def _blockstart_toline(self): + @cachedproperty + def blockstart_tolineno(self): return self.test.tolineno def block_range(self, lineno): @@ -790,9 +832,6 @@ class TryExcept(BlockRangeMixIn, Statement): def _infer_name(self, frame, name): return name - def _blockstart_toline(self): - return self.lineno - def block_range(self, lineno): """handle block line numbers range for try/except statements""" last = None @@ -812,15 +851,12 @@ class TryFinally(BlockRangeMixIn, Statement): body = None finalbody = None - def _blockstart_toline(self): - return self.lineno - def block_range(self, lineno): """handle block line numbers range for try/finally statements""" child = self.body[0] # py2.5 try: except: finally: if (isinstance(child, TryExcept) and child.fromlineno == self.fromlineno - and lineno > self.fromlineno and lineno <= child.tolineno): + and lineno > self.fromlineno and lineno <= child.tolineno): return child.block_range(lineno) return self._elsed_block_range(lineno, self.finalbody) @@ -858,7 +894,8 @@ class While(BlockRangeMixIn, Statement): body = None orelse = None - def _blockstart_toline(self): + @cachedproperty + def blockstart_tolineno(self): return self.test.tolineno def block_range(self, lineno): @@ -872,7 +909,8 @@ class With(BlockRangeMixIn, AssignTypeMixin, Statement): items = None body = None - def _blockstart_toline(self): + @cachedproperty + def blockstart_tolineno(self): return self.items[-1][0].tolineno def get_children(self): @@ -889,7 +927,7 @@ class Yield(NodeNG): value = None class YieldFrom(Yield): - """ Class representing a YieldFrom node. """ + """ Class representing a YieldFrom node. 
""" # constants ############################################################## diff --git a/pymode/libs/pylama/lint/pylama_pylint/astroid/nodes.py b/pymode/libs/astroid/nodes.py similarity index 98% rename from pymode/libs/pylama/lint/pylama_pylint/astroid/nodes.py rename to pymode/libs/astroid/nodes.py index 263ab476..67c2f8e8 100644 --- a/pymode/libs/pylama/lint/pylama_pylint/astroid/nodes.py +++ b/pymode/libs/astroid/nodes.py @@ -34,6 +34,7 @@ """ +# pylint: disable=unused-import __docformat__ = "restructuredtext en" diff --git a/pymode/libs/pylama/lint/pylama_pylint/astroid/protocols.py b/pymode/libs/astroid/protocols.py similarity index 75% rename from pymode/libs/pylama/lint/pylama_pylint/astroid/protocols.py rename to pymode/libs/astroid/protocols.py index e66b802c..4c11f9cf 100644 --- a/pymode/libs/pylama/lint/pylama_pylint/astroid/protocols.py +++ b/pymode/libs/astroid/protocols.py @@ -20,14 +20,35 @@ """ __doctype__ = "restructuredtext en" +import collections -from astroid.exceptions import InferenceError, NoDefault +from astroid.exceptions import InferenceError, NoDefault, NotFoundError from astroid.node_classes import unpack_infer -from astroid.bases import copy_context, \ +from astroid.bases import InferenceContext, copy_context, \ raise_if_nothing_infered, yes_if_nothing_infered, Instance, YES from astroid.nodes import const_factory from astroid import nodes +BIN_OP_METHOD = {'+': '__add__', + '-': '__sub__', + '/': '__div__', + '//': '__floordiv__', + '*': '__mul__', + '**': '__power__', + '%': '__mod__', + '&': '__and__', + '|': '__or__', + '^': '__xor__', + '<<': '__lshift__', + '>>': '__rshift__', + } + +UNARY_OP_METHOD = {'+': '__pos__', + '-': '__neg__', + '~': '__invert__', + 'not': None, # XXX not '__nonzero__' + } + # unary operations ############################################################ def tl_infer_unary_op(self, operator): @@ -70,8 +91,8 @@ def const_infer_unary_op(self, operator): '^': lambda a, b: a ^ b, '<<': lambda a, b: a << b, 
'>>': lambda a, b: a >> b, - } -for key, impl in BIN_OP_IMPL.items(): + } +for key, impl in list(BIN_OP_IMPL.items()): BIN_OP_IMPL[key+'='] = impl def const_infer_binary_op(self, operator, other, context): @@ -133,6 +154,25 @@ def dict_infer_binary_op(self, operator, other, context): # XXX else log TypeError nodes.Dict.infer_binary_op = yes_if_nothing_infered(dict_infer_binary_op) +def instance_infer_binary_op(self, operator, other, context): + try: + methods = self.getattr(BIN_OP_METHOD[operator]) + except (NotFoundError, KeyError): + # Unknown operator + yield YES + else: + for method in methods: + if not isinstance(method, nodes.Function): + continue + for result in method.infer_call_result(self, context): + if result is not YES: + yield result + # We are interested only in the first infered method, + # don't go looking in the rest of the methods of the ancestors. + break + +Instance.infer_binary_op = yes_if_nothing_infered(instance_infer_binary_op) + # assignment ################################################################## @@ -166,7 +206,7 @@ def _resolve_looppart(parts, asspath, context): assigned = stmt.getitem(index, context) except (AttributeError, IndexError): continue - except TypeError, exc: # stmt is unsubscriptable Const + except TypeError: # stmt is unsubscriptable Const continue if not asspath: # we achieved to resolved the assignment path, @@ -231,10 +271,14 @@ def _arguments_infer_argname(self, name, context): yield self.parent.parent.frame() return if name == self.vararg: - yield const_factory(()) + vararg = const_factory(()) + vararg.parent = self + yield vararg return if name == self.kwarg: - yield const_factory({}) + kwarg = const_factory({}) + kwarg.parent = self + yield kwarg return # if there is a default value, yield it. 
And then yield YES to reflect # we can't guess given argument value @@ -253,11 +297,8 @@ def arguments_assigned_stmts(self, node, context, asspath=None): callcontext = context.callcontext context = copy_context(context) context.callcontext = None - for infered in callcontext.infer_argument(self.parent, node.name, context): - yield infered - return - for infered in _arguments_infer_argname(self, node.name, context): - yield infered + return callcontext.infer_argument(self.parent, node.name, context) + return _arguments_infer_argname(self, node.name, context) nodes.Arguments.assigned_stmts = arguments_assigned_stmts @@ -320,3 +361,55 @@ def with_assigned_stmts(self, node, context=None, asspath=None): nodes.With.assigned_stmts = raise_if_nothing_infered(with_assigned_stmts) +def starred_assigned_stmts(self, node=None, context=None, asspath=None): + stmt = self.statement() + if not isinstance(stmt, (nodes.Assign, nodes.For)): + raise InferenceError() + + if isinstance(stmt, nodes.Assign): + value = stmt.value + lhs = stmt.targets[0] + + if sum(1 for node in lhs.nodes_of_class(nodes.Starred)) > 1: + # Too many starred arguments in the expression. + raise InferenceError() + + if context is None: + context = InferenceContext() + try: + rhs = next(value.infer(context)) + except InferenceError: + yield YES + return + if rhs is YES or not hasattr(rhs, 'elts'): + # Not interested in inferred values without elts. + yield YES + return + + elts = collections.deque(rhs.elts[:]) + if len(lhs.elts) > len(rhs.elts): + # a, *b, c = (1, 2) + raise InferenceError() + + # Unpack iteratively the values from the rhs of the assignment, + # until we find the starred node. What will remain will + # be the list of values which the Starred node will represent + # This is done in two steps, from left to right to remove + # anything before the starred node and from right to left + # to remove anything after the starred node.
+ + for index, node in enumerate(lhs.elts): + if not isinstance(node, nodes.Starred): + elts.popleft() + continue + lhs_elts = collections.deque(reversed(lhs.elts[index:])) + for node in lhs_elts: + if not isinstance(node, nodes.Starred): + elts.pop() + continue + # We're done + for elt in elts: + yield elt + break + +nodes.Starred.assigned_stmts = starred_assigned_stmts diff --git a/pymode/libs/pylama/lint/pylama_pylint/astroid/raw_building.py b/pymode/libs/astroid/raw_building.py similarity index 92% rename from pymode/libs/pylama/lint/pylama_pylint/astroid/raw_building.py rename to pymode/libs/astroid/raw_building.py index bb685a9e..99a026a7 100644 --- a/pymode/libs/pylama/lint/pylama_pylint/astroid/raw_building.py +++ b/pymode/libs/astroid/raw_building.py @@ -25,10 +25,11 @@ from os.path import abspath from inspect import (getargspec, isdatadescriptor, isfunction, ismethod, ismethoddescriptor, isclass, isbuiltin, ismodule) +import six from astroid.node_classes import CONST_CLS from astroid.nodes import (Module, Class, Const, const_factory, From, - Function, EmptyNode, Name, Arguments) + Function, EmptyNode, Name, Arguments) from astroid.bases import BUILTINS, Generator from astroid.manager import AstroidManager MANAGER = AstroidManager() @@ -57,7 +58,10 @@ def attach_dummy_node(node, name, object=_marker): enode.object = object _attach_local_node(node, enode, name) -EmptyNode.has_underlying_object = lambda self: self.object is not _marker +def _has_underlying_object(self): + return hasattr(self, 'object') and self.object is not _marker + +EmptyNode.has_underlying_object = _has_underlying_object def attach_const_node(node, name, value): """create a Const node and register it in the locals of the given @@ -150,7 +154,7 @@ def object_build_function(node, member, localname): if varkw is not None: args.append(varkw) func = build_function(getattr(member, '__name__', None) or localname, args, - defaults, member.func_code.co_flags, member.__doc__) + defaults, 
six.get_function_code(member).co_flags, member.__doc__) node.add_local_node(func, localname) def object_build_datadescriptor(node, member, name): @@ -247,10 +251,11 @@ def object_build(self, node, obj): attach_dummy_node(node, name) continue if ismethod(member): - member = member.im_func + member = six.get_method_function(member) if isfunction(member): # verify this is not an imported function - filename = getattr(member.func_code, 'co_filename', None) + filename = getattr(six.get_function_code(member), + 'co_filename', None) if filename is None: assert isinstance(member, object) object_build_methoddescriptor(node, member, name) @@ -258,11 +263,9 @@ def object_build(self, node, obj): attach_dummy_node(node, name, member) else: object_build_function(node, member, name) - elif isbuiltin(member): + elif isbuiltin(member): if (not _io_discrepancy(member) and - self.imported_member(node, member, name)): - #if obj is object: - # print 'skippp', obj, name, member + self.imported_member(node, member, name)): continue object_build_methoddescriptor(node, member, name) elif isclass(member): @@ -299,7 +302,7 @@ def imported_member(self, node, member, name): modname = getattr(member, '__module__', None) except: # XXX use logging - print 'unexpected error while building astroid from living object' + print('unexpected error while building astroid from living object') import traceback traceback.print_exc() modname = None @@ -325,16 +328,18 @@ def imported_member(self, node, member, name): return False -### astroid boot strapping ################################################### ### +### astroid bootstrapping ###################################################### Astroid_BUILDER = InspectBuilder() _CONST_PROXY = {} -def astroid_boot_strapping(): +def _astroid_bootstrapping(astroid_builtin=None): """astroid boot strapping the builtins module""" # this boot strapping is necessary since we need the Const nodes to # inspect_build builtins, and then we can proxy Const - from 
logilab.common.compat import builtins - astroid_builtin = Astroid_BUILDER.inspect_build(builtins) + if astroid_builtin is None: + from logilab.common.compat import builtins + astroid_builtin = Astroid_BUILDER.inspect_build(builtins) + for cls, node_cls in CONST_CLS.items(): if cls is type(None): proxy = build_class('NoneType') @@ -346,7 +351,7 @@ def astroid_boot_strapping(): else: _CONST_PROXY[cls] = proxy -astroid_boot_strapping() +_astroid_bootstrapping() # TODO : find a nicer way to handle this situation; # However __proxied introduced an diff --git a/pymode/libs/pylama/lint/pylama_pylint/astroid/rebuilder.py b/pymode/libs/astroid/rebuilder.py similarity index 87% rename from pymode/libs/pylama/lint/pylama_pylint/astroid/rebuilder.py rename to pymode/libs/astroid/rebuilder.py index 40a614f8..013479a8 100644 --- a/pymode/libs/pylama/lint/pylama_pylint/astroid/rebuilder.py +++ b/pymode/libs/astroid/rebuilder.py @@ -20,10 +20,10 @@ """ import sys -from warnings import warn -from _ast import (Expr as Discard, Str, Name, Attribute, +from _ast import ( + Expr as Discard, Str, # binary operators - Add, Div, FloorDiv, Mod, Mult, Pow, Sub, BitAnd, BitOr, BitXor, + Add, BinOp, Div, FloorDiv, Mod, Mult, Pow, Sub, BitAnd, BitOr, BitXor, LShift, RShift, # logical operators And, Or, @@ -34,6 +34,7 @@ ) from astroid import nodes as new +from astroid import astpeephole _BIN_OP_CLASSES = {Add: '+', @@ -47,15 +48,18 @@ Pow: '**', Sub: '-', LShift: '<<', - RShift: '>>'} + RShift: '>>', + } _BOOL_OP_CLASSES = {And: 'and', - Or: 'or'} + Or: 'or', + } _UNARY_OP_CLASSES = {UAdd: '+', USub: '-', Not: 'not', - Invert: '~'} + Invert: '~', + } _CMP_OP_CLASSES = {Eq: '==', Gt: '>', @@ -66,11 +70,13 @@ Lt: '<', LtE: '<=', NotEq: '!=', - NotIn: 'not in'} + NotIn: 'not in', + } CONST_NAME_TRANSFORMS = {'None': None, 'True': True, - 'False': False} + 'False': False, + } REDIRECT = {'arguments': 'Arguments', 'Attribute': 'Getattr', @@ -86,7 +92,7 @@ 'ImportFrom': 'From', 'keyword': 'Keyword', 
'Repr': 'Backquote', - } + } PY3K = sys.version_info >= (3, 0) PY34 = sys.version_info >= (3, 4) @@ -94,7 +100,6 @@ def _init_set_doc(node, newnode): newnode.doc = None try: if isinstance(node.body[0], Discard) and isinstance(node.body[0].value, Str): - newnode.tolineno = node.body[0].lineno newnode.doc = node.body[0].value.s node.body = node.body[1:] @@ -103,10 +108,8 @@ def _init_set_doc(node, newnode): def _lineno_parent(oldnode, newnode, parent): newnode.parent = parent - if hasattr(oldnode, 'lineno'): - newnode.lineno = oldnode.lineno - if hasattr(oldnode, 'col_offset'): - newnode.col_offset = oldnode.col_offset + newnode.lineno = oldnode.lineno + newnode.col_offset = oldnode.col_offset def _set_infos(oldnode, newnode, parent): newnode.parent = parent @@ -114,20 +117,12 @@ def _set_infos(oldnode, newnode, parent): newnode.lineno = oldnode.lineno if hasattr(oldnode, 'col_offset'): newnode.col_offset = oldnode.col_offset - newnode.set_line_info(newnode.last_child()) # set_line_info accepts None - -def _infer_metaclass(node): - if isinstance(node, Name): - return node.id - elif isinstance(node, Attribute): - return node.attr def _create_yield_node(node, parent, rebuilder, factory): newnode = factory() _lineno_parent(node, newnode, parent) if node.value is not None: newnode.value = rebuilder.visit(node.value, newnode) - newnode.set_line_info(newnode.last_child()) return newnode @@ -137,21 +132,21 @@ class TreeRebuilder(object): def __init__(self, manager): self._manager = manager self.asscontext = None - self._metaclass = [''] self._global_names = [] self._from_nodes = [] self._delayed_assattr = [] self._visit_meths = {} self._transform = manager.transform + self._peepholer = astpeephole.ASTPeepholeOptimizer() - def visit_module(self, node, modname, package): + def visit_module(self, node, modname, modpath, package): """visit a Module node by returning a fresh instance of it""" newnode = new.Module(modname, None) newnode.package = package - _lineno_parent(node, 
newnode, parent=None) + newnode.parent = None _init_set_doc(node, newnode) newnode.body = [self.visit(child, newnode) for child in node.body] - newnode.set_line_info(newnode.last_child()) + newnode.file = newnode.path = modpath return self._transform(newnode) def visit(self, node, parent): @@ -176,7 +171,7 @@ def _save_assignment(self, node, name=None): def visit_arguments(self, node, parent): """visit a Arguments node by returning a fresh instance of it""" newnode = new.Arguments() - _lineno_parent(node, newnode, parent) + newnode.parent = parent self.asscontext = "Ass" newnode.args = [self.visit(child, newnode) for child in node.args] self.asscontext = None @@ -186,10 +181,25 @@ def visit_arguments(self, node, parent): vararg, kwarg = node.vararg, node.kwarg # change added in 82732 (7c5c678e4164), vararg and kwarg # are instances of `_ast.arg`, not strings - if vararg and PY34: - vararg = vararg.arg - if kwarg and PY34: - kwarg = kwarg.arg + if vararg: + if PY34: + if vararg.annotation: + newnode.varargannotation = self.visit(vararg.annotation, + newnode) + vararg = vararg.arg + elif PY3K and node.varargannotation: + newnode.varargannotation = self.visit(node.varargannotation, + newnode) + if kwarg: + if PY34: + if kwarg.annotation: + newnode.kwargannotation = self.visit(kwarg.annotation, + newnode) + kwarg = kwarg.arg + elif PY3K: + if node.kwargannotation: + newnode.kwargannotation = self.visit(node.kwargannotation, + newnode) newnode.vararg = vararg newnode.kwarg = kwarg # save argument names in locals: @@ -197,7 +207,6 @@ def visit_arguments(self, node, parent): newnode.parent.set_local(vararg, newnode) if kwarg: newnode.parent.set_local(kwarg, newnode) - newnode.set_line_info(newnode.last_child()) return newnode def visit_assattr(self, node, parent): @@ -208,7 +217,6 @@ def visit_assattr(self, node, parent): newnode.expr = self.visit(node.expr, newnode) self.asscontext = assc self._delayed_assattr.append(newnode) - newnode.set_line_info(newnode.last_child()) 
return newnode def visit_assert(self, node, parent): @@ -218,7 +226,6 @@ def visit_assert(self, node, parent): newnode.test = self.visit(node.test, newnode) if node.msg is not None: newnode.fail = self.visit(node.msg, newnode) - newnode.set_line_info(newnode.last_child()) return newnode def visit_assign(self, node, parent): @@ -232,8 +239,8 @@ def visit_assign(self, node, parent): # set some function or metaclass infos XXX explain ? klass = newnode.parent.frame() if (isinstance(klass, new.Class) - and isinstance(newnode.value, new.CallFunc) - and isinstance(newnode.value.func, new.Name)): + and isinstance(newnode.value, new.CallFunc) + and isinstance(newnode.value.func, new.Name)): func_name = newnode.value.func.name for ass_node in newnode.targets: try: @@ -246,10 +253,6 @@ def visit_assign(self, node, parent): meth.extra_decorators.append(newnode.value) except (AttributeError, KeyError): continue - elif getattr(newnode.targets[0], 'name', None) == '__metaclass__': - # XXX check more... - self._metaclass[-1] = _infer_metaclass(node.value) - newnode.set_line_info(newnode.last_child()) return newnode def visit_assname(self, node, parent, node_name=None): @@ -269,7 +272,6 @@ def visit_augassign(self, node, parent): newnode.target = self.visit(node.target, newnode) self.asscontext = None newnode.value = self.visit(node.value, newnode) - newnode.set_line_info(newnode.last_child()) return newnode def visit_backquote(self, node, parent): @@ -277,17 +279,33 @@ def visit_backquote(self, node, parent): newnode = new.Backquote() _lineno_parent(node, newnode, parent) newnode.value = self.visit(node.value, newnode) - newnode.set_line_info(newnode.last_child()) return newnode def visit_binop(self, node, parent): """visit a BinOp node by returning a fresh instance of it""" + if isinstance(node.left, BinOp) and self._manager.optimize_ast: + # Optimize BinOp operations in order to remove + # redundant recursion. 
For instance, if the + # following code is parsed in order to obtain + # its ast, then the rebuilder will fail with an + # infinite recursion, the same will happen with the + # inference engine as well. There's no need to hold + # so many objects for the BinOp if they can be reduced + # to something else (also, the optimization + # might handle only Const binops, which isn't a big + # problem for the correctness of the program). + # + # ("a" + "b" + # one thousand more + "c") + newnode = self._peepholer.optimize_binop(node) + if newnode: + _lineno_parent(node, newnode, parent) + return newnode + newnode = new.BinOp() _lineno_parent(node, newnode, parent) newnode.left = self.visit(node.left, newnode) newnode.right = self.visit(node.right, newnode) newnode.op = _BIN_OP_CLASSES[node.op.__class__] - newnode.set_line_info(newnode.last_child()) return newnode def visit_boolop(self, node, parent): @@ -296,7 +314,6 @@ def visit_boolop(self, node, parent): _lineno_parent(node, newnode, parent) newnode.values = [self.visit(child, newnode) for child in node.values] newnode.op = _BOOL_OP_CLASSES[node.op.__class__] - newnode.set_line_info(newnode.last_child()) return newnode def visit_break(self, node, parent): @@ -315,13 +332,12 @@ def visit_callfunc(self, node, parent): newnode.starargs = self.visit(node.starargs, newnode) if node.kwargs is not None: newnode.kwargs = self.visit(node.kwargs, newnode) - newnode.args.extend(self.visit(child, newnode) for child in node.keywords) - newnode.set_line_info(newnode.last_child()) + for child in node.keywords: + newnode.args.append(self.visit(child, newnode)) return newnode def visit_class(self, node, parent): """visit a Class node to become astroid""" - self._metaclass.append(self._metaclass[-1]) newnode = new.Class(node.name, None) _lineno_parent(node, newnode, parent) _init_set_doc(node, newnode) @@ -329,15 +345,6 @@ def visit_class(self, node, parent): newnode.body = [self.visit(child, newnode) for child in node.body] if 
'decorator_list' in node._fields and node.decorator_list:# py >= 2.6 newnode.decorators = self.visit_decorators(node, newnode) - newnode.set_line_info(newnode.last_child()) - metaclass = self._metaclass.pop() - if PY3K: - newnode._newstyle = True - else: - if not newnode.bases: - # no base classes, detect new / style old style according to - # current scope - newnode._newstyle = metaclass in ('type', 'ABCMeta') newnode.parent.frame().set_local(newnode.name, newnode) return newnode @@ -359,20 +366,18 @@ def visit_compare(self, node, parent): _lineno_parent(node, newnode, parent) newnode.left = self.visit(node.left, newnode) newnode.ops = [(_CMP_OP_CLASSES[op.__class__], self.visit(expr, newnode)) - for (op, expr) in zip(node.ops, node.comparators)] - newnode.set_line_info(newnode.last_child()) + for (op, expr) in zip(node.ops, node.comparators)] return newnode def visit_comprehension(self, node, parent): """visit a Comprehension node by returning a fresh instance of it""" newnode = new.Comprehension() - _lineno_parent(node, newnode, parent) + newnode.parent = parent self.asscontext = "Ass" newnode.target = self.visit(node.target, newnode) self.asscontext = None newnode.iter = self.visit(node.iter, newnode) newnode.ifs = [self.visit(child, newnode) for child in node.ifs] - newnode.set_line_info(newnode.last_child()) return newnode def visit_decorators(self, node, parent): @@ -384,9 +389,8 @@ def visit_decorators(self, node, parent): if 'decorators' in node._fields: # py < 2.6, i.e. 
2.5 decorators = node.decorators else: - decorators= node.decorator_list + decorators = node.decorator_list newnode.nodes = [self.visit(child, newnode) for child in decorators] - newnode.set_line_info(newnode.last_child()) return newnode def visit_delete(self, node, parent): @@ -396,7 +400,6 @@ def visit_delete(self, node, parent): self.asscontext = "Del" newnode.targets = [self.visit(child, newnode) for child in node.targets] self.asscontext = None - newnode.set_line_info(newnode.last_child()) return newnode def visit_dict(self, node, parent): @@ -404,8 +407,7 @@ def visit_dict(self, node, parent): newnode = new.Dict() _lineno_parent(node, newnode, parent) newnode.items = [(self.visit(key, newnode), self.visit(value, newnode)) - for key, value in zip(node.keys, node.values)] - newnode.set_line_info(newnode.last_child()) + for key, value in zip(node.keys, node.values)] return newnode def visit_dictcomp(self, node, parent): @@ -416,7 +418,6 @@ def visit_dictcomp(self, node, parent): newnode.value = self.visit(node.value, newnode) newnode.generators = [self.visit(child, newnode) for child in node.generators] - newnode.set_line_info(newnode.last_child()) return newnode def visit_discard(self, node, parent): @@ -424,7 +425,6 @@ def visit_discard(self, node, parent): newnode = new.Discard() _lineno_parent(node, newnode, parent) newnode.value = self.visit(node.value, newnode) - newnode.set_line_info(newnode.last_child()) return newnode def visit_ellipsis(self, node, parent): @@ -451,7 +451,6 @@ def visit_excepthandler(self, node, parent): newnode.name = self.visit(node.name, newnode) self.asscontext = None newnode.body = [self.visit(child, newnode) for child in node.body] - newnode.set_line_info(newnode.last_child()) return newnode def visit_exec(self, node, parent): @@ -463,15 +462,13 @@ def visit_exec(self, node, parent): newnode.globals = self.visit(node.globals, newnode) if node.locals is not None: newnode.locals = self.visit(node.locals, newnode) - 
newnode.set_line_info(newnode.last_child()) return newnode def visit_extslice(self, node, parent): """visit an ExtSlice node by returning a fresh instance of it""" newnode = new.ExtSlice() - _lineno_parent(node, newnode, parent) + newnode.parent = parent newnode.dims = [self.visit(dim, newnode) for dim in node.dims] - newnode.set_line_info(newnode.last_child()) return newnode def visit_for(self, node, parent): @@ -484,7 +481,6 @@ def visit_for(self, node, parent): newnode.iter = self.visit(node.iter, newnode) newnode.body = [self.visit(child, newnode) for child in node.body] newnode.orelse = [self.visit(child, newnode) for child in node.orelse] - newnode.set_line_info(newnode.last_child()) return newnode def visit_from(self, node, parent): @@ -511,7 +507,8 @@ def visit_function(self, node, parent): decorators = getattr(node, attr) if decorators: newnode.decorators = self.visit_decorators(node, newnode) - newnode.set_line_info(newnode.last_child()) + if PY3K and node.returns: + newnode.returns = self.visit(node.returns, newnode) self._global_names.pop() frame = newnode.parent.frame() if isinstance(frame, new.Class): @@ -535,7 +532,6 @@ def visit_genexpr(self, node, parent): _lineno_parent(node, newnode, parent) newnode.elt = self.visit(node.elt, newnode) newnode.generators = [self.visit(child, newnode) for child in node.generators] - newnode.set_line_info(newnode.last_child()) return newnode def visit_getattr(self, node, parent): @@ -555,7 +551,6 @@ def visit_getattr(self, node, parent): newnode.expr = self.visit(node.value, newnode) self.asscontext = asscontext newnode.attrname = node.attr - newnode.set_line_info(newnode.last_child()) return newnode def visit_global(self, node, parent): @@ -574,7 +569,6 @@ def visit_if(self, node, parent): newnode.test = self.visit(node.test, newnode) newnode.body = [self.visit(child, newnode) for child in node.body] newnode.orelse = [self.visit(child, newnode) for child in node.orelse] - newnode.set_line_info(newnode.last_child()) 
return newnode def visit_ifexp(self, node, parent): @@ -584,7 +578,6 @@ def visit_ifexp(self, node, parent): newnode.test = self.visit(node.test, newnode) newnode.body = self.visit(node.body, newnode) newnode.orelse = self.visit(node.orelse, newnode) - newnode.set_line_info(newnode.last_child()) return newnode def visit_import(self, node, parent): @@ -601,18 +594,16 @@ def visit_import(self, node, parent): def visit_index(self, node, parent): """visit a Index node by returning a fresh instance of it""" newnode = new.Index() - _lineno_parent(node, newnode, parent) + newnode.parent = parent newnode.value = self.visit(node.value, newnode) - newnode.set_line_info(newnode.last_child()) return newnode def visit_keyword(self, node, parent): """visit a Keyword node by returning a fresh instance of it""" newnode = new.Keyword() - _lineno_parent(node, newnode, parent) + newnode.parent = parent newnode.arg = node.arg newnode.value = self.visit(node.value, newnode) - newnode.set_line_info(newnode.last_child()) return newnode def visit_lambda(self, node, parent): @@ -621,7 +612,6 @@ def visit_lambda(self, node, parent): _lineno_parent(node, newnode, parent) newnode.args = self.visit(node.args, newnode) newnode.body = self.visit(node.body, newnode) - newnode.set_line_info(newnode.last_child()) return newnode def visit_list(self, node, parent): @@ -629,7 +619,6 @@ def visit_list(self, node, parent): newnode = new.List() _lineno_parent(node, newnode, parent) newnode.elts = [self.visit(child, newnode) for child in node.elts] - newnode.set_line_info(newnode.last_child()) return newnode def visit_listcomp(self, node, parent): @@ -639,7 +628,6 @@ def visit_listcomp(self, node, parent): newnode.elt = self.visit(node.elt, newnode) newnode.generators = [self.visit(child, newnode) for child in node.generators] - newnode.set_line_info(newnode.last_child()) return newnode def visit_name(self, node, parent): @@ -662,7 +650,6 @@ def visit_name(self, node, parent): # XXX REMOVE me : if 
self.asscontext in ('Del', 'Ass'): # 'Aug' ?? self._save_assignment(newnode) - newnode.set_line_info(newnode.last_child()) return newnode def visit_bytes(self, node, parent): @@ -697,7 +684,6 @@ def visit_print(self, node, parent): if node.dest is not None: newnode.dest = self.visit(node.dest, newnode) newnode.values = [self.visit(child, newnode) for child in node.values] - newnode.set_line_info(newnode.last_child()) return newnode def visit_raise(self, node, parent): @@ -710,7 +696,6 @@ def visit_raise(self, node, parent): newnode.inst = self.visit(node.inst, newnode) if node.tback is not None: newnode.tback = self.visit(node.tback, newnode) - newnode.set_line_info(newnode.last_child()) return newnode def visit_return(self, node, parent): @@ -719,7 +704,6 @@ def visit_return(self, node, parent): _lineno_parent(node, newnode, parent) if node.value is not None: newnode.value = self.visit(node.value, newnode) - newnode.set_line_info(newnode.last_child()) return newnode def visit_set(self, node, parent): @@ -727,7 +711,6 @@ def visit_set(self, node, parent): newnode = new.Set() _lineno_parent(node, newnode, parent) newnode.elts = [self.visit(child, newnode) for child in node.elts] - newnode.set_line_info(newnode.last_child()) return newnode def visit_setcomp(self, node, parent): @@ -737,20 +720,18 @@ def visit_setcomp(self, node, parent): newnode.elt = self.visit(node.elt, newnode) newnode.generators = [self.visit(child, newnode) for child in node.generators] - newnode.set_line_info(newnode.last_child()) return newnode def visit_slice(self, node, parent): """visit a Slice node by returning a fresh instance of it""" newnode = new.Slice() - _lineno_parent(node, newnode, parent) + newnode.parent = parent if node.lower is not None: newnode.lower = self.visit(node.lower, newnode) if node.upper is not None: newnode.upper = self.visit(node.upper, newnode) if node.step is not None: newnode.step = self.visit(node.step, newnode) - newnode.set_line_info(newnode.last_child()) 
return newnode def visit_subscript(self, node, parent): @@ -761,7 +742,6 @@ def visit_subscript(self, node, parent): newnode.value = self.visit(node.value, newnode) newnode.slice = self.visit(node.slice, newnode) self.asscontext = subcontext - newnode.set_line_info(newnode.last_child()) return newnode def visit_tryexcept(self, node, parent): @@ -771,7 +751,6 @@ def visit_tryexcept(self, node, parent): newnode.body = [self.visit(child, newnode) for child in node.body] newnode.handlers = [self.visit(child, newnode) for child in node.handlers] newnode.orelse = [self.visit(child, newnode) for child in node.orelse] - newnode.set_line_info(newnode.last_child()) return newnode def visit_tryfinally(self, node, parent): @@ -780,7 +759,6 @@ def visit_tryfinally(self, node, parent): _lineno_parent(node, newnode, parent) newnode.body = [self.visit(child, newnode) for child in node.body] newnode.finalbody = [self.visit(n, newnode) for n in node.finalbody] - newnode.set_line_info(newnode.last_child()) return newnode def visit_tuple(self, node, parent): @@ -788,7 +766,6 @@ def visit_tuple(self, node, parent): newnode = new.Tuple() _lineno_parent(node, newnode, parent) newnode.elts = [self.visit(child, newnode) for child in node.elts] - newnode.set_line_info(newnode.last_child()) return newnode def visit_unaryop(self, node, parent): @@ -797,7 +774,6 @@ def visit_unaryop(self, node, parent): _lineno_parent(node, newnode, parent) newnode.operand = self.visit(node.operand, newnode) newnode.op = _UNARY_OP_CLASSES[node.op.__class__] - newnode.set_line_info(newnode.last_child()) return newnode def visit_while(self, node, parent): @@ -807,7 +783,6 @@ def visit_while(self, node, parent): newnode.test = self.visit(node.test, newnode) newnode.body = [self.visit(child, newnode) for child in node.body] newnode.orelse = [self.visit(child, newnode) for child in node.orelse] - newnode.set_line_info(newnode.last_child()) return newnode def visit_with(self, node, parent): @@ -822,7 +797,6 @@ def 
visit_with(self, node, parent): self.asscontext = None newnode.items = [(expr, vars)] newnode.body = [self.visit(child, newnode) for child in node.body] - newnode.set_line_info(newnode.last_child()) return newnode def visit_yield(self, node, parent): @@ -850,6 +824,9 @@ def visit_arguments(self, node, parent): newnode.kwonlyargs = [self.visit(child, newnode) for child in node.kwonlyargs] self.asscontext = None newnode.kw_defaults = [self.visit(child, newnode) if child else None for child in node.kw_defaults] + newnode.annotations = [ + self.visit(arg.annotation, newnode) if arg.annotation else None + for arg in node.args] return newnode def visit_excepthandler(self, node, parent): @@ -861,7 +838,6 @@ def visit_excepthandler(self, node, parent): if node.name is not None: newnode.name = self.visit_assname(node, newnode, node.name) newnode.body = [self.visit(child, newnode) for child in node.body] - newnode.set_line_info(newnode.last_child()) return newnode def visit_nonlocal(self, node, parent): @@ -879,7 +855,6 @@ def visit_raise(self, node, parent): newnode.exc = self.visit(node.exc, newnode) if node.cause is not None: newnode.cause = self.visit(node.cause, newnode) - newnode.set_line_info(newnode.last_child()) return newnode def visit_starred(self, node, parent): @@ -887,7 +862,6 @@ def visit_starred(self, node, parent): newnode = new.Starred() _lineno_parent(node, newnode, parent) newnode.value = self.visit(node.value, newnode) - newnode.set_line_info(newnode.last_child()) return newnode def visit_try(self, node, parent): @@ -902,7 +876,6 @@ def visit_try(self, node, parent): excnode.body = [self.visit(child, excnode) for child in node.body] excnode.handlers = [self.visit(child, excnode) for child in node.handlers] excnode.orelse = [self.visit(child, excnode) for child in node.orelse] - excnode.set_line_info(excnode.last_child()) newnode.body = [excnode] else: newnode.body = [self.visit(child, newnode) for child in node.body] @@ -912,7 +885,6 @@ def 
visit_try(self, node, parent): newnode.body = [self.visit(child, newnode) for child in node.body] newnode.handlers = [self.visit(child, newnode) for child in node.handlers] newnode.orelse = [self.visit(child, newnode) for child in node.orelse] - newnode.set_line_info(newnode.last_child()) return newnode def visit_with(self, node, parent): @@ -934,7 +906,6 @@ def visit_child(child): newnode.items = [visit_child(child) for child in node.items] newnode.body = [self.visit(child, newnode) for child in node.body] - newnode.set_line_info(newnode.last_child()) return newnode def visit_yieldfrom(self, node, parent): @@ -942,6 +913,7 @@ def visit_yieldfrom(self, node, parent): def visit_class(self, node, parent): newnode = super(TreeRebuilder3k, self).visit_class(node, parent) + newnode._newstyle = True for keyword in node.keywords: if keyword.arg == 'metaclass': newnode._metaclass = self.visit(keyword, newnode).value diff --git a/pymode/libs/pylama/lint/pylama_pylint/astroid/scoped_nodes.py b/pymode/libs/astroid/scoped_nodes.py similarity index 67% rename from pymode/libs/pylama/lint/pylama_pylint/astroid/scoped_nodes.py rename to pymode/libs/astroid/scoped_nodes.py index 889baa0e..ac90f878 100644 --- a/pymode/libs/pylama/lint/pylama_pylint/astroid/scoped_nodes.py +++ b/pymode/libs/astroid/scoped_nodes.py @@ -24,27 +24,72 @@ __doctype__ = "restructuredtext en" import sys +import warnings from itertools import chain try: from io import BytesIO except ImportError: from cStringIO import StringIO as BytesIO +import six from logilab.common.compat import builtins from logilab.common.decorators import cached, cachedproperty from astroid.exceptions import NotFoundError, \ - AstroidBuildingException, InferenceError + AstroidBuildingException, InferenceError, ResolveError from astroid.node_classes import Const, DelName, DelAttr, \ Dict, From, List, Pass, Raise, Return, Tuple, Yield, YieldFrom, \ - LookupMixIn, const_factory as cf, unpack_infer, Name -from astroid.bases import NodeNG, 
InferenceContext, Instance,\ - YES, Generator, UnboundMethod, BoundMethod, _infer_stmts, copy_context, \ + LookupMixIn, const_factory as cf, unpack_infer, CallFunc +from astroid.bases import NodeNG, InferenceContext, Instance, copy_context, \ + YES, Generator, UnboundMethod, BoundMethod, _infer_stmts, \ BUILTINS from astroid.mixins import FilterStmtsMixin from astroid.bases import Statement from astroid.manager import AstroidManager +ITER_METHODS = ('__iter__', '__getitem__') +PY3K = sys.version_info >= (3, 0) + +def _c3_merge(sequences): + """Merges MROs in *sequences* to a single MRO using the C3 algorithm. + + Adapted from http://www.python.org/download/releases/2.3/mro/. + + """ + result = [] + while True: + sequences = [s for s in sequences if s] # purge empty sequences + if not sequences: + return result + for s1 in sequences: # find merge candidates among seq heads + candidate = s1[0] + for s2 in sequences: + if candidate in s2[1:]: + candidate = None + break # reject the current head, it appears later + else: + break + if not candidate: + # Show all the remaining bases, which were considered as + # candidates for the next mro sequence. 
+ bases = ["({})".format(", ".join(base.name + for base in subsequence)) + for subsequence in sequences] + raise ResolveError("Cannot create a consistent method resolution " + "order for bases %s" % ", ".join(bases)) + result.append(candidate) + # remove the chosen candidate + for seq in sequences: + if seq[0] == candidate: + del seq[0] + + +def _verify_duplicates_mro(sequences): + for sequence in sequences: + names = [node.qname() for node in sequence] + if len(names) != len(set(names)): + raise ResolveError('Duplicates found in the mro.') + def remove_nodes(func, cls): def wrapper(*args, **kwargs): @@ -188,7 +233,7 @@ def keys(self): """method from the `dict` interface returning a tuple containing locally defined names """ - return self.locals.keys() + return list(self.locals.keys()) def values(self): """method from the `dict` interface returning a tuple containing @@ -201,7 +246,7 @@ def items(self): containing each locally defined name with its associated node, which is an instance of `Function` or `Class` """ - return zip(self.keys(), self.values()) + return list(zip(self.keys(), self.values())) def __contains__(self, name): @@ -253,14 +298,37 @@ def __init__(self, name, doc, pure_python=True): self.body = [] self.future_imports = set() - @property - def file_stream(self): + def _get_stream(self): if self.file_bytes is not None: return BytesIO(self.file_bytes) if self.file is not None: - return open(self.file, 'rb') + stream = open(self.file, 'rb') + return stream return None + @property + def file_stream(self): + warnings.warn("file_stream property is deprecated and " + "it is slated for removal in astroid 1.6." 
+ "Use the new method 'stream' instead.", + PendingDeprecationWarning, + stacklevel=2) + return self._get_stream() + + def stream(self): + """Get a stream to the underlying file or bytes.""" + return self._get_stream() + + def close(self): + """Close the underlying file streams.""" + warnings.warn("close method is deprecated and it is " + "slated for removal in astroid 1.6, along " + "with 'file_stream' property. " + "Its behaviour is replaced by managing each " + "file stream returned by the 'stream' method.", + PendingDeprecationWarning, + stacklevel=2) + def block_range(self, lineno): """return block line numbers. @@ -336,13 +404,17 @@ def next_sibling(self): return if sys.version_info < (2, 8): - def absolute_import_activated(self): + @cachedproperty + def _absolute_import_activated(self): for stmt in self.locals.get('absolute_import', ()): if isinstance(stmt, From) and stmt.modname == '__future__': return True return False else: - absolute_import_activated = lambda self: True + _absolute_import_activated = True + + def absolute_import_activated(self): + return self._absolute_import_activated def import_module(self, modname, relative_only=False, level=None): """import the given module considering self as context""" @@ -405,24 +477,43 @@ def wildcard_import_names(self): # # We separate the different steps of lookup in try/excepts # to avoid catching too many Exceptions - # However, we can not analyse dynamically constructed __all__ + default = [name for name in self.keys() if not name.startswith('_')] try: all = self['__all__'] except KeyError: - return [name for name in self.keys() if not name.startswith('_')] + return default try: - explicit = all.assigned_stmts().next() + explicit = next(all.assigned_stmts()) except InferenceError: - return [name for name in self.keys() if not name.startswith('_')] + return default except AttributeError: # not an assignment node # XXX infer? 
- return [name for name in self.keys() if not name.startswith('_')] + return default + + # Try our best to detect the exported name. + infered = [] try: - # should be a Tuple/List of constant string / 1 string not allowed - return [const.value for const in explicit.elts] - except AttributeError: - return [name for name in self.keys() if not name.startswith('_')] + explicit = next(explicit.infer()) + except InferenceError: + return default + if not isinstance(explicit, (Tuple, List)): + return default + + str_const = lambda node: (isinstance(node, Const) and + isinstance(node.value, six.string_types)) + for node in explicit.elts: + if str_const(node): + infered.append(node.value) + else: + try: + infered_node = next(node.infer()) + except InferenceError: + continue + if str_const(infered_node): + infered.append(infered_node.value) + return infered + class ComprehensionScope(LocalsDictNodeNG): @@ -476,7 +567,31 @@ class ListComp(_ListComp): """class representing a ListComp node""" # Function ################################################################### - + +def _infer_decorator_callchain(node): + """Detect decorator call chaining and see if the end result is a + static or a classmethod. + """ + if not isinstance(node, Function): + return + if not node.parent: + return + try: + # TODO: We don't handle multiple inference results right now, + # because there's no flow to reason when the return + # is what we are looking for, a static or a class method. + result = next(node.infer_call_result(node.parent)) + except (StopIteration, InferenceError): + return + if isinstance(result, Instance): + result = result._proxied + if isinstance(result, Class): + if result.is_subtype_of('%s.classmethod' % BUILTINS): + return 'classmethod' + if result.is_subtype_of('%s.staticmethod' % BUILTINS): + return 'staticmethod' + + def _function_type(self): """ Function type, possible values are: @@ -487,20 +602,35 @@ def _function_type(self): # so do it here. 
if self.decorators: for node in self.decorators.nodes: - if not isinstance(node, Name): - continue + if isinstance(node, CallFunc): + # Handle the following case: + # @some_decorator(arg1, arg2) + # def func(...) + # + try: + current = next(node.func.infer()) + except InferenceError: + continue + _type = _infer_decorator_callchain(current) + if _type is not None: + return _type + try: for infered in node.infer(): + # Check to see if this returns a static or a class method. + _type = _infer_decorator_callchain(infered) + if _type is not None: + return _type + if not isinstance(infered, Class): continue for ancestor in infered.ancestors(): - if isinstance(ancestor, Class): - if (ancestor.name == 'classmethod' and - ancestor.root().name == BUILTINS): - return 'classmethod' - elif (ancestor.name == 'staticmethod' and - ancestor.root().name == BUILTINS): - return 'staticmethod' + if not isinstance(ancestor, Class): + continue + if ancestor.is_subtype_of('%s.classmethod' % BUILTINS): + return 'classmethod' + elif ancestor.is_subtype_of('%s.staticmethod' % BUILTINS): + return 'staticmethod' except InferenceError: pass return self._type @@ -560,7 +690,11 @@ def scope_lookup(self, node, name, offset=0): class Function(Statement, Lambda): - _astroid_fields = ('decorators', 'args', 'body') + if PY3K: + _astroid_fields = ('decorators', 'args', 'body', 'returns') + returns = None + else: + _astroid_fields = ('decorators', 'args', 'body') special_attributes = set(('__name__', '__doc__', '__dict__')) is_function = True @@ -574,22 +708,25 @@ def __init__(self, name, doc): self.locals = {} self.args = [] self.body = [] - self.decorators = None self.name = name self.doc = doc self.extra_decorators = [] self.instance_attrs = {} - def set_line_info(self, lastchild): - self.fromlineno = self.lineno - # lineno is the line number of the first decorator, we want the def statement lineno + @cachedproperty + def fromlineno(self): + # lineno is the line number of the first decorator, we want 
the def + # statement lineno + lineno = self.lineno if self.decorators is not None: - self.fromlineno += sum(node.tolineno - node.lineno + 1 + lineno += sum(node.tolineno - node.lineno + 1 for node in self.decorators.nodes) - if self.args.fromlineno < self.fromlineno: - self.args.fromlineno = self.fromlineno - self.tolineno = lastchild.tolineno - self.blockstart_tolineno = self.args.tolineno + + return lineno + + @cachedproperty + def blockstart_tolineno(self): + return self.args.tolineno def block_range(self, lineno): """return block line numbers. @@ -633,7 +770,7 @@ def is_bound(self): def is_abstract(self, pass_is_abstract=True): """Returns True if the method is abstract. - + A method is considered abstract if - the only statement is 'raise NotImplementedError', or - the only statement is 'pass' and pass_is_abstract is True, or @@ -642,7 +779,7 @@ def is_abstract(self, pass_is_abstract=True): if self.decorators: for node in self.decorators.nodes: try: - infered = node.infer().next() + infered = next(node.infer()) except InferenceError: continue if infered and infered.qname() in ('abc.abstractproperty', @@ -663,17 +800,33 @@ def is_abstract(self, pass_is_abstract=True): def is_generator(self): """return true if this is a generator function""" # XXX should be flagged, not computed - try: - return self.nodes_of_class((Yield, YieldFrom), - skip_klass=(Function, Lambda)).next() - except StopIteration: - return False + return next(self.nodes_of_class((Yield, YieldFrom), + skip_klass=(Function, Lambda)), False) def infer_call_result(self, caller, context=None): """infer what a function is returning when called""" if self.is_generator(): yield Generator() return + # This is really a gigantic hack to work around metaclass generators + # that return transient class-generating functions. Pylint's AST structure + # cannot handle a base class object that is only used for calling __new__, + # but does not contribute to the inheritance structure itself. 
We inject + # a fake class into the hierarchy here for several well-known metaclass + # generators, and filter it out later. + if (self.name == 'with_metaclass' and + len(self.args.args) == 1 and + self.args.vararg is not None): + metaclass = next(caller.args[0].infer(context)) + if isinstance(metaclass, Class): + c = Class('temporary_class', None) + c.hide = True + c.parent = self + bases = [next(b.infer(context)) for b in caller.args[1:]] + c.bases = [base for base in bases if base != YES] + c._metaclass = metaclass + yield c + return returns = self.nodes_of_class(Return, skip_klass=Function) for returnnode in returns: if returnnode.value is None: @@ -701,15 +854,21 @@ def _rec_get_names(args, names=None): # Class ###################################################################### -def _is_metaclass(klass): +def _is_metaclass(klass, seen=None): """ Return if the given class can be used as a metaclass. """ if klass.name == 'type': return True + if seen is None: + seen = set() for base in klass.bases: try: for baseobj in base.infer(): + if baseobj in seen: + continue + else: + seen.add(baseobj) if isinstance(baseobj, Instance): # not abstract return False @@ -721,7 +880,7 @@ def _is_metaclass(klass): continue if baseobj._type == 'metaclass': return True - if _is_metaclass(baseobj): + if _is_metaclass(baseobj, seen): return True except InferenceError: continue @@ -749,16 +908,15 @@ def _class_type(klass, ancestors=None): klass._type = 'class' return 'class' ancestors.add(klass) - # print >> sys.stderr, '_class_type', repr(klass) for base in klass.ancestors(recurs=False): name = _class_type(base, ancestors) if name != 'class': - if name == 'metaclass' and not _is_metaclass(klass): - # don't propagate it if the current class - # can't be a metaclass - continue - klass._type = base.type - break + if name == 'metaclass' and not _is_metaclass(klass): + # don't propagate it if the current class + # can't be a metaclass + continue + klass._type = base.type + break if 
klass._type is None: klass._type = 'class' return klass._type @@ -784,6 +942,8 @@ class Class(Statement, LocalsDictNodeNG, FilterStmtsMixin): blockstart_tolineno = None _type = None + _metaclass_hack = False + hide = False type = property(_class_type, doc="class'type, possible values are 'class' | " "'metaclass' | 'interface' | 'exception'") @@ -805,6 +965,11 @@ def _newstyle_impl(self, context=None): if base._newstyle_impl(context): self._newstyle = True break + klass = self._explicit_metaclass() + # could be any callable, we'd need to infer the result of klass(name, + # bases, dict). punt if it's not a class node. + if klass is not None and isinstance(klass, Class): + self._newstyle = klass._newstyle_impl(context) if self._newstyle is None: self._newstyle = False return self._newstyle @@ -814,12 +979,12 @@ def _newstyle_impl(self, context=None): doc="boolean indicating if it's a new style class" "or not") - def set_line_info(self, lastchild): - self.fromlineno = self.lineno - self.blockstart_tolineno = self.bases and self.bases[-1].tolineno or self.fromlineno - if lastchild is not None: - self.tolineno = lastchild.tolineno - # else this is a class with only a docstring, then tolineno is (should be) already ok + @cachedproperty + def blockstart_tolineno(self): + if self.bases: + return self.bases[-1].tolineno + else: + return self.fromlineno def block_range(self, lineno): """return block line numbers. 
@@ -839,12 +1004,54 @@ def display_type(self): def callable(self): return True + def is_subtype_of(self, type_name, context=None): + if self.qname() == type_name: + return True + for anc in self.ancestors(context=context): + if anc.qname() == type_name: + return True + def infer_call_result(self, caller, context=None): """infer what a class is returning when called""" - yield Instance(self) + if self.is_subtype_of('%s.type' % (BUILTINS,), context) and len(caller.args) == 3: + name_node = next(caller.args[0].infer(context)) + if (isinstance(name_node, Const) and + isinstance(name_node.value, six.string_types)): + name = name_node.value + else: + yield YES + return + result = Class(name, None) + bases = next(caller.args[1].infer(context)) + if isinstance(bases, (Tuple, List)): + result.bases = bases.itered() + else: + # There is currently no AST node that can represent an 'unknown' + # node (YES is not an AST node), therefore we simply return YES here + # although we know at least the name of the class. + yield YES + return + result.parent = caller.parent + yield result + else: + yield Instance(self) def scope_lookup(self, node, name, offset=0): - if node in self.bases: + if any(node == base or base.parent_of(node) + for base in self.bases): + # Handle the case where we have either a name + # in the bases of a class, which exists before + # the actual definition or the case where we have + # a Getattr node, with that name. + # + # name = ... + # class A(name): + # def name(self): ... + # + # import name + # class A(name.Name): + # def name(self): ... 
+ frame = self.parent.frame() # line offset to avoid that class A(A) resolve the ancestor to # the defined class @@ -868,11 +1075,15 @@ def ancestors(self, recurs=True, context=None): ancestors only """ # FIXME: should be possible to choose the resolution order - # XXX inference make infinite loops possible here (see BaseTransformer - # manipulation in the builder module for instance) + # FIXME: inference make infinite loops possible here yielded = set([self]) if context is None: context = InferenceContext() + if sys.version_info[0] >= 3: + if not self.bases and self.qname() != 'builtins.object': + yield builtin_lookup("object")[1][0] + return + for stmt in self.bases: with context.restore_path(): try: @@ -883,12 +1094,17 @@ def ancestors(self, recurs=True, context=None): else: # duh ? continue - if baseobj in yielded: - continue # cf xxx above - yielded.add(baseobj) - yield baseobj + if not baseobj.hide: + if baseobj in yielded: + continue # cf xxx above + yielded.add(baseobj) + yield baseobj if recurs: - for grandpa in baseobj.ancestors(True, context): + for grandpa in baseobj.ancestors(recurs=True, + context=context): + if grandpa is self: + # This class is the ancestor of itself. + break if grandpa in yielded: continue # cf xxx above yielded.add(grandpa) @@ -941,7 +1157,9 @@ def instance_attr(self, name, context=None): if no attribute with this name has been find in this class or its parent classes """ - values = self.instance_attrs.get(name, []) + # Return a copy, so we don't modify self.instance_attrs, + # which could lead to infinite loop. 
+ values = list(self.instance_attrs.get(name, [])) # get all values from parents for class_node in self.instance_attr_ancestors(name, context): values += class_node.instance_attrs[name] @@ -1079,23 +1297,45 @@ def _explicit_metaclass(self): An explicit defined metaclass is defined either by passing the ``metaclass`` keyword argument - in the class definition line (Python 3) or by - having a ``__metaclass__`` class attribute. + in the class definition line (Python 3) or (Python 2) by + having a ``__metaclass__`` class attribute, or if there are + no explicit bases but there is a global ``__metaclass__`` variable. """ + for base in self.bases: + try: + for baseobj in base.infer(): + if isinstance(baseobj, Class) and baseobj.hide: + self._metaclass = baseobj._metaclass + self._metaclass_hack = True + break + except InferenceError: + pass + if self._metaclass: # Expects this from Py3k TreeRebuilder try: return next(node for node in self._metaclass.infer() if node is not YES) except (InferenceError, StopIteration): - return + return None + if sys.version_info >= (3, ): + return None + + if '__metaclass__' in self.locals: + assignment = self.locals['__metaclass__'][-1] + elif self.bases: + return None + elif '__metaclass__' in self.root().locals: + assignments = [ass for ass in self.root().locals['__metaclass__'] + if ass.lineno < self.lineno] + if not assignments: + return None + assignment = assignments[-1] + else: + return None try: - meta = self.getattr('__metaclass__')[0] - except NotFoundError: - return - try: - infered = meta.infer().next() + infered = next(assignment.infer()) except InferenceError: return if infered is YES: # don't expose this @@ -1116,3 +1356,129 @@ def metaclass(self): if klass is not None: break return klass + + def has_metaclass_hack(self): + return self._metaclass_hack + + def _islots(self): + """ Return an iterator with the inferred slots. 
""" + if '__slots__' not in self.locals: + return + for slots in self.igetattr('__slots__'): + # check if __slots__ is a valid type + for meth in ITER_METHODS: + try: + slots.getattr(meth) + break + except NotFoundError: + continue + else: + continue + + if isinstance(slots, Const): + # a string. Ignore the following checks, + # but yield the node, only if it has a value + if slots.value: + yield slots + continue + if not hasattr(slots, 'itered'): + # we can't obtain the values, maybe a .deque? + continue + + if isinstance(slots, Dict): + values = [item[0] for item in slots.items] + else: + values = slots.itered() + if values is YES: + continue + + for elt in values: + try: + for infered in elt.infer(): + if infered is YES: + continue + if (not isinstance(infered, Const) or + not isinstance(infered.value, + six.string_types)): + continue + if not infered.value: + continue + yield infered + except InferenceError: + continue + + # Cached, because inferring them all the time is expensive + @cached + def slots(self): + """Get all the slots for this node. + + If the class doesn't define any slot, through `__slots__` + variable, then this function will return a None. + Also, it will return None in the case the slots weren't inferred. + Otherwise, it will return a list of slot names. + """ + if not self.newstyle: + raise NotImplementedError( + "The concept of slots is undefined for old-style classes.") + + slots = self._islots() + try: + first = next(slots) + except StopIteration: + # The class doesn't have a __slots__ definition. + return None + return [first] + list(slots) + + def _inferred_bases(self, recurs=True, context=None): + # TODO(cpopa): really similar with .ancestors, + # but the difference is when one base is inferred, + # only the first object is wanted. 
That's because + # we aren't interested in superclasses, as in the following + # example: + # + # class SomeSuperClass(object): pass + # class SomeClass(SomeSuperClass): pass + # class Test(SomeClass): pass + # + # Inferring SomeClass from the Test's bases will give + # us both SomeClass and SomeSuperClass, but we are interested + # only in SomeClass. + + if context is None: + context = InferenceContext() + if sys.version_info[0] >= 3: + if not self.bases and self.qname() != 'builtins.object': + yield builtin_lookup("object")[1][0] + return + + for stmt in self.bases: + try: + baseobj = next(stmt.infer(context=context)) + except InferenceError: + # XXX log error ? + continue + if isinstance(baseobj, Instance): + baseobj = baseobj._proxied + if not isinstance(baseobj, Class): + continue + if not baseobj.hide: + yield baseobj + + def mro(self, context=None): + """Get the method resolution order, using C3 linearization. + + It returns the list of ancestors sorted by the mro. + This will raise `NotImplementedError` for old-style classes, since + they don't have the concept of MRO. + """ + if not self.newstyle: + raise NotImplementedError( + "Could not obtain mro for old-style classes.") + + bases = list(self._inferred_bases(context=context)) + unmerged_mro = ([[self]] + + [base.mro() for base in bases if base is not self] + + [bases]) + + _verify_duplicates_mro(unmerged_mro) + return _c3_merge(unmerged_mro) diff --git a/pymode/libs/astroid/test_utils.py b/pymode/libs/astroid/test_utils.py new file mode 100644 index 00000000..19bd7b96 --- /dev/null +++ b/pymode/libs/astroid/test_utils.py @@ -0,0 +1,218 @@ +"""Utility functions for test code that uses astroid ASTs as input.""" +import functools +import sys +import textwrap + +from astroid import nodes +from astroid import builder +# The name of the transient function that is used to +# wrap expressions to be extracted when calling +# extract_node. 
+_TRANSIENT_FUNCTION = '__' + +# The comment used to select a statement to be extracted +# when calling extract_node. +_STATEMENT_SELECTOR = '#@' + + +def _extract_expressions(node): + """Find expressions in a call to _TRANSIENT_FUNCTION and extract them. + + The function walks the AST recursively to search for expressions that + are wrapped into a call to _TRANSIENT_FUNCTION. If it finds such an + expression, it completely removes the function call node from the tree, + replacing it by the wrapped expression inside the parent. + + :param node: An astroid node. + :type node: astroid.bases.NodeNG + :yields: The sequence of wrapped expressions on the modified tree + expression can be found. + """ + if (isinstance(node, nodes.CallFunc) + and isinstance(node.func, nodes.Name) + and node.func.name == _TRANSIENT_FUNCTION): + real_expr = node.args[0] + real_expr.parent = node.parent + # Search for node in all _astng_fields (the fields checked when + # get_children is called) of its parent. Some of those fields may + # be lists or tuples, in which case the elements need to be checked. + # When we find it, replace it by real_expr, so that the AST looks + # like no call to _TRANSIENT_FUNCTION ever took place. + for name in node.parent._astroid_fields: + child = getattr(node.parent, name) + if isinstance(child, (list, tuple)): + for idx, compound_child in enumerate(child): + if compound_child is node: + child[idx] = real_expr + elif child is node: + setattr(node.parent, name, real_expr) + yield real_expr + else: + for child in node.get_children(): + for result in _extract_expressions(child): + yield result + + +def _find_statement_by_line(node, line): + """Extracts the statement on a specific line from an AST. + + If the line number of node matches line, it will be returned; + otherwise its children are iterated and the function is called + recursively. + + :param node: An astroid node. 
+ :type node: astroid.bases.NodeNG + :param line: The line number of the statement to extract. + :type line: int + :returns: The statement on the line, or None if no statement for the line + can be found. + :rtype: astroid.bases.NodeNG or None + """ + if isinstance(node, (nodes.Class, nodes.Function)): + # This is an inaccuracy in the AST: the nodes that can be + # decorated do not carry explicit information on which line + # the actual definition (class/def), but .fromline seems to + # be close enough. + node_line = node.fromlineno + else: + node_line = node.lineno + + if node_line == line: + return node + + for child in node.get_children(): + result = _find_statement_by_line(child, line) + if result: + return result + + return None + +def extract_node(code, module_name=''): + """Parses some Python code as a module and extracts a designated AST node. + + Statements: + To extract one or more statement nodes, append #@ to the end of the line + + Examples: + >>> def x(): + >>> def y(): + >>> return 1 #@ + + The return statement will be extracted. + + >>> class X(object): + >>> def meth(self): #@ + >>> pass + + The funcion object 'meth' will be extracted. + + Expressions: + To extract arbitrary expressions, surround them with the fake + function call __(...). After parsing, the surrounded expression + will be returned and the whole AST (accessible via the returned + node's parent attribute) will look like the function call was + never there in the first place. + + Examples: + >>> a = __(1) + + The const node will be extracted. + + >>> def x(d=__(foo.bar)): pass + + The node containing the default argument will be extracted. + + >>> def foo(a, b): + >>> return 0 < __(len(a)) < b + + The node containing the function call 'len' will be extracted. + + If no statements or expressions are selected, the last toplevel + statement will be returned. + + If the selected statement is a discard statement, (i.e. 
an expression + turned into a statement), the wrapped expression is returned instead. + + For convenience, singleton lists are unpacked. + + :param str code: A piece of Python code that is parsed as + a module. Will be passed through textwrap.dedent first. + :param str module_name: The name of the module. + :returns: The designated node from the parse tree, or a list of nodes. + :rtype: astroid.bases.NodeNG, or a list of nodes. + """ + def _extract(node): + if isinstance(node, nodes.Discard): + return node.value + else: + return node + + requested_lines = [] + for idx, line in enumerate(code.splitlines()): + if line.strip().endswith(_STATEMENT_SELECTOR): + requested_lines.append(idx + 1) + + tree = build_module(code, module_name=module_name) + extracted = [] + if requested_lines: + for line in requested_lines: + extracted.append(_find_statement_by_line(tree, line)) + + # Modifies the tree. + extracted.extend(_extract_expressions(tree)) + + if not extracted: + extracted.append(tree.body[-1]) + + extracted = [_extract(node) for node in extracted] + if len(extracted) == 1: + return extracted[0] + else: + return extracted + + +def build_module(code, module_name='', path=None): + """Parses a string module with a builder. + :param code: The code for the module. + :type code: str + :param module_name: The name for the module + :type module_name: str + :param path: The path for the module + :type module_name: str + :returns: The module AST. + :rtype: astroid.bases.NodeNG + """ + code = textwrap.dedent(code) + return builder.AstroidBuilder(None).string_build(code, modname=module_name, path=path) + + +def require_version(minver=None, maxver=None): + """ Compare version of python interpreter to the given one. Skip the test + if older. + """ + def parse(string, default=None): + string = string or default + try: + return tuple(int(v) for v in string.split('.')) + except ValueError: + raise ValueError('%s is not a correct version : should be X.Y[.Z].' 
% version) + + def check_require_version(f): + current = sys.version_info[:3] + if parse(minver, "0") < current <= parse(maxver, "4"): + return f + else: + str_version = '.'.join(str(v) for v in sys.version_info) + @functools.wraps(f) + def new_f(self, *args, **kwargs): + if minver is not None: + self.skipTest('Needs Python > %s. Current version is %s.' % (minver, str_version)) + elif maxver is not None: + self.skipTest('Needs Python <= %s. Current version is %s.' % (maxver, str_version)) + return new_f + + + return check_require_version + +def get_name_node(start_from, name, index=0): + return [n for n in start_from.nodes_of_class(nodes.Name) if n.name == name][index] diff --git a/pymode/libs/pylama/lint/pylama_pylint/astroid/utils.py b/pymode/libs/astroid/utils.py similarity index 89% rename from pymode/libs/pylama/lint/pylama_pylint/astroid/utils.py rename to pymode/libs/astroid/utils.py index 1cd0e778..ae72a92c 100644 --- a/pymode/libs/pylama/lint/pylama_pylint/astroid/utils.py +++ b/pymode/libs/astroid/utils.py @@ -18,6 +18,7 @@ """this module contains some utilities to navigate in the tree or to extract information from it """ +from __future__ import print_function __docformat__ = "restructuredtext en" @@ -25,7 +26,7 @@ from astroid.builder import parse -class ASTWalker: +class ASTWalker(object): """a walker visiting a tree in preorder, calling on the handler: * visit_ on entering a node, where class name is the class of @@ -98,7 +99,7 @@ def visit(self, node): if methods[0] is not None: methods[0](node) if 'locals' in node.__dict__: # skip Instance and other proxy - for name, local_node in node.items(): + for local_node in node.values(): self.visit(local_node) if methods[1] is not None: return methods[1](node) @@ -109,20 +110,22 @@ def _check_children(node): for child in node.get_children(): ok = False if child is None: - print "Hm, child of %s is None" % node + print("Hm, child of %s is None" % node) continue if not hasattr(child, 'parent'): - print " 
ERROR: %s has child %s %x with no parent" % (node, child, id(child)) + print(" ERROR: %s has child %s %x with no parent" % ( + node, child, id(child))) elif not child.parent: - print " ERROR: %s has child %s %x with parent %r" % (node, child, id(child), child.parent) + print(" ERROR: %s has child %s %x with parent %r" % ( + node, child, id(child), child.parent)) elif child.parent is not node: - print " ERROR: %s %x has child %s %x with wrong parent %s" % (node, - id(node), child, id(child), child.parent) + print(" ERROR: %s %x has child %s %x with wrong parent %s" % ( + node, id(node), child, id(child), child.parent)) else: ok = True if not ok: - print "lines;", node.lineno, child.lineno - print "of module", node.root(), node.root().name + print("lines;", node.lineno, child.lineno) + print("of module", node.root(), node.root().name) raise AstroidBuildingException _check_children(child) @@ -145,7 +148,7 @@ class TreeTester(object): Module() body = [ Print() - dest = + dest = values = [ ] ] @@ -180,8 +183,8 @@ def _native_repr_tree(self, node, indent, _done=None): if _done is None: _done = set() if node in _done: - self._string += '\nloop in tree: %r (%s)' % (node, - getattr(node, 'lineno', None)) + self._string += '\nloop in tree: %r (%s)' % ( + node, getattr(node, 'lineno', None)) return _done.add(node) self._string += '\n' + indent + '<%s>' % node.__class__.__name__ @@ -197,7 +200,7 @@ def _native_repr_tree(self, node, indent, _done=None): continue if a in ("lineno", "col_offset") and not self.lineno: continue - self._string +='\n' + indent + a + " = " + repr(attr) + self._string += '\n' + indent + a + " = " + repr(attr) for field in node._fields or (): attr = node_dict[field] if attr is None: diff --git a/pymode/libs/easy_install.py b/pymode/libs/easy_install.py new file mode 100644 index 00000000..d87e9840 --- /dev/null +++ b/pymode/libs/easy_install.py @@ -0,0 +1,5 @@ +"""Run the EasyInstall command""" + +if __name__ == '__main__': + from 
setuptools.command.easy_install import main + main() diff --git a/pymode/libs/pylama/lint/pylama_pylint/logilab/common/__init__.py b/pymode/libs/logilab/common/__init__.py similarity index 92% rename from pymode/libs/pylama/lint/pylama_pylint/logilab/common/__init__.py rename to pymode/libs/logilab/common/__init__.py index 8d063e2c..fc01e4df 100644 --- a/pymode/libs/pylama/lint/pylama_pylint/logilab/common/__init__.py +++ b/pymode/libs/logilab/common/__init__.py @@ -25,7 +25,18 @@ :var IGNORED_EXTENSIONS: file extensions that may usually be ignored """ __docformat__ = "restructuredtext en" -from logilab.common.__pkginfo__ import version as __version__ + +import sys +import types +import pkg_resources + +__version__ = pkg_resources.get_distribution('logilab-common').version + +# deprecated, but keep compatibility with pylint < 1.4.4 +__pkginfo__ = types.ModuleType('__pkginfo__') +__pkginfo__.__package__ = __name__ +__pkginfo__.version = __version__ +sys.modules['logilab.common.__pkginfo__'] = __pkginfo__ STD_BLACKLIST = ('CVS', '.svn', '.hg', 'debian', 'dist', 'build') @@ -57,8 +68,9 @@ def __getitem__(self, attr): class nullobject(object): def __repr__(self): return '' - def __nonzero__(self): + def __bool__(self): return False + __nonzero__ = __bool__ class tempattr(object): def __init__(self, obj, attr, value): @@ -138,6 +150,7 @@ def make_domains(lists): >>> make_domains(['a', 'b'], ['c','d', 'e']) [['a', 'b', 'a', 'b', 'a', 'b'], ['c', 'c', 'd', 'd', 'e', 'e']] """ + from six.moves import range domains = [] for iterable in lists: new_domain = iterable[:] diff --git a/pymode/libs/logilab/common/cache.py b/pymode/libs/logilab/common/cache.py new file mode 100644 index 00000000..11ed1370 --- /dev/null +++ b/pymode/libs/logilab/common/cache.py @@ -0,0 +1,114 @@ +# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved. +# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr +# +# This file is part of logilab-common. 
+# +# logilab-common is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 2.1 of the License, or (at your option) any +# later version. +# +# logilab-common is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License along +# with logilab-common. If not, see . +"""Cache module, with a least recently used algorithm for the management of the +deletion of entries. + + + + +""" +__docformat__ = "restructuredtext en" + +from threading import Lock + +from logilab.common.decorators import locked + +_marker = object() + +class Cache(dict): + """A dictionary like cache. + + inv: + len(self._usage) <= self.size + len(self.data) <= self.size + """ + + def __init__(self, size=100): + """ Warning : Cache.__init__() != dict.__init__(). + Constructor does not take any arguments beside size. 
+ """ + assert size >= 0, 'cache size must be >= 0 (0 meaning no caching)' + self.size = size + self._usage = [] + self._lock = Lock() + super(Cache, self).__init__() + + def _acquire(self): + self._lock.acquire() + + def _release(self): + self._lock.release() + + def _update_usage(self, key): + if not self._usage: + self._usage.append(key) + elif self._usage[-1] != key: + try: + self._usage.remove(key) + except ValueError: + # we are inserting a new key + # check the size of the dictionary + # and remove the oldest item in the cache + if self.size and len(self._usage) >= self.size: + super(Cache, self).__delitem__(self._usage[0]) + del self._usage[0] + self._usage.append(key) + else: + pass # key is already the most recently used key + + def __getitem__(self, key): + value = super(Cache, self).__getitem__(key) + self._update_usage(key) + return value + __getitem__ = locked(_acquire, _release)(__getitem__) + + def __setitem__(self, key, item): + # Just make sure that size > 0 before inserting a new item in the cache + if self.size > 0: + super(Cache, self).__setitem__(key, item) + self._update_usage(key) + __setitem__ = locked(_acquire, _release)(__setitem__) + + def __delitem__(self, key): + super(Cache, self).__delitem__(key) + self._usage.remove(key) + __delitem__ = locked(_acquire, _release)(__delitem__) + + def clear(self): + super(Cache, self).clear() + self._usage = [] + clear = locked(_acquire, _release)(clear) + + def pop(self, key, default=_marker): + if key in self: + self._usage.remove(key) + #if default is _marker: + # return super(Cache, self).pop(key) + return super(Cache, self).pop(key, default) + pop = locked(_acquire, _release)(pop) + + def popitem(self): + raise NotImplementedError() + + def setdefault(self, key, default=None): + raise NotImplementedError() + + def update(self, other): + raise NotImplementedError() + + diff --git a/pymode/libs/pylama/lint/pylama_pylint/logilab/common/changelog.py b/pymode/libs/logilab/common/changelog.py 
similarity index 98% rename from pymode/libs/pylama/lint/pylama_pylint/logilab/common/changelog.py rename to pymode/libs/logilab/common/changelog.py index 74f51241..2fff2ed6 100644 --- a/pymode/libs/pylama/lint/pylama_pylint/logilab/common/changelog.py +++ b/pymode/libs/logilab/common/changelog.py @@ -49,6 +49,8 @@ import sys from stat import S_IWRITE +from six import string_types + BULLET = '*' SUBBULLET = '-' INDENT = ' ' * 4 @@ -64,7 +66,7 @@ class Version(tuple): correctly printing it as X.Y.Z """ def __new__(cls, versionstr): - if isinstance(versionstr, basestring): + if isinstance(versionstr, string_types): versionstr = versionstr.strip(' :') # XXX (syt) duh? parsed = cls.parse(versionstr) else: @@ -76,7 +78,7 @@ def parse(cls, versionstr): versionstr = versionstr.strip(' :') try: return [int(i) for i in versionstr.split('.')] - except ValueError, ex: + except ValueError as ex: raise ValueError("invalid literal for version '%s' (%s)"%(versionstr, ex)) def __str__(self): diff --git a/pymode/libs/logilab/common/clcommands.py b/pymode/libs/logilab/common/clcommands.py new file mode 100644 index 00000000..4778b99b --- /dev/null +++ b/pymode/libs/logilab/common/clcommands.py @@ -0,0 +1,334 @@ +# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved. +# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr +# +# This file is part of logilab-common. +# +# logilab-common is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 2.1 of the License, or (at your option) any +# later version. +# +# logilab-common is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. 
+# +# You should have received a copy of the GNU Lesser General Public License along +# with logilab-common. If not, see . +"""Helper functions to support command line tools providing more than +one command. + +e.g called as "tool command [options] args..." where and are +command'specific +""" + +from __future__ import print_function + +__docformat__ = "restructuredtext en" + +import sys +import logging +from os.path import basename + +from logilab.common.configuration import Configuration +from logilab.common.logging_ext import init_log, get_threshold +from logilab.common.deprecation import deprecated + + +class BadCommandUsage(Exception): + """Raised when an unknown command is used or when a command is not + correctly used (bad options, too much / missing arguments...). + + Trigger display of command usage. + """ + +class CommandError(Exception): + """Raised when a command can't be processed and we want to display it and + exit, without traceback nor usage displayed. + """ + + +# command line access point #################################################### + +class CommandLine(dict): + """Usage: + + >>> LDI = cli.CommandLine('ldi', doc='Logilab debian installer', + version=version, rcfile=RCFILE) + >>> LDI.register(MyCommandClass) + >>> LDI.register(MyOtherCommandClass) + >>> LDI.run(sys.argv[1:]) + + Arguments: + + * `pgm`, the program name, default to `basename(sys.argv[0])` + + * `doc`, a short description of the command line tool + + * `copyright`, additional doc string that will be appended to the generated + doc + + * `version`, version number of string of the tool. If specified, global + --version option will be available. + + * `rcfile`, path to a configuration file. If specified, global --C/--rc-file + option will be available? 
self.rcfile = rcfile + + * `logger`, logger to propagate to commands, default to + `logging.getLogger(self.pgm))` + """ + def __init__(self, pgm=None, doc=None, copyright=None, version=None, + rcfile=None, logthreshold=logging.ERROR, + check_duplicated_command=True): + if pgm is None: + pgm = basename(sys.argv[0]) + self.pgm = pgm + self.doc = doc + self.copyright = copyright + self.version = version + self.rcfile = rcfile + self.logger = None + self.logthreshold = logthreshold + self.check_duplicated_command = check_duplicated_command + + def register(self, cls, force=False): + """register the given :class:`Command` subclass""" + assert not self.check_duplicated_command or force or not cls.name in self, \ + 'a command %s is already defined' % cls.name + self[cls.name] = cls + return cls + + def run(self, args): + """main command line access point: + * init logging + * handle global options (-h/--help, --version, -C/--rc-file) + * check command + * run command + + Terminate by :exc:`SystemExit` + """ + init_log(debug=True, # so that we use StreamHandler + logthreshold=self.logthreshold, + logformat='%(levelname)s: %(message)s') + try: + arg = args.pop(0) + except IndexError: + self.usage_and_exit(1) + if arg in ('-h', '--help'): + self.usage_and_exit(0) + if self.version is not None and arg in ('--version'): + print(self.version) + sys.exit(0) + rcfile = self.rcfile + if rcfile is not None and arg in ('-C', '--rc-file'): + try: + rcfile = args.pop(0) + arg = args.pop(0) + except IndexError: + self.usage_and_exit(1) + try: + command = self.get_command(arg) + except KeyError: + print('ERROR: no %s command' % arg) + print() + self.usage_and_exit(1) + try: + sys.exit(command.main_run(args, rcfile)) + except KeyboardInterrupt as exc: + print('Interrupted', end=' ') + if str(exc): + print(': %s' % exc, end=' ') + print() + sys.exit(4) + except BadCommandUsage as err: + print('ERROR:', err) + print() + print(command.help()) + sys.exit(1) + + def create_logger(self, 
handler, logthreshold=None): + logger = logging.Logger(self.pgm) + logger.handlers = [handler] + if logthreshold is None: + logthreshold = get_threshold(self.logthreshold) + logger.setLevel(logthreshold) + return logger + + def get_command(self, cmd, logger=None): + if logger is None: + logger = self.logger + if logger is None: + logger = self.logger = logging.getLogger(self.pgm) + logger.setLevel(get_threshold(self.logthreshold)) + return self[cmd](logger) + + def usage(self): + """display usage for the main program (i.e. when no command supplied) + and exit + """ + print('usage:', self.pgm, end=' ') + if self.rcfile: + print('[--rc-file=]', end=' ') + print(' [options] ...') + if self.doc: + print('\n%s' % self.doc) + print(''' +Type "%(pgm)s --help" for more information about a specific +command. Available commands are :\n''' % self.__dict__) + max_len = max([len(cmd) for cmd in self]) + padding = ' ' * max_len + for cmdname, cmd in sorted(self.items()): + if not cmd.hidden: + print(' ', (cmdname + padding)[:max_len], cmd.short_description()) + if self.rcfile: + print(''' +Use --rc-file= / -C before the command +to specify a configuration file. Default to %s. +''' % self.rcfile) + print('''%(pgm)s -h/--help + display this usage information and exit''' % self.__dict__) + if self.version: + print('''%(pgm)s -v/--version + display version configuration and exit''' % self.__dict__) + if self.copyright: + print('\n', self.copyright) + + def usage_and_exit(self, status): + self.usage() + sys.exit(status) + + +# base command classes ######################################################### + +class Command(Configuration): + """Base class for command line commands. 
+ + Class attributes: + + * `name`, the name of the command + + * `min_args`, minimum number of arguments, None if unspecified + + * `max_args`, maximum number of arguments, None if unspecified + + * `arguments`, string describing arguments, used in command usage + + * `hidden`, boolean flag telling if the command should be hidden, e.g. does + not appear in help's commands list + + * `options`, options list, as allowed by :mod:configuration + """ + + arguments = '' + name = '' + # hidden from help ? + hidden = False + # max/min args, None meaning unspecified + min_args = None + max_args = None + + @classmethod + def description(cls): + return cls.__doc__.replace(' ', '') + + @classmethod + def short_description(cls): + return cls.description().split('.')[0] + + def __init__(self, logger): + usage = '%%prog %s %s\n\n%s' % (self.name, self.arguments, + self.description()) + Configuration.__init__(self, usage=usage) + self.logger = logger + + def check_args(self, args): + """check command's arguments are provided""" + if self.min_args is not None and len(args) < self.min_args: + raise BadCommandUsage('missing argument') + if self.max_args is not None and len(args) > self.max_args: + raise BadCommandUsage('too many arguments') + + def main_run(self, args, rcfile=None): + """Run the command and return status 0 if everything went fine. + + If :exc:`CommandError` is raised by the underlying command, simply log + the error and return status 2. + + Any other exceptions, including :exc:`BadCommandUsage` will be + propagated. 
+ """ + if rcfile: + self.load_file_configuration(rcfile) + args = self.load_command_line_configuration(args) + try: + self.check_args(args) + self.run(args) + except CommandError as err: + self.logger.error(err) + return 2 + return 0 + + def run(self, args): + """run the command with its specific arguments""" + raise NotImplementedError() + + +class ListCommandsCommand(Command): + """list available commands, useful for bash completion.""" + name = 'listcommands' + arguments = '[command]' + hidden = True + + def run(self, args): + """run the command with its specific arguments""" + if args: + command = args.pop() + cmd = _COMMANDS[command] + for optname, optdict in cmd.options: + print('--help') + print('--' + optname) + else: + commands = sorted(_COMMANDS.keys()) + for command in commands: + cmd = _COMMANDS[command] + if not cmd.hidden: + print(command) + + +# deprecated stuff ############################################################# + +_COMMANDS = CommandLine() + +DEFAULT_COPYRIGHT = '''\ +Copyright (c) 2004-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved. +http://www.logilab.fr/ -- mailto:contact@logilab.fr''' + +@deprecated('use cls.register(cli)') +def register_commands(commands): + """register existing commands""" + for command_klass in commands: + _COMMANDS.register(command_klass) + +@deprecated('use args.pop(0)') +def main_run(args, doc=None, copyright=None, version=None): + """command line tool: run command specified by argument list (without the + program name). Raise SystemExit with status 0 if everything went fine. 
+ + >>> main_run(sys.argv[1:]) + """ + _COMMANDS.doc = doc + _COMMANDS.copyright = copyright + _COMMANDS.version = version + _COMMANDS.run(args) + +@deprecated('use args.pop(0)') +def pop_arg(args_list, expected_size_after=None, msg="Missing argument"): + """helper function to get and check command line arguments""" + try: + value = args_list.pop(0) + except IndexError: + raise BadCommandUsage(msg) + if expected_size_after is not None and len(args_list) > expected_size_after: + raise BadCommandUsage('too many arguments') + return value + diff --git a/pymode/libs/logilab/common/compat.py b/pymode/libs/logilab/common/compat.py new file mode 100644 index 00000000..f2eb5905 --- /dev/null +++ b/pymode/libs/logilab/common/compat.py @@ -0,0 +1,78 @@ +# pylint: disable=E0601,W0622,W0611 +# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved. +# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr +# +# This file is part of logilab-common. +# +# logilab-common is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 2.1 of the License, or (at your option) any +# later version. +# +# logilab-common is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License along +# with logilab-common. If not, see . +"""Wrappers around some builtins introduced in python 2.3, 2.4 and +2.5, making them available in for earlier versions of python. 
+ +See another compatibility snippets from other projects: + + :mod:`lib2to3.fixes` + :mod:`coverage.backward` + :mod:`unittest2.compatibility` +""" + + +__docformat__ = "restructuredtext en" + +import os +import sys +import types +from warnings import warn + +# not used here, but imported to preserve API +from six.moves import builtins + +if sys.version_info < (3, 0): + str_to_bytes = str + def str_encode(string, encoding): + if isinstance(string, unicode): + return string.encode(encoding) + return str(string) +else: + def str_to_bytes(string): + return str.encode(string) + # we have to ignore the encoding in py3k to be able to write a string into a + # TextIOWrapper or like object (which expect an unicode string) + def str_encode(string, encoding): + return str(string) + +# See also http://bugs.python.org/issue11776 +if sys.version_info[0] == 3: + def method_type(callable, instance, klass): + # api change. klass is no more considered + return types.MethodType(callable, instance) +else: + # alias types otherwise + method_type = types.MethodType + +# Pythons 2 and 3 differ on where to get StringIO +if sys.version_info < (3, 0): + from cStringIO import StringIO + FileIO = file + BytesIO = StringIO + reload = reload +else: + from io import FileIO, BytesIO, StringIO + from imp import reload + +from logilab.common.deprecation import deprecated + +# Other projects import these from here, keep providing them for +# backwards compat +any = deprecated('use builtin "any"')(any) +all = deprecated('use builtin "all"')(all) diff --git a/pymode/libs/pylama/lint/pylama_pylint/logilab/common/configuration.py b/pymode/libs/logilab/common/configuration.py similarity index 94% rename from pymode/libs/pylama/lint/pylama_pylint/logilab/common/configuration.py rename to pymode/libs/logilab/common/configuration.py index fa93a056..b2924277 100644 --- a/pymode/libs/pylama/lint/pylama_pylint/logilab/common/configuration.py +++ b/pymode/libs/logilab/common/configuration.py @@ -96,8 +96,19 
@@ multiple=4,5,6 number=3 - >>> + + Note : starting with Python 2.7 ConfigParser is able to take into + account the order of occurrences of the options into a file (by + using an OrderedDict). If you have two options changing some common + state, like a 'disable-all-stuff' and a 'enable-some-stuff-a', their + order of appearance will be significant : the last specified in the + file wins. For earlier version of python and logilab.common newer + than 0.61 the behaviour is unspecified. + """ + +from __future__ import print_function + __docformat__ = "restructuredtext en" __all__ = ('OptionsManagerMixIn', 'OptionsProviderMixIn', @@ -109,11 +120,12 @@ import re from os.path import exists, expanduser from copy import copy -from ConfigParser import ConfigParser, NoOptionError, NoSectionError, \ - DuplicateSectionError from warnings import warn -from logilab.common.compat import callable, raw_input, str_encode as _encode +from six import string_types +from six.moves import range, configparser as cp, input + +from logilab.common.compat import str_encode as _encode from logilab.common.deprecation import deprecated from logilab.common.textutils import normalize_text, unquote from logilab.common import optik_ext @@ -244,23 +256,23 @@ def input_password(optdict, question='password:'): value2 = getpass('confirm: ') if value == value2: return value - print 'password mismatch, try again' + print('password mismatch, try again') def input_string(optdict, question): - value = raw_input(question).strip() + value = input(question).strip() return value or None def _make_input_function(opttype): def input_validator(optdict, question): while True: - value = raw_input(question) + value = input(question) if not value.strip(): return None try: return _call_validator(opttype, optdict, None, value) - except optik_ext.OptionValueError, ex: + except optik_ext.OptionValueError as ex: msg = str(ex).split(':', 1)[-1].strip() - print 'bad value: %s' % msg + print('bad value: %s' % msg) return 
input_validator INPUT_FUNCTIONS = { @@ -358,7 +370,7 @@ def format_option_value(optdict, value): value = value.pattern elif optdict.get('type') == 'yn': value = value and 'yes' or 'no' - elif isinstance(value, (str, unicode)) and value.isspace(): + elif isinstance(value, string_types) and value.isspace(): value = "'%s'" % value elif optdict.get('type') == 'time' and isinstance(value, (float, int, long)): value = format_time(value) @@ -370,8 +382,8 @@ def ini_format_section(stream, section, options, encoding=None, doc=None): """format an options section using the INI format""" encoding = _get_encoding(encoding, stream) if doc: - print >> stream, _encode(comment(doc), encoding) - print >> stream, '[%s]' % section + print(_encode(comment(doc), encoding), file=stream) + print('[%s]' % section, file=stream) ini_format(stream, options, encoding) def ini_format(stream, options, encoding): @@ -381,37 +393,36 @@ def ini_format(stream, options, encoding): help = optdict.get('help') if help: help = normalize_text(help, line_len=79, indent='# ') - print >> stream - print >> stream, _encode(help, encoding) + print(file=stream) + print(_encode(help, encoding), file=stream) else: - print >> stream + print(file=stream) if value is None: - print >> stream, '#%s=' % optname + print('#%s=' % optname, file=stream) else: value = _encode(value, encoding).strip() - print >> stream, '%s=%s' % (optname, value) + print('%s=%s' % (optname, value), file=stream) format_section = ini_format_section def rest_format_section(stream, section, options, encoding=None, doc=None): - """format an options section using the INI format""" + """format an options section using as ReST formatted output""" encoding = _get_encoding(encoding, stream) if section: - print >> stream, '%s\n%s' % (section, "'"*len(section)) + print('%s\n%s' % (section, "'"*len(section)), file=stream) if doc: - print >> stream, _encode(normalize_text(doc, line_len=79, indent=''), - encoding) - print >> stream + 
print(_encode(normalize_text(doc, line_len=79, indent=''), encoding), file=stream) + print(file=stream) for optname, optdict, value in options: help = optdict.get('help') - print >> stream, ':%s:' % optname + print(':%s:' % optname, file=stream) if help: help = normalize_text(help, line_len=79, indent=' ') - print >> stream, _encode(help, encoding) + print(_encode(help, encoding), file=stream) if value: value = _encode(format_option_value(optdict, value), encoding) - print >> stream, '' - print >> stream, ' Default: ``%s``' % value.replace("`` ", "```` ``") + print(file=stream) + print(' Default: ``%s``' % value.replace("`` ", "```` ``"), file=stream) # Options Manager ############################################################## @@ -436,7 +447,7 @@ def __init__(self, usage, config_file=None, version=None, quiet=0): def reset_parsers(self, usage='', version=None): # configuration file parser - self.cfgfile_parser = ConfigParser() + self.cfgfile_parser = cp.ConfigParser() # command line parser self.cmdline_parser = optik_ext.OptionParser(usage=usage, version=version) self.cmdline_parser.options_manager = self @@ -521,7 +532,7 @@ def optik_option(self, provider, opt, optdict): args.append('-' + optdict['short']) del optdict['short'] # cleanup option definition dict before giving it to optik - for key in optdict.keys(): + for key in list(optdict.keys()): if not key in self._optik_option_attrs: optdict.pop(key) return args, optdict @@ -568,7 +579,7 @@ def generate_config(self, stream=None, skipsections=(), encoding=None): printed = False for section in sections: if printed: - print >> stream, '\n' + print('\n', file=stream) format_section(stream, section.upper(), options_by_section[section], encoding) printed = True @@ -607,7 +618,7 @@ def read_config_file(self, config_file=None): if opt in self._all_options: break # already processed def helpfunc(option, opt, val, p, level=helplevel): - print self.help(level) + print(self.help(level)) sys.exit(0) helpmsg = '%s 
verbose help.' % ' '.join(['more'] * helplevel) optdict = {'action' : 'callback', 'callback' : helpfunc, @@ -629,7 +640,7 @@ def helpfunc(option, opt, val, p, level=helplevel): parser._sections[sect.upper()] = values elif not self.quiet: msg = 'No config file found, using default configuration' - print >> sys.stderr, msg + print(msg, file=sys.stderr) return def input_config(self, onlysection=None, inputlevel=0, stream=None): @@ -655,13 +666,13 @@ def load_config_file(self): options provider) """ parser = self.cfgfile_parser - for provider in self.options_providers: - for section, option, optdict in provider.all_options(): - try: - value = parser.get(section, option) - provider.set_option(option, value, optdict=optdict) - except (NoSectionError, NoOptionError), ex: - continue + for section in parser.sections(): + for option, value in parser.items(section): + try: + self.global_set_option(option, value) + except (KeyError, OptionError): + # TODO handle here undeclared options appearing in the config file + continue def load_configuration(self, **kwargs): """override configuration according to given parameters @@ -853,12 +864,12 @@ def input_option(self, option, optdict, inputlevel=99): defaultstr = ': ' else: defaultstr = '(default: %s): ' % format_option_value(optdict, default) - print ':%s:' % option - print optdict.get('help') or option + print(':%s:' % option) + print(optdict.get('help') or option) inputfunc = INPUT_FUNCTIONS[optdict['type']] value = inputfunc(optdict, defaultstr) while default is REQUIRED and not value: - print 'please specify a value' + print('please specify a value') value = inputfunc(optdict, '%s: ' % option) if value is None and default is not None: value = default diff --git a/pymode/libs/logilab/common/daemon.py b/pymode/libs/logilab/common/daemon.py new file mode 100644 index 00000000..40319a43 --- /dev/null +++ b/pymode/libs/logilab/common/daemon.py @@ -0,0 +1,101 @@ +# copyright 2003-2011 LOGILAB S.A. 
(Paris, FRANCE), all rights reserved. +# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr +# +# This file is part of logilab-common. +# +# logilab-common is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 2.1 of the License, or (at your option) any +# later version. +# +# logilab-common is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License along +# with logilab-common. If not, see . +"""A daemonize function (for Unices)""" + +__docformat__ = "restructuredtext en" + +import os +import errno +import signal +import sys +import time +import warnings + +from six.moves import range + +def setugid(user): + """Change process user and group ID + + Argument is a numeric user id or a user name""" + try: + from pwd import getpwuid + passwd = getpwuid(int(user)) + except ValueError: + from pwd import getpwnam + passwd = getpwnam(user) + + if hasattr(os, 'initgroups'): # python >= 2.7 + os.initgroups(passwd.pw_name, passwd.pw_gid) + else: + import ctypes + if ctypes.CDLL(None).initgroups(passwd.pw_name, passwd.pw_gid) < 0: + err = ctypes.c_int.in_dll(ctypes.pythonapi,"errno").value + raise OSError(err, os.strerror(err), 'initgroups') + os.setgid(passwd.pw_gid) + os.setuid(passwd.pw_uid) + os.environ['HOME'] = passwd.pw_dir + + +def daemonize(pidfile=None, uid=None, umask=0o77): + """daemonize a Unix process. Set paranoid umask by default. + + Return 1 in the original process, 2 in the first fork, and None for the + second fork (eg daemon process). + """ + # http://www.faqs.org/faqs/unix-faq/programmer/faq/ + # + # fork so the parent can exit + if os.fork(): # launch child and... 
+        return 1
+    # disconnect from tty and create a new session
+    os.setsid()
+    # fork again so the parent, (the session group leader), can exit.
+    # as a non-session group leader, we can never regain a controlling
+    # terminal.
+    if os.fork(): # launch child again.
+        return 2
+    # move to the root to avoid mount problems
+    os.chdir('/')
+    # redirect standard descriptors
+    null = os.open('/dev/null', os.O_RDWR)
+    for i in range(3):
+        try:
+            os.dup2(null, i)
+        except OSError as e:
+            if e.errno != errno.EBADF:
+                raise
+    os.close(null)
+    # filter warnings
+    warnings.filterwarnings('ignore')
+    # write pid in a file
+    if pidfile:
+        # ensure the directory where the pid-file should be set exists (for
+        # instance /var/run/cubicweb may be deleted on computer restart)
+        piddir = os.path.dirname(pidfile)
+        if not os.path.exists(piddir):
+            os.makedirs(piddir)
+        f = file(pidfile, 'w')
+        f.write(str(os.getpid()))
+        f.close()
+    # set umask if specified
+    if umask is not None:
+        os.umask(umask)
+    # change process uid
+    if uid:
+        setugid(uid)
+    return None
diff --git a/pymode/libs/logilab/common/date.py b/pymode/libs/logilab/common/date.py
new file mode 100644
index 00000000..a093a8a9
--- /dev/null
+++ b/pymode/libs/logilab/common/date.py
@@ -0,0 +1,335 @@
+# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of logilab-common.
+#
+# logilab-common is free software: you can redistribute it and/or modify it under
+# the terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option) any
+# later version.
+#
+# logilab-common is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+# +# You should have received a copy of the GNU Lesser General Public License along +# with logilab-common. If not, see . +"""Date manipulation helper functions.""" +from __future__ import division + +__docformat__ = "restructuredtext en" + +import math +import re +import sys +from locale import getlocale, LC_TIME +from datetime import date, time, datetime, timedelta +from time import strptime as time_strptime +from calendar import monthrange, timegm + +from six.moves import range + +try: + from mx.DateTime import RelativeDateTime, Date, DateTimeType +except ImportError: + endOfMonth = None + DateTimeType = datetime +else: + endOfMonth = RelativeDateTime(months=1, day=-1) + +# NOTE: should we implement a compatibility layer between date representations +# as we have in lgc.db ? + +FRENCH_FIXED_HOLIDAYS = { + 'jour_an': '%s-01-01', + 'fete_travail': '%s-05-01', + 'armistice1945': '%s-05-08', + 'fete_nat': '%s-07-14', + 'assomption': '%s-08-15', + 'toussaint': '%s-11-01', + 'armistice1918': '%s-11-11', + 'noel': '%s-12-25', + } + +FRENCH_MOBILE_HOLIDAYS = { + 'paques2004': '2004-04-12', + 'ascension2004': '2004-05-20', + 'pentecote2004': '2004-05-31', + + 'paques2005': '2005-03-28', + 'ascension2005': '2005-05-05', + 'pentecote2005': '2005-05-16', + + 'paques2006': '2006-04-17', + 'ascension2006': '2006-05-25', + 'pentecote2006': '2006-06-05', + + 'paques2007': '2007-04-09', + 'ascension2007': '2007-05-17', + 'pentecote2007': '2007-05-28', + + 'paques2008': '2008-03-24', + 'ascension2008': '2008-05-01', + 'pentecote2008': '2008-05-12', + + 'paques2009': '2009-04-13', + 'ascension2009': '2009-05-21', + 'pentecote2009': '2009-06-01', + + 'paques2010': '2010-04-05', + 'ascension2010': '2010-05-13', + 'pentecote2010': '2010-05-24', + + 'paques2011': '2011-04-25', + 'ascension2011': '2011-06-02', + 'pentecote2011': '2011-06-13', + + 'paques2012': '2012-04-09', + 'ascension2012': '2012-05-17', + 'pentecote2012': '2012-05-28', + } + +# XXX this implementation cries for 
multimethod dispatching + +def get_step(dateobj, nbdays=1): + # assume date is either a python datetime or a mx.DateTime object + if isinstance(dateobj, date): + return ONEDAY * nbdays + return nbdays # mx.DateTime is ok with integers + +def datefactory(year, month, day, sampledate): + # assume date is either a python datetime or a mx.DateTime object + if isinstance(sampledate, datetime): + return datetime(year, month, day) + if isinstance(sampledate, date): + return date(year, month, day) + return Date(year, month, day) + +def weekday(dateobj): + # assume date is either a python datetime or a mx.DateTime object + if isinstance(dateobj, date): + return dateobj.weekday() + return dateobj.day_of_week + +def str2date(datestr, sampledate): + # NOTE: datetime.strptime is not an option until we drop py2.4 compat + year, month, day = [int(chunk) for chunk in datestr.split('-')] + return datefactory(year, month, day, sampledate) + +def days_between(start, end): + if isinstance(start, date): + delta = end - start + # datetime.timedelta.days is always an integer (floored) + if delta.seconds: + return delta.days + 1 + return delta.days + else: + return int(math.ceil((end - start).days)) + +def get_national_holidays(begin, end): + """return french national days off between begin and end""" + begin = datefactory(begin.year, begin.month, begin.day, begin) + end = datefactory(end.year, end.month, end.day, end) + holidays = [str2date(datestr, begin) + for datestr in FRENCH_MOBILE_HOLIDAYS.values()] + for year in range(begin.year, end.year+1): + for datestr in FRENCH_FIXED_HOLIDAYS.values(): + date = str2date(datestr % year, begin) + if date not in holidays: + holidays.append(date) + return [day for day in holidays if begin <= day < end] + +def add_days_worked(start, days): + """adds date but try to only take days worked into account""" + step = get_step(start) + weeks, plus = divmod(days, 5) + end = start + ((weeks * 7) + plus) * step + if weekday(end) >= 5: # saturday or sunday + 
end += (2 * step) + end += len([x for x in get_national_holidays(start, end + step) + if weekday(x) < 5]) * step + if weekday(end) >= 5: # saturday or sunday + end += (2 * step) + return end + +def nb_open_days(start, end): + assert start <= end + step = get_step(start) + days = days_between(start, end) + weeks, plus = divmod(days, 7) + if weekday(start) > weekday(end): + plus -= 2 + elif weekday(end) == 6: + plus -= 1 + open_days = weeks * 5 + plus + nb_week_holidays = len([x for x in get_national_holidays(start, end+step) + if weekday(x) < 5 and x < end]) + open_days -= nb_week_holidays + if open_days < 0: + return 0 + return open_days + +def date_range(begin, end, incday=None, incmonth=None): + """yields each date between begin and end + + :param begin: the start date + :param end: the end date + :param incr: the step to use to iterate over dates. Default is + one day. + :param include: None (means no exclusion) or a function taking a + date as parameter, and returning True if the date + should be included. 
+ + When using mx datetime, you should *NOT* use incmonth argument, use instead + oneDay, oneHour, oneMinute, oneSecond, oneWeek or endOfMonth (to enumerate + months) as `incday` argument + """ + assert not (incday and incmonth) + begin = todate(begin) + end = todate(end) + if incmonth: + while begin < end: + yield begin + begin = next_month(begin, incmonth) + else: + incr = get_step(begin, incday or 1) + while begin < end: + yield begin + begin += incr + +# makes py datetime usable ##################################################### + +ONEDAY = timedelta(days=1) +ONEWEEK = timedelta(days=7) + +try: + strptime = datetime.strptime +except AttributeError: # py < 2.5 + from time import strptime as time_strptime + def strptime(value, format): + return datetime(*time_strptime(value, format)[:6]) + +def strptime_time(value, format='%H:%M'): + return time(*time_strptime(value, format)[3:6]) + +def todate(somedate): + """return a date from a date (leaving unchanged) or a datetime""" + if isinstance(somedate, datetime): + return date(somedate.year, somedate.month, somedate.day) + assert isinstance(somedate, (date, DateTimeType)), repr(somedate) + return somedate + +def totime(somedate): + """return a time from a time (leaving unchanged), date or datetime""" + # XXX mx compat + if not isinstance(somedate, time): + return time(somedate.hour, somedate.minute, somedate.second) + assert isinstance(somedate, (time)), repr(somedate) + return somedate + +def todatetime(somedate): + """return a date from a date (leaving unchanged) or a datetime""" + # take care, datetime is a subclass of date + if isinstance(somedate, datetime): + return somedate + assert isinstance(somedate, (date, DateTimeType)), repr(somedate) + return datetime(somedate.year, somedate.month, somedate.day) + +def datetime2ticks(somedate): + return timegm(somedate.timetuple()) * 1000 + +def ticks2datetime(ticks): + miliseconds, microseconds = divmod(ticks, 1000) + try: + return datetime.fromtimestamp(miliseconds) 
+ except (ValueError, OverflowError): + epoch = datetime.fromtimestamp(0) + nb_days, seconds = divmod(int(miliseconds), 86400) + delta = timedelta(nb_days, seconds=seconds, microseconds=microseconds) + try: + return epoch + delta + except (ValueError, OverflowError): + raise + +def days_in_month(somedate): + return monthrange(somedate.year, somedate.month)[1] + +def days_in_year(somedate): + feb = date(somedate.year, 2, 1) + if days_in_month(feb) == 29: + return 366 + else: + return 365 + +def previous_month(somedate, nbmonth=1): + while nbmonth: + somedate = first_day(somedate) - ONEDAY + nbmonth -= 1 + return somedate + +def next_month(somedate, nbmonth=1): + while nbmonth: + somedate = last_day(somedate) + ONEDAY + nbmonth -= 1 + return somedate + +def first_day(somedate): + return date(somedate.year, somedate.month, 1) + +def last_day(somedate): + return date(somedate.year, somedate.month, days_in_month(somedate)) + +def ustrftime(somedate, fmt='%Y-%m-%d'): + """like strftime, but returns a unicode string instead of an encoded + string which may be problematic with localized date. + """ + if sys.version_info >= (3, 3): + # datetime.date.strftime() supports dates since year 1 in Python >=3.3. 
+        return somedate.strftime(fmt)
+    else:
+        try:
+            if sys.version_info < (3, 0):
+                encoding = getlocale(LC_TIME)[1] or 'ascii'
+                return unicode(somedate.strftime(str(fmt)), encoding)
+            else:
+                return somedate.strftime(fmt)
+        except ValueError:
+            if somedate.year >= 1900:
+                raise
+            # datetime is not happy with dates before 1900
+            # we try to work around this, assuming a simple
+            # format string
+            fields = {'Y': somedate.year,
+                      'm': somedate.month,
+                      'd': somedate.day,
+                      }
+            if isinstance(somedate, datetime):
+                fields.update({'H': somedate.hour,
+                               'M': somedate.minute,
+                               'S': somedate.second})
+            fmt = re.sub('%([YmdHMS])', r'%(\1)02d', fmt)
+            return unicode(fmt) % fields
+
+def utcdatetime(dt):
+    if dt.tzinfo is None:
+        return dt
+    return (dt.replace(tzinfo=None) - dt.utcoffset())
+
+def utctime(dt):
+    if dt.tzinfo is None:
+        return dt
+    return (dt + dt.utcoffset() + dt.dst()).replace(tzinfo=None)
+
+def datetime_to_seconds(date):
+    """return the number of seconds since the beginning of the day for that date
+    """
+    return date.second+60*date.minute + 3600*date.hour
+
+def timedelta_to_days(delta):
+    """return the time delta as a (fractional) number of days"""
+    return delta.days + delta.seconds / (3600*24)
+
+def timedelta_to_seconds(delta):
+    """return the time delta as a number of seconds"""
+    return delta.days*(3600*24) + delta.seconds
diff --git a/pymode/libs/logilab/common/debugger.py b/pymode/libs/logilab/common/debugger.py
new file mode 100644
index 00000000..1f540a18
--- /dev/null
+++ b/pymode/libs/logilab/common/debugger.py
@@ -0,0 +1,214 @@
+# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of logilab-common.
+# +# logilab-common is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 2.1 of the License, or (at your option) any +# later version. +# +# logilab-common is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License along +# with logilab-common. If not, see . +"""Customized version of pdb's default debugger. + +- sets up a history file +- uses ipython if available to colorize lines of code +- overrides list command to search for current block instead + of using 5 lines of context + + + + +""" + +from __future__ import print_function + +__docformat__ = "restructuredtext en" + +try: + import readline +except ImportError: + readline = None +import os +import os.path as osp +import sys +from pdb import Pdb +import inspect + +from logilab.common.compat import StringIO + +try: + from IPython import PyColorize +except ImportError: + def colorize(source, *args): + """fallback colorize function""" + return source + def colorize_source(source, *args): + return source +else: + def colorize(source, start_lineno, curlineno): + """colorize and annotate source with linenos + (as in pdb's list command) + """ + parser = PyColorize.Parser() + output = StringIO() + parser.format(source, output) + annotated = [] + for index, line in enumerate(output.getvalue().splitlines()): + lineno = index + start_lineno + if lineno == curlineno: + annotated.append('%4s\t->\t%s' % (lineno, line)) + else: + annotated.append('%4s\t\t%s' % (lineno, line)) + return '\n'.join(annotated) + + def colorize_source(source): + """colorize given source""" + parser = PyColorize.Parser() + output = StringIO() + parser.format(source, 
output) + return output.getvalue() + + +def getsource(obj): + """Return the text of the source code for an object. + + The argument may be a module, class, method, function, traceback, frame, + or code object. The source code is returned as a single string. An + IOError is raised if the source code cannot be retrieved.""" + lines, lnum = inspect.getsourcelines(obj) + return ''.join(lines), lnum + + +################################################################ +class Debugger(Pdb): + """custom debugger + + - sets up a history file + - uses ipython if available to colorize lines of code + - overrides list command to search for current block instead + of using 5 lines of context + """ + def __init__(self, tcbk=None): + Pdb.__init__(self) + self.reset() + if tcbk: + while tcbk.tb_next is not None: + tcbk = tcbk.tb_next + self._tcbk = tcbk + self._histfile = os.path.expanduser("~/.pdbhist") + + def setup_history_file(self): + """if readline is available, read pdb history file + """ + if readline is not None: + try: + # XXX try..except shouldn't be necessary + # read_history_file() can accept None + readline.read_history_file(self._histfile) + except IOError: + pass + + def start(self): + """starts the interactive mode""" + self.interaction(self._tcbk.tb_frame, self._tcbk) + + def setup(self, frame, tcbk): + """setup hook: set up history file""" + self.setup_history_file() + Pdb.setup(self, frame, tcbk) + + def set_quit(self): + """quit hook: save commands in the history file""" + if readline is not None: + readline.write_history_file(self._histfile) + Pdb.set_quit(self) + + def complete_p(self, text, line, begin_idx, end_idx): + """provide variable names completion for the ``p`` command""" + namespace = dict(self.curframe.f_globals) + namespace.update(self.curframe.f_locals) + if '.' 
in text: + return self.attr_matches(text, namespace) + return [varname for varname in namespace if varname.startswith(text)] + + + def attr_matches(self, text, namespace): + """implementation coming from rlcompleter.Completer.attr_matches + Compute matches when text contains a dot. + + Assuming the text is of the form NAME.NAME....[NAME], and is + evaluatable in self.namespace, it will be evaluated and its attributes + (as revealed by dir()) are used as possible completions. (For class + instances, class members are also considered.) + + WARNING: this can still invoke arbitrary C code, if an object + with a __getattr__ hook is evaluated. + + """ + import re + m = re.match(r"(\w+(\.\w+)*)\.(\w*)", text) + if not m: + return + expr, attr = m.group(1, 3) + object = eval(expr, namespace) + words = dir(object) + if hasattr(object, '__class__'): + words.append('__class__') + words = words + self.get_class_members(object.__class__) + matches = [] + n = len(attr) + for word in words: + if word[:n] == attr and word != "__builtins__": + matches.append("%s.%s" % (expr, word)) + return matches + + def get_class_members(self, klass): + """implementation coming from rlcompleter.get_class_members""" + ret = dir(klass) + if hasattr(klass, '__bases__'): + for base in klass.__bases__: + ret = ret + self.get_class_members(base) + return ret + + ## specific / overridden commands + def do_list(self, arg): + """overrides default list command to display the surrounding block + instead of 5 lines of context + """ + self.lastcmd = 'list' + if not arg: + try: + source, start_lineno = getsource(self.curframe) + print(colorize(''.join(source), start_lineno, + self.curframe.f_lineno)) + except KeyboardInterrupt: + pass + except IOError: + Pdb.do_list(self, arg) + else: + Pdb.do_list(self, arg) + do_l = do_list + + def do_open(self, arg): + """opens source file corresponding to the current stack level""" + filename = self.curframe.f_code.co_filename + lineno = self.curframe.f_lineno + cmd = 
'emacsclient --no-wait +%s %s' % (lineno, filename) + os.system(cmd) + + do_o = do_open + +def pm(): + """use our custom debugger""" + dbg = Debugger(sys.last_traceback) + dbg.start() + +def set_trace(): + Debugger().set_trace(sys._getframe().f_back) diff --git a/pymode/libs/pylama/lint/pylama_pylint/logilab/common/decorators.py b/pymode/libs/logilab/common/decorators.py similarity index 95% rename from pymode/libs/pylama/lint/pylama_pylint/logilab/common/decorators.py rename to pymode/libs/logilab/common/decorators.py index 34bbd3a9..beafa202 100644 --- a/pymode/libs/pylama/lint/pylama_pylint/logilab/common/decorators.py +++ b/pymode/libs/logilab/common/decorators.py @@ -16,27 +16,28 @@ # You should have received a copy of the GNU Lesser General Public License along # with logilab-common. If not, see . """ A few useful function/method decorators. """ + +from __future__ import print_function + __docformat__ = "restructuredtext en" import sys import types from time import clock, time +from inspect import isgeneratorfunction, getargspec -from logilab.common.compat import callable, method_type +from logilab.common.compat import method_type # XXX rewrite so we can use the decorator syntax when keyarg has to be specified -def _is_generator_function(callableobj): - return callableobj.func_code.co_flags & 0x20 - class cached_decorator(object): def __init__(self, cacheattr=None, keyarg=None): self.cacheattr = cacheattr self.keyarg = keyarg def __call__(self, callableobj=None): - assert not _is_generator_function(callableobj), \ + assert not isgeneratorfunction(callableobj), \ 'cannot cache generator function: %s' % callableobj - if callableobj.func_code.co_argcount == 1 or self.keyarg == 0: + if len(getargspec(callableobj).args) == 1 or self.keyarg == 0: cache = _SingleValueCache(callableobj, self.cacheattr) elif self.keyarg: cache = _MultiValuesKeyArgCache(callableobj, self.keyarg, self.cacheattr) @@ -68,7 +69,6 @@ def wrapped(*args, **kwargs): try: wrapped.__doc__ = 
self.callable.__doc__ wrapped.__name__ = self.callable.__name__ - wrapped.func_name = self.callable.func_name except: pass return wrapped @@ -227,8 +227,8 @@ def wrap(*args, **kwargs): t = time() c = clock() res = f(*args, **kwargs) - print '%s clock: %.9f / time: %.9f' % (f.__name__, - clock() - c, time() - t) + print('%s clock: %.9f / time: %.9f' % (f.__name__, + clock() - c, time() - t)) return res return wrap diff --git a/pymode/libs/pylama/lint/pylama_pylint/logilab/common/deprecation.py b/pymode/libs/logilab/common/deprecation.py similarity index 98% rename from pymode/libs/pylama/lint/pylama_pylint/logilab/common/deprecation.py rename to pymode/libs/logilab/common/deprecation.py index 02e4edbb..1c81b638 100644 --- a/pymode/libs/pylama/lint/pylama_pylint/logilab/common/deprecation.py +++ b/pymode/libs/logilab/common/deprecation.py @@ -78,7 +78,7 @@ def deprecated(self, version=None, reason=None, stacklevel=2, name=None, doc=Non def decorator(func): message = reason or 'The function "%s" is deprecated' if '%s' in message: - message %= func.func_name + message %= func.__name__ def wrapped(*args, **kwargs): self.warn(version, message, stacklevel+1) return func(*args, **kwargs) @@ -125,11 +125,12 @@ def class_renamed(self, version, old_name, new_class, message=None): return self.class_deprecated(version)(old_name, (new_class,), clsdict) except (NameError, TypeError): # old-style class + warn = self.warn class DeprecatedClass(new_class): """FIXME: There might be a better way to handle old/new-style class """ def __init__(self, *args, **kwargs): - self.warn(version, message, stacklevel=3) + warn(version, message, stacklevel=3) new_class.__init__(self, *args, **kwargs) return DeprecatedClass diff --git a/pymode/libs/logilab/common/fileutils.py b/pymode/libs/logilab/common/fileutils.py new file mode 100644 index 00000000..b30cf5f8 --- /dev/null +++ b/pymode/libs/logilab/common/fileutils.py @@ -0,0 +1,404 @@ +# copyright 2003-2011 LOGILAB S.A. 
(Paris, FRANCE), all rights reserved. +# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr +# +# This file is part of logilab-common. +# +# logilab-common is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 2.1 of the License, or (at your option) any +# later version. +# +# logilab-common is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License along +# with logilab-common. If not, see . +"""File and file-path manipulation utilities. + +:group path manipulation: first_level_directory, relative_path, is_binary,\ +get_by_ext, remove_dead_links +:group file manipulation: norm_read, norm_open, lines, stream_lines, lines,\ +write_open_mode, ensure_fs_mode, export +:sort: path manipulation, file manipulation +""" + +from __future__ import print_function + +__docformat__ = "restructuredtext en" + +import sys +import shutil +import mimetypes +from os.path import isabs, isdir, islink, split, exists, normpath, join +from os.path import abspath +from os import sep, mkdir, remove, listdir, stat, chmod, walk +from stat import ST_MODE, S_IWRITE + +from logilab.common import STD_BLACKLIST as BASE_BLACKLIST, IGNORED_EXTENSIONS +from logilab.common.shellutils import find +from logilab.common.deprecation import deprecated +from logilab.common.compat import FileIO + +def first_level_directory(path): + """Return the first level directory of a path. 
+ + >>> first_level_directory('home/syt/work') + 'home' + >>> first_level_directory('/home/syt/work') + '/' + >>> first_level_directory('work') + 'work' + >>> + + :type path: str + :param path: the path for which we want the first level directory + + :rtype: str + :return: the first level directory appearing in `path` + """ + head, tail = split(path) + while head and tail: + head, tail = split(head) + if tail: + return tail + # path was absolute, head is the fs root + return head + +def abspath_listdir(path): + """Lists path's content using absolute paths. + + >>> os.listdir('/home') + ['adim', 'alf', 'arthur', 'auc'] + >>> abspath_listdir('/home') + ['/home/adim', '/home/alf', '/home/arthur', '/home/auc'] + """ + path = abspath(path) + return [join(path, filename) for filename in listdir(path)] + + +def is_binary(filename): + """Return true if filename may be a binary file, according to it's + extension. + + :type filename: str + :param filename: the name of the file + + :rtype: bool + :return: + true if the file is a binary file (actually if it's mime type + isn't beginning by text/) + """ + try: + return not mimetypes.guess_type(filename)[0].startswith('text') + except AttributeError: + return 1 + + +def write_open_mode(filename): + """Return the write mode that should used to open file. + + :type filename: str + :param filename: the name of the file + + :rtype: str + :return: the mode that should be use to open the file ('w' or 'wb') + """ + if is_binary(filename): + return 'wb' + return 'w' + + +def ensure_fs_mode(filepath, desired_mode=S_IWRITE): + """Check that the given file has the given mode(s) set, else try to + set it. + + :type filepath: str + :param filepath: path of the file + + :type desired_mode: int + :param desired_mode: + ORed flags describing the desired mode. 
Use constants from the + `stat` module for file permission's modes + """ + mode = stat(filepath)[ST_MODE] + if not mode & desired_mode: + chmod(filepath, mode | desired_mode) + + +# XXX (syt) unused? kill? +class ProtectedFile(FileIO): + """A special file-object class that automatically does a 'chmod +w' when + needed. + + XXX: for now, the way it is done allows 'normal file-objects' to be + created during the ProtectedFile object lifetime. + One way to circumvent this would be to chmod / unchmod on each + write operation. + + One other way would be to : + + - catch the IOError in the __init__ + + - if IOError, then create a StringIO object + + - each write operation writes in this StringIO object + + - on close()/del(), write/append the StringIO content to the file and + do the chmod only once + """ + def __init__(self, filepath, mode): + self.original_mode = stat(filepath)[ST_MODE] + self.mode_changed = False + if mode in ('w', 'a', 'wb', 'ab'): + if not self.original_mode & S_IWRITE: + chmod(filepath, self.original_mode | S_IWRITE) + self.mode_changed = True + FileIO.__init__(self, filepath, mode) + + def _restore_mode(self): + """restores the original mode if needed""" + if self.mode_changed: + chmod(self.name, self.original_mode) + # Don't re-chmod in case of several restore + self.mode_changed = False + + def close(self): + """restore mode before closing""" + self._restore_mode() + FileIO.close(self) + + def __del__(self): + if not self.closed: + self.close() + + +class UnresolvableError(Exception): + """Exception raised by relative path when it's unable to compute relative + path between two paths. + """ + +def relative_path(from_file, to_file): + """Try to get a relative path from `from_file` to `to_file` + (path will be absolute if to_file is an absolute file). This function + is useful to create link in `from_file` to `to_file`. This typical use + case is used in this function description. 
+ + If both files are relative, they're expected to be relative to the same + directory. + + >>> relative_path( from_file='toto/index.html', to_file='index.html') + '../index.html' + >>> relative_path( from_file='index.html', to_file='toto/index.html') + 'toto/index.html' + >>> relative_path( from_file='tutu/index.html', to_file='toto/index.html') + '../toto/index.html' + >>> relative_path( from_file='toto/index.html', to_file='/index.html') + '/index.html' + >>> relative_path( from_file='/toto/index.html', to_file='/index.html') + '../index.html' + >>> relative_path( from_file='/toto/index.html', to_file='/toto/summary.html') + 'summary.html' + >>> relative_path( from_file='index.html', to_file='index.html') + '' + >>> relative_path( from_file='/index.html', to_file='toto/index.html') + Traceback (most recent call last): + File "", line 1, in ? + File "", line 37, in relative_path + UnresolvableError + >>> relative_path( from_file='/index.html', to_file='/index.html') + '' + >>> + + :type from_file: str + :param from_file: source file (where links will be inserted) + + :type to_file: str + :param to_file: target file (on which links point) + + :raise UnresolvableError: if it has been unable to guess a correct path + + :rtype: str + :return: the relative path of `to_file` from `from_file` + """ + from_file = normpath(from_file) + to_file = normpath(to_file) + if from_file == to_file: + return '' + if isabs(to_file): + if not isabs(from_file): + return to_file + elif isabs(from_file): + raise UnresolvableError() + from_parts = from_file.split(sep) + to_parts = to_file.split(sep) + idem = 1 + result = [] + while len(from_parts) > 1: + dirname = from_parts.pop(0) + if idem and len(to_parts) > 1 and dirname == to_parts[0]: + to_parts.pop(0) + else: + idem = 0 + result.append('..') + result += to_parts + return sep.join(result) + + +def norm_read(path): + """Return the content of the file with normalized line feeds. 
+ + :type path: str + :param path: path to the file to read + + :rtype: str + :return: the content of the file with normalized line feeds + """ + return open(path, 'U').read() +norm_read = deprecated("use \"open(path, 'U').read()\"")(norm_read) + +def norm_open(path): + """Return a stream for a file with content with normalized line feeds. + + :type path: str + :param path: path to the file to open + + :rtype: file or StringIO + :return: the opened file with normalized line feeds + """ + return open(path, 'U') +norm_open = deprecated("use \"open(path, 'U')\"")(norm_open) + +def lines(path, comments=None): + """Return a list of non empty lines in the file located at `path`. + + :type path: str + :param path: path to the file + + :type comments: str or None + :param comments: + optional string which can be used to comment a line in the file + (i.e. lines starting with this string won't be returned) + + :rtype: list + :return: + a list of stripped line in the file, without empty and commented + lines + + :warning: at some point this function will probably return an iterator + """ + stream = open(path, 'U') + result = stream_lines(stream, comments) + stream.close() + return result + + +def stream_lines(stream, comments=None): + """Return a list of non empty lines in the given `stream`. + + :type stream: object implementing 'xreadlines' or 'readlines' + :param stream: file like object + + :type comments: str or None + :param comments: + optional string which can be used to comment a line in the file + (i.e. 
lines starting with this string won't be returned) + + :rtype: list + :return: + a list of stripped line in the file, without empty and commented + lines + + :warning: at some point this function will probably return an iterator + """ + try: + readlines = stream.xreadlines + except AttributeError: + readlines = stream.readlines + result = [] + for line in readlines(): + line = line.strip() + if line and (comments is None or not line.startswith(comments)): + result.append(line) + return result + + +def export(from_dir, to_dir, + blacklist=BASE_BLACKLIST, ignore_ext=IGNORED_EXTENSIONS, + verbose=0): + """Make a mirror of `from_dir` in `to_dir`, omitting directories and + files listed in the black list or ending with one of the given + extensions. + + :type from_dir: str + :param from_dir: directory to export + + :type to_dir: str + :param to_dir: destination directory + + :type blacklist: list or tuple + :param blacklist: + list of files or directories to ignore, default to the content of + `BASE_BLACKLIST` + + :type ignore_ext: list or tuple + :param ignore_ext: + list of extensions to ignore, default to the content of + `IGNORED_EXTENSIONS` + + :type verbose: bool + :param verbose: + flag indicating whether information about exported files should be + printed to stderr, default to False + """ + try: + mkdir(to_dir) + except OSError: + pass # FIXME we should use "exists" if the point is about existing dir + # else (permission problems?) shouldn't return / raise ? 
+ for directory, dirnames, filenames in walk(from_dir): + for norecurs in blacklist: + try: + dirnames.remove(norecurs) + except ValueError: + continue + for dirname in dirnames: + src = join(directory, dirname) + dest = to_dir + src[len(from_dir):] + if isdir(src): + if not exists(dest): + mkdir(dest) + for filename in filenames: + # don't include binary files + # endswith does not accept tuple in 2.4 + if any([filename.endswith(ext) for ext in ignore_ext]): + continue + src = join(directory, filename) + dest = to_dir + src[len(from_dir):] + if verbose: + print(src, '->', dest, file=sys.stderr) + if exists(dest): + remove(dest) + shutil.copy2(src, dest) + + +def remove_dead_links(directory, verbose=0): + """Recursively traverse directory and remove all dead links. + + :type directory: str + :param directory: directory to cleanup + + :type verbose: bool + :param verbose: + flag indicating whether information about deleted links should be + printed to stderr, default to False + """ + for dirpath, dirname, filenames in walk(directory): + for filename in dirnames + filenames: + src = join(dirpath, filename) + if islink(src) and not exists(src): + if verbose: + print('remove dead link', src) + remove(src) + diff --git a/pymode/libs/pylama/lint/pylama_pylint/logilab/common/graph.py b/pymode/libs/logilab/common/graph.py similarity index 93% rename from pymode/libs/pylama/lint/pylama_pylint/logilab/common/graph.py rename to pymode/libs/logilab/common/graph.py index d62e8c09..cef1c984 100644 --- a/pymode/libs/pylama/lint/pylama_pylint/logilab/common/graph.py +++ b/pymode/libs/logilab/common/graph.py @@ -29,6 +29,7 @@ import sys import tempfile import codecs +import errno def escape(value): """Make usable in a dot file.""" @@ -63,7 +64,7 @@ def __init__(self, graphname, rankdir=None, size=None, ratio=None, assert charset.lower() in ('utf-8', 'iso-8859-1', 'latin1'), \ 'unsupported charset %s' % charset self.emit('charset="%s"' % charset) - for param in 
additionnal_param.iteritems(): + for param in sorted(additionnal_param.items()): self.emit('='.join(param)) def get_source(self): @@ -114,13 +115,18 @@ def generate(self, outputfile=None, dotfile=None, mapfile=None): use_shell = True else: use_shell = False - if mapfile: - subprocess.call([self.renderer, '-Tcmapx', '-o', mapfile, '-T', target, dot_sourcepath, '-o', outputfile], - shell=use_shell) - else: - subprocess.call([self.renderer, '-T', target, - dot_sourcepath, '-o', outputfile], - shell=use_shell) + try: + if mapfile: + subprocess.call([self.renderer, '-Tcmapx', '-o', mapfile, '-T', target, dot_sourcepath, '-o', outputfile], + shell=use_shell) + else: + subprocess.call([self.renderer, '-T', target, + dot_sourcepath, '-o', outputfile], + shell=use_shell) + except OSError as e: + if e.errno == errno.ENOENT: + e.strerror = 'File not found: {0}'.format(self.renderer) + raise os.unlink(dot_sourcepath) return outputfile diff --git a/pymode/libs/pylama/lint/pylama_pylint/logilab/common/interface.py b/pymode/libs/logilab/common/interface.py similarity index 100% rename from pymode/libs/pylama/lint/pylama_pylint/logilab/common/interface.py rename to pymode/libs/logilab/common/interface.py diff --git a/pymode/libs/logilab/common/logging_ext.py b/pymode/libs/logilab/common/logging_ext.py new file mode 100644 index 00000000..3b6a580a --- /dev/null +++ b/pymode/libs/logilab/common/logging_ext.py @@ -0,0 +1,195 @@ +# -*- coding: utf-8 -*- +# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved. +# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr +# +# This file is part of logilab-common. +# +# logilab-common is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 2.1 of the License, or (at your option) any +# later version. 
+# +# logilab-common is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License along +# with logilab-common. If not, see . +"""Extends the logging module from the standard library.""" + +__docformat__ = "restructuredtext en" + +import os +import sys +import logging + +from six import string_types + +from logilab.common.textutils import colorize_ansi + + +def set_log_methods(cls, logger): + """bind standard logger's methods as methods on the class""" + cls.__logger = logger + for attr in ('debug', 'info', 'warning', 'error', 'critical', 'exception'): + setattr(cls, attr, getattr(logger, attr)) + + +def xxx_cyan(record): + if 'XXX' in record.message: + return 'cyan' + +class ColorFormatter(logging.Formatter): + """ + A color Formatter for the logging standard module. + + By default, colorize CRITICAL and ERROR in red, WARNING in orange, INFO in + green and DEBUG in yellow. + + self.colors is customizable via the 'color' constructor argument (dictionary). + + self.colorfilters is a list of functions that get the LogRecord + and return a color name or None. 
+ """ + + def __init__(self, fmt=None, datefmt=None, colors=None): + logging.Formatter.__init__(self, fmt, datefmt) + self.colorfilters = [] + self.colors = {'CRITICAL': 'red', + 'ERROR': 'red', + 'WARNING': 'magenta', + 'INFO': 'green', + 'DEBUG': 'yellow', + } + if colors is not None: + assert isinstance(colors, dict) + self.colors.update(colors) + + def format(self, record): + msg = logging.Formatter.format(self, record) + if record.levelname in self.colors: + color = self.colors[record.levelname] + return colorize_ansi(msg, color) + else: + for cf in self.colorfilters: + color = cf(record) + if color: + return colorize_ansi(msg, color) + return msg + +def set_color_formatter(logger=None, **kw): + """ + Install a color formatter on the 'logger'. If not given, it will + defaults to the default logger. + + Any additional keyword will be passed as-is to the ColorFormatter + constructor. + """ + if logger is None: + logger = logging.getLogger() + if not logger.handlers: + logging.basicConfig() + format_msg = logger.handlers[0].formatter._fmt + fmt = ColorFormatter(format_msg, **kw) + fmt.colorfilters.append(xxx_cyan) + logger.handlers[0].setFormatter(fmt) + + +LOG_FORMAT = '%(asctime)s - (%(name)s) %(levelname)s: %(message)s' +LOG_DATE_FORMAT = '%Y-%m-%d %H:%M:%S' + +def get_handler(debug=False, syslog=False, logfile=None, rotation_parameters=None): + """get an apropriate handler according to given parameters""" + if os.environ.get('APYCOT_ROOT'): + handler = logging.StreamHandler(sys.stdout) + if debug: + handler = logging.StreamHandler() + elif logfile is None: + if syslog: + from logging import handlers + handler = handlers.SysLogHandler() + else: + handler = logging.StreamHandler() + else: + try: + if rotation_parameters is None: + if os.name == 'posix' and sys.version_info >= (2, 6): + from logging.handlers import WatchedFileHandler + handler = WatchedFileHandler(logfile) + else: + handler = logging.FileHandler(logfile) + else: + from logging.handlers import 
TimedRotatingFileHandler + handler = TimedRotatingFileHandler( + logfile, **rotation_parameters) + except IOError: + handler = logging.StreamHandler() + return handler + +def get_threshold(debug=False, logthreshold=None): + if logthreshold is None: + if debug: + logthreshold = logging.DEBUG + else: + logthreshold = logging.ERROR + elif isinstance(logthreshold, string_types): + logthreshold = getattr(logging, THRESHOLD_MAP.get(logthreshold, + logthreshold)) + return logthreshold + +def _colorable_terminal(): + isatty = hasattr(sys.__stdout__, 'isatty') and sys.__stdout__.isatty() + if not isatty: + return False + if os.name == 'nt': + try: + from colorama import init as init_win32_colors + except ImportError: + return False + init_win32_colors() + return True + +def get_formatter(logformat=LOG_FORMAT, logdateformat=LOG_DATE_FORMAT): + if _colorable_terminal(): + fmt = ColorFormatter(logformat, logdateformat) + def col_fact(record): + if 'XXX' in record.message: + return 'cyan' + if 'kick' in record.message: + return 'red' + fmt.colorfilters.append(col_fact) + else: + fmt = logging.Formatter(logformat, logdateformat) + return fmt + +def init_log(debug=False, syslog=False, logthreshold=None, logfile=None, + logformat=LOG_FORMAT, logdateformat=LOG_DATE_FORMAT, fmt=None, + rotation_parameters=None, handler=None): + """init the log service""" + logger = logging.getLogger() + if handler is None: + handler = get_handler(debug, syslog, logfile, rotation_parameters) + # only addHandler and removeHandler method while I would like a setHandler + # method, so do it this way :$ + logger.handlers = [handler] + logthreshold = get_threshold(debug, logthreshold) + logger.setLevel(logthreshold) + if fmt is None: + if debug: + fmt = get_formatter(logformat=logformat, logdateformat=logdateformat) + else: + fmt = logging.Formatter(logformat, logdateformat) + handler.setFormatter(fmt) + return handler + +# map logilab.common.logger thresholds to logging thresholds +THRESHOLD_MAP = 
{'LOG_DEBUG': 'DEBUG', + 'LOG_INFO': 'INFO', + 'LOG_NOTICE': 'INFO', + 'LOG_WARN': 'WARNING', + 'LOG_WARNING': 'WARNING', + 'LOG_ERR': 'ERROR', + 'LOG_ERROR': 'ERROR', + 'LOG_CRIT': 'CRITICAL', + } diff --git a/pymode/libs/pylama/lint/pylama_pylint/logilab/common/modutils.py b/pymode/libs/logilab/common/modutils.py similarity index 94% rename from pymode/libs/pylama/lint/pylama_pylint/logilab/common/modutils.py rename to pymode/libs/logilab/common/modutils.py index 27568412..dd725d24 100644 --- a/pymode/libs/pylama/lint/pylama_pylint/logilab/common/modutils.py +++ b/pymode/libs/logilab/common/modutils.py @@ -25,9 +25,8 @@ :var STD_LIB_DIR: directory where standard modules are located :type BUILTIN_MODULES: dict -:var BUILTIN_MODULES: dictionary with builtin module names has key +:var BUILTIN_MODULES: dictionary with builtin module names as key """ -from __future__ import with_statement __docformat__ = "restructuredtext en" @@ -38,6 +37,8 @@ from distutils.sysconfig import get_config_var, get_python_lib, get_python_version from distutils.errors import DistutilsPlatformError +from six.moves import range + try: import zipimport except ImportError: @@ -61,7 +62,7 @@ PY_COMPILED_EXTS = ('so',) try: - STD_LIB_DIR = get_python_lib(standard_lib=1) + STD_LIB_DIR = get_python_lib(standard_lib=True) # get_python_lib(standard_lib=1) is not available on pypy, set STD_LIB_DIR to # non-valid path, see https://bugs.pypy.org/issue1164 except DistutilsPlatformError: @@ -69,8 +70,7 @@ EXT_LIB_DIR = get_python_lib() -BUILTIN_MODULES = dict(zip(sys.builtin_module_names, - [1]*len(sys.builtin_module_names))) +BUILTIN_MODULES = dict.fromkeys(sys.builtin_module_names, True) class NoSourceFile(Exception): @@ -93,14 +93,14 @@ def _getobj(self): def __getattribute__(self, attr): try: return super(LazyObject, self).__getattribute__(attr) - except AttributeError, ex: + except AttributeError as ex: return getattr(self._getobj(), attr) def __call__(self, *args, **kwargs): return 
self._getobj()(*args, **kwargs) -def load_module_from_name(dotted_name, path=None, use_sys=1): +def load_module_from_name(dotted_name, path=None, use_sys=True): """Load a Python module from its name. :type dotted_name: str @@ -125,7 +125,7 @@ def load_module_from_name(dotted_name, path=None, use_sys=1): return load_module_from_modpath(dotted_name.split('.'), path, use_sys) -def load_module_from_modpath(parts, path=None, use_sys=1): +def load_module_from_modpath(parts, path=None, use_sys=True): """Load a python module from its splitted name. :type parts: list(str) or tuple(str) @@ -169,14 +169,16 @@ def load_module_from_modpath(parts, path=None, use_sys=1): if prevmodule: setattr(prevmodule, part, module) _file = getattr(module, '__file__', '') + prevmodule = module + if not _file and _is_namespace(curname): + continue if not _file and len(modpath) != len(parts): raise ImportError('no module in %s' % '.'.join(parts[len(modpath):]) ) path = [dirname( _file )] - prevmodule = module return module -def load_module_from_file(filepath, path=None, use_sys=1, extrapath=None): +def load_module_from_file(filepath, path=None, use_sys=True, extrapath=None): """Load a Python module from it's path. 
:type filepath: str @@ -204,9 +206,11 @@ def load_module_from_file(filepath, path=None, use_sys=1, extrapath=None): def _check_init(path, mod_path): """check there are some __init__.py all along the way""" + modpath = [] for part in mod_path: + modpath.append(part) path = join(path, part) - if not _has_init(path): + if not _is_namespace('.'.join(modpath)) and not _has_init(path): return False return True @@ -455,13 +459,16 @@ def get_source_file(filename, include_no_ext=False): def cleanup_sys_modules(directories): """remove submodules of `directories` from `sys.modules`""" - for modname, module in sys.modules.items(): + cleaned = [] + for modname, module in list(sys.modules.items()): modfile = getattr(module, '__file__', None) if modfile: for directory in directories: if modfile.startswith(directory): + cleaned.append(modname) del sys.modules[modname] break + return cleaned def is_python_source(filename): @@ -472,7 +479,6 @@ def is_python_source(filename): return splitext(filename)[1][1:] in PY_SOURCE_EXTS - def is_standard_module(modname, std_path=(STD_LIB_DIR,)): """try to guess if a module is a standard python module (by default, see `std_path` parameter's description) @@ -481,7 +487,7 @@ def is_standard_module(modname, std_path=(STD_LIB_DIR,)): :param modname: name of the module we are interested in :type std_path: list(str) or tuple(str) - :param std_path: list of path considered has standard + :param std_path: list of path considered as standard :rtype: bool @@ -489,24 +495,28 @@ def is_standard_module(modname, std_path=(STD_LIB_DIR,)): true if the module: - is located on the path listed in one of the directory in `std_path` - is a built-in module + + Note: this function is known to return wrong values when inside virtualenv. + See https://www.logilab.org/ticket/294756. 
""" modname = modname.split('.')[0] try: filename = file_from_modpath([modname]) - except ImportError, ex: + except ImportError as ex: # import failed, i'm probably not so wrong by supposing it's # not standard... - return 0 + return False # modules which are not living in a file are considered standard # (sys and __builtin__ for instance) if filename is None: - return 1 + # we assume there are no namespaces in stdlib + return not _is_namespace(modname) filename = abspath(filename) if filename.startswith(EXT_LIB_DIR): - return 0 + return False for path in std_path: if filename.startswith(abspath(path)): - return 1 + return True return False @@ -581,6 +591,12 @@ def _search_zip(modpath, pic): except ImportError: pkg_resources = None + +def _is_namespace(modname): + return (pkg_resources is not None + and modname in pkg_resources._namespace_packages) + + def _module_file(modpath, path=None): """get a module type / file path @@ -612,11 +628,13 @@ def _module_file(modpath, path=None): except AttributeError: checkeggs = False # pkg_resources support (aka setuptools namespace packages) - if pkg_resources is not None and modpath[0] in pkg_resources._namespace_packages and len(modpath) > 1: + if (_is_namespace(modpath[0]) and modpath[0] in sys.modules): # setuptools has added into sys.modules a module object with proper # __path__, get back information from there module = sys.modules[modpath.pop(0)] path = module.__path__ + if not modpath: + return C_BUILTIN, None imported = [] while modpath: modname = modpath[0] diff --git a/pymode/libs/pylama/lint/pylama_pylint/logilab/common/optik_ext.py b/pymode/libs/logilab/common/optik_ext.py similarity index 96% rename from pymode/libs/pylama/lint/pylama_pylint/logilab/common/optik_ext.py rename to pymode/libs/logilab/common/optik_ext.py index 49d685b1..1fd2a7f8 100644 --- a/pymode/libs/pylama/lint/pylama_pylint/logilab/common/optik_ext.py +++ b/pymode/libs/logilab/common/optik_ext.py @@ -46,6 +46,8 @@ argument of this type will be 
converted to a float value in bytes according to byte units (b, kb, mb, gb, tb) """ +from __future__ import print_function + __docformat__ = "restructuredtext en" import re @@ -65,7 +67,9 @@ except ImportError: HAS_MX_DATETIME = False -from logilab.common.textutils import splitstrip +from logilab.common.textutils import splitstrip, TIME_UNITS, BYTE_UNITS, \ + apply_units + def check_regexp(option, opt, value): """check a regexp value by trying to compile it @@ -165,18 +169,15 @@ def check_color(option, opt, value): raise OptionValueError(msg % (opt, value)) def check_time(option, opt, value): - from logilab.common.textutils import TIME_UNITS, apply_units if isinstance(value, (int, long, float)): return value return apply_units(value, TIME_UNITS) def check_bytes(option, opt, value): - from logilab.common.textutils import BYTE_UNITS, apply_units if hasattr(value, '__int__'): return value return apply_units(value, BYTE_UNITS) -import types class Option(BaseOption): """override optik.Option to add some new option types @@ -211,7 +212,7 @@ def _check_choice(self): if self.choices is None: raise OptionError( "must supply a list of choices for type 'choice'", self) - elif type(self.choices) not in (types.TupleType, types.ListType): + elif not isinstance(self.choices, (tuple, list)): raise OptionError( "choices must be a list of strings ('%s' supplied)" % str(type(self.choices)).split("'")[1], self) @@ -382,9 +383,9 @@ def generate_manpage(optparser, pkginfo, section=1, stream=sys.stdout, level=0): formatter = ManHelpFormatter() formatter.output_level = level formatter.parser = optparser - print >> stream, formatter.format_head(optparser, pkginfo, section) - print >> stream, optparser.format_option_help(formatter) - print >> stream, formatter.format_tail(pkginfo) + print(formatter.format_head(optparser, pkginfo, section), file=stream) + print(optparser.format_option_help(formatter), file=stream) + print(formatter.format_tail(pkginfo), file=stream) __all__ = 
('OptionParser', 'Option', 'OptionGroup', 'OptionValueError', diff --git a/pymode/libs/logilab/common/optparser.py b/pymode/libs/logilab/common/optparser.py new file mode 100644 index 00000000..aa17750e --- /dev/null +++ b/pymode/libs/logilab/common/optparser.py @@ -0,0 +1,92 @@ +# -*- coding: utf-8 -*- +# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved. +# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr +# +# This file is part of logilab-common. +# +# logilab-common is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 2.1 of the License, or (at your option) any +# later version. +# +# logilab-common is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License along +# with logilab-common. If not, see . +"""Extend OptionParser with commands. + +Example: + +>>> parser = OptionParser() +>>> parser.usage = '%prog COMMAND [options] ...' 
+>>> parser.add_command('build', 'mymod.build') +>>> parser.add_command('clean', run_clean, add_opt_clean) +>>> run, options, args = parser.parse_command(sys.argv[1:]) +>>> return run(options, args[1:]) + +With mymod.build that defines two functions run and add_options +""" +from __future__ import print_function + +__docformat__ = "restructuredtext en" + +from warnings import warn +warn('lgc.optparser module is deprecated, use lgc.clcommands instead', DeprecationWarning, + stacklevel=2) + +import sys +import optparse + +class OptionParser(optparse.OptionParser): + + def __init__(self, *args, **kwargs): + optparse.OptionParser.__init__(self, *args, **kwargs) + self._commands = {} + self.min_args, self.max_args = 0, 1 + + def add_command(self, name, mod_or_funcs, help=''): + """name of the command, name of module or tuple of functions + (run, add_options) + """ + assert isinstance(mod_or_funcs, str) or isinstance(mod_or_funcs, tuple), \ + "mod_or_funcs has to be a module name or a tuple of functions" + self._commands[name] = (mod_or_funcs, help) + + def print_main_help(self): + optparse.OptionParser.print_help(self) + print('\ncommands:') + for cmdname, (_, help) in self._commands.items(): + print('% 10s - %s' % (cmdname, help)) + + def parse_command(self, args): + if len(args) == 0: + self.print_main_help() + sys.exit(1) + cmd = args[0] + args = args[1:] + if cmd not in self._commands: + if cmd in ('-h', '--help'): + self.print_main_help() + sys.exit(0) + elif self.version is not None and cmd == "--version": + self.print_version() + sys.exit(0) + self.error('unknown command') + self.prog = '%s %s' % (self.prog, cmd) + mod_or_f, help = self._commands[cmd] + # optparse inserts self.description between usage and options help + self.description = help + if isinstance(mod_or_f, str): + exec('from %s import run, add_options' % mod_or_f) + else: + run, add_options = mod_or_f + add_options(self) + (options, args) = self.parse_args(args) + if not (self.min_args <= len(args) 
<= self.max_args): + self.error('incorrect number of arguments') + return run, options, args + + diff --git a/pymode/libs/logilab/common/proc.py b/pymode/libs/logilab/common/proc.py new file mode 100644 index 00000000..c27356c6 --- /dev/null +++ b/pymode/libs/logilab/common/proc.py @@ -0,0 +1,277 @@ +# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved. +# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr +# +# This file is part of logilab-common. +# +# logilab-common is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 2.1 of the License, or (at your option) any +# later version. +# +# logilab-common is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License along +# with logilab-common. If not, see . 
+"""module providing: +* process information (linux specific: rely on /proc) +* a class for resource control (memory / time / cpu time) + +This module doesn't work on windows platforms (only tested on linux) + +:organization: Logilab + + + +""" +__docformat__ = "restructuredtext en" + +import os +import stat +from resource import getrlimit, setrlimit, RLIMIT_CPU, RLIMIT_AS +from signal import signal, SIGXCPU, SIGKILL, SIGUSR2, SIGUSR1 +from threading import Timer, currentThread, Thread, Event +from time import time + +from logilab.common.tree import Node + +class NoSuchProcess(Exception): pass + +def proc_exists(pid): + """check the a pid is registered in /proc + raise NoSuchProcess exception if not + """ + if not os.path.exists('/proc/%s' % pid): + raise NoSuchProcess() + +PPID = 3 +UTIME = 13 +STIME = 14 +CUTIME = 15 +CSTIME = 16 +VSIZE = 22 + +class ProcInfo(Node): + """provide access to process information found in /proc""" + + def __init__(self, pid): + self.pid = int(pid) + Node.__init__(self, self.pid) + proc_exists(self.pid) + self.file = '/proc/%s/stat' % self.pid + self.ppid = int(self.status()[PPID]) + + def memory_usage(self): + """return the memory usage of the process in Ko""" + try : + return int(self.status()[VSIZE]) + except IOError: + return 0 + + def lineage_memory_usage(self): + return self.memory_usage() + sum([child.lineage_memory_usage() + for child in self.children]) + + def time(self, children=0): + """return the number of jiffies that this process has been scheduled + in user and kernel mode""" + status = self.status() + time = int(status[UTIME]) + int(status[STIME]) + if children: + time += int(status[CUTIME]) + int(status[CSTIME]) + return time + + def status(self): + """return the list of fields found in /proc//stat""" + return open(self.file).read().split() + + def name(self): + """return the process name found in /proc//stat + """ + return self.status()[1].strip('()') + + def age(self): + """return the age of the process + """ + 
class ProcInfoLoader:
    """Lazy cache of per-process information read from /proc."""

    def __init__(self):
        # pid -> ProcInfo cache
        self._loaded = {}

    def list_pids(self):
        """Yield the pid of every process currently listed in /proc."""
        for entry in os.listdir('/proc'):
            if entry.isdigit():
                yield int(entry)

    def load(self, pid):
        """Return the (cached) ProcInfo for *pid*, creating it on demand."""
        pid = int(pid)
        info = self._loaded.get(pid)
        if info is None:
            info = ProcInfo(pid)
            info.manager = self
            self._loaded[pid] = info
        return info

    def load_all(self):
        """Load every visible process and attach each one to its parent."""
        for pid in self.list_pids():
            try:
                info = self.load(pid)
                if info.parent is None and info.ppid:
                    self.load(info.ppid).append(info)
            except NoSuchProcess:
                # the process vanished between listdir() and the stat read
                pass
class ResourceController:
    """Enforce CPU-time / wall-clock / memory limits on the current
    process group, and restore the previous limits afterwards.

    Wall-clock enforcement uses a threading.Timer that signals the group
    with SIGUSR2; memory enforcement relies on a MemorySentinel thread
    sending SIGUSR1; CPU time uses the RLIMIT_CPU soft limit (SIGXCPU).
    """

    def __init__(self, max_cpu_time=None, max_time=None, max_memory=None,
                 max_reprieve=60):
        if SIGXCPU == -1:
            raise RuntimeError("Unsupported platform")
        self.max_cpu_time = max_cpu_time
        self.max_time = max_time
        self.max_memory = max_memory
        self._reprieve = max_reprieve
        # live helper objects
        self._timer = None
        self._msentinel = None
        # previous state, reinstalled by clean_limit()
        self._old_max_memory = None
        self._old_usr1_hdlr = None
        self._old_max_cpu_time = None
        self._old_usr2_hdlr = None
        self._old_sigxcpu_hdlr = None
        # bookkeeping
        self._limit_set = 0
        self._abort_try = 0
        self._start_time = None
        self._elapse_time = 0

    def _hangle_sig_timeout(self, sig, frame):
        # SIGUSR2 handler: the wall-clock timer fired
        raise TimeoutError()

    def _hangle_sig_memory(self, sig, frame):
        # SIGUSR1 handler: the memory sentinel saw the limit exceeded
        if self._abort_try < self._reprieve:
            self._abort_try += 1
            raise LineageMemoryError("Memory limit reached")
        os.killpg(os.getpid(), SIGKILL)

    def _handle_sigxcpu(self, sig, frame):
        # SIGXCPU handler: soft CPU-time rlimit exceeded
        if self._abort_try < self._reprieve:
            self._abort_try += 1
            raise XCPUError("Soft CPU time limit reached")
        os.killpg(os.getpid(), SIGKILL)

    def _time_out(self):
        # Timer callback: nudge the group with SIGUSR2 while reprieves
        # remain, re-arming a 1s timer; kill the group outright after that.
        if self._abort_try < self._reprieve:
            self._abort_try += 1
            os.killpg(os.getpid(), SIGUSR2)
            if self._limit_set > 0:
                self._timer = Timer(1, self._time_out)
                self._timer.start()
        else:
            os.killpg(os.getpid(), SIGKILL)

    def setup_limit(self):
        """Install the configured limits (must run in the main thread)."""
        assert currentThread().getName() == 'MainThread'
        os.setpgrp()
        if self._limit_set <= 0:
            if self.max_time is not None:
                self._old_usr2_hdlr = signal(SIGUSR2, self._hangle_sig_timeout)
                delay = max(1, int(self.max_time) - self._elapse_time)
                self._timer = Timer(delay, self._time_out)
                self._start_time = int(time())
                self._timer.start()
            if self.max_cpu_time is not None:
                self._old_max_cpu_time = getrlimit(RLIMIT_CPU)
                self._old_sigxcpu_hdlr = signal(SIGXCPU, self._handle_sigxcpu)
                setrlimit(RLIMIT_CPU,
                          (int(self.max_cpu_time), self._old_max_cpu_time[1]))
            if self.max_memory is not None:
                self._msentinel = MemorySentinel(1, int(self.max_memory))
                self._old_max_memory = getrlimit(RLIMIT_AS)
                self._old_usr1_hdlr = signal(SIGUSR1, self._hangle_sig_memory)
                setrlimit(RLIMIT_AS,
                          (int(self.max_memory), self._old_max_memory[1]))
                self._msentinel.start()
        self._limit_set += 1

    def clean_limit(self):
        """Undo setup_limit(): restore previous handlers and rlimits."""
        if self._limit_set > 0:
            if self.max_time is not None:
                self._timer.cancel()
                # remember consumed wall time so a later setup_limit()
                # only grants the remaining budget
                self._elapse_time += int(time()) - self._start_time
                self._timer = None
                signal(SIGUSR2, self._old_usr2_hdlr)
            if self.max_cpu_time is not None:
                setrlimit(RLIMIT_CPU, self._old_max_cpu_time)
                signal(SIGXCPU, self._old_sigxcpu_hdlr)
            if self.max_memory is not None:
                self._msentinel.stop()
                self._msentinel = None
                setrlimit(RLIMIT_AS, self._old_max_memory)
                signal(SIGUSR1, self._old_usr1_hdlr)
            self._limit_set -= 1
+# +# logilab-common is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License along +# with logilab-common. If not, see . +"""pytest is a tool that eases test running and debugging. + +To be able to use pytest, you should either write tests using +the logilab.common.testlib's framework or the unittest module of the +Python's standard library. + +You can customize pytest's behaviour by defining a ``pytestconf.py`` file +somewhere in your test directory. In this file, you can add options or +change the way tests are run. + +To add command line options, you must define a ``update_parser`` function in +your ``pytestconf.py`` file. The function must accept a single parameter +that will be the OptionParser's instance to customize. + +If you wish to customize the tester, you'll have to define a class named +``CustomPyTester``. This class should extend the default `PyTester` class +defined in the pytest module. Take a look at the `PyTester` and `DjangoTester` +classes for more information about what can be done. 
+ +For instance, if you wish to add a custom -l option to specify a loglevel, you +could define the following ``pytestconf.py`` file :: + + import logging + from logilab.common.pytest import PyTester + + def update_parser(parser): + parser.add_option('-l', '--loglevel', dest='loglevel', action='store', + choices=('debug', 'info', 'warning', 'error', 'critical'), + default='critical', help="the default log level possible choices are " + "('debug', 'info', 'warning', 'error', 'critical')") + return parser + + + class CustomPyTester(PyTester): + def __init__(self, cvg, options): + super(CustomPyTester, self).__init__(cvg, options) + loglevel = options.loglevel.upper() + logger = logging.getLogger('erudi') + logger.setLevel(logging.getLevelName(loglevel)) + + +In your TestCase class you can then get the value of a specific option with +the ``optval`` method:: + + class MyTestCase(TestCase): + def test_foo(self): + loglevel = self.optval('loglevel') + # ... + + +You can also tag your tag your test for fine filtering + +With those tag:: + + from logilab.common.testlib import tag, TestCase + + class Exemple(TestCase): + + @tag('rouge', 'carre') + def toto(self): + pass + + @tag('carre', 'vert') + def tata(self): + pass + + @tag('rouge') + def titi(test): + pass + +you can filter the function with a simple python expression + + * ``toto`` and ``titi`` match ``rouge`` + * ``toto``, ``tata`` and ``titi``, match ``rouge or carre`` + * ``tata`` and ``titi`` match``rouge ^ carre`` + * ``titi`` match ``rouge and not carre`` +""" + +from __future__ import print_function + +__docformat__ = "restructuredtext en" + +PYTEST_DOC = """%prog [OPTIONS] [testfile [testpattern]] + +examples: + +pytest path/to/mytests.py +pytest path/to/mytests.py TheseTests +pytest path/to/mytests.py TheseTests.test_thisone +pytest path/to/mytests.py -m '(not long and database) or regr' + +pytest one (will run both test_thisone and test_thatone) +pytest path/to/mytests.py -s not (will skip test_notthisone) 
@contextmanager
def replace_trace(trace=None):
    """Context manager installing *trace* as the global trace function
    for the duration of the block, then restoring the previous one."""
    previous = sys.gettrace()
    sys.settrace(trace)
    try:
        yield
    finally:
        # pycoverage's stored trace object is not callable; unwrap the
        # real function it exposes — workaround for a pycoverage bug, see
        # https://bitbucket.org/ned/coveragepy/issue/123
        if (previous is not None and not callable(previous)
                and hasattr(previous, 'pytrace')):
            previous = previous.pytrace
        sys.settrace(previous)


def pause_trace():
    """Context manager that temporarily disables any tracing."""
    return replace_trace()
def nocoverage(func):
    """Decorator that pauses tracing (coverage) while *func* runs."""
    if hasattr(func, 'uncovered'):
        # already wrapped — return as-is
        return func
    func.uncovered = True

    def not_covered(*args, **kwargs):
        with pause_trace():
            return func(*args, **kwargs)
    not_covered.uncovered = True
    return not_covered

## end of coverage pausing tools


# Raw strings: the originals were plain strings containing "\.", an
# invalid string escape (DeprecationWarning today, SyntaxError eventually).
TESTFILE_RE = re.compile(r"^((unit)?test.*|smoketest)\.py$")
def this_is_a_testfile(filename):
    """Return a truthy value (a match object) when *filename*'s basename
    looks like a test file: test*.py, unittest*.py or smoketest.py."""
    return TESTFILE_RE.match(osp.basename(filename))

TESTDIR_RE = re.compile(r"^(unit)?tests?$")
def this_is_a_testdir(dirpath):
    """Return a truthy value (a match object) when *dirpath*'s basename
    looks like a test directory: test, tests, unittest or unittests."""
    return TESTDIR_RE.match(osp.basename(dirpath))
+ """ + namespace = {} + exec(open(path, 'rb').read(), namespace) + if 'update_parser' in namespace: + namespace['update_parser'](parser) + return namespace.get('CustomPyTester', PyTester) + + +def project_root(parser, projdir=os.getcwd()): + """try to find project's root and add it to sys.path""" + previousdir = curdir = osp.abspath(projdir) + testercls = PyTester + conf_file_path = osp.join(curdir, CONF_FILE) + if osp.isfile(conf_file_path): + testercls = load_pytest_conf(conf_file_path, parser) + while this_is_a_testdir(curdir) or \ + osp.isfile(osp.join(curdir, '__init__.py')): + newdir = osp.normpath(osp.join(curdir, os.pardir)) + if newdir == curdir: + break + previousdir = curdir + curdir = newdir + conf_file_path = osp.join(curdir, CONF_FILE) + if osp.isfile(conf_file_path): + testercls = load_pytest_conf(conf_file_path, parser) + return previousdir, testercls + + +class GlobalTestReport(object): + """this class holds global test statistics""" + def __init__(self): + self.ran = 0 + self.skipped = 0 + self.failures = 0 + self.errors = 0 + self.ttime = 0 + self.ctime = 0 + self.modulescount = 0 + self.errmodules = [] + + def feed(self, filename, testresult, ttime, ctime): + """integrates new test information into internal statistics""" + ran = testresult.testsRun + self.ran += ran + self.skipped += len(getattr(testresult, 'skipped', ())) + self.failures += len(testresult.failures) + self.errors += len(testresult.errors) + self.ttime += ttime + self.ctime += ctime + self.modulescount += 1 + if not testresult.wasSuccessful(): + problems = len(testresult.failures) + len(testresult.errors) + self.errmodules.append((filename[:-3], problems, ran)) + + def failed_to_test_module(self, filename): + """called when the test module could not be imported by unittest + """ + self.errors += 1 + self.modulescount += 1 + self.ran += 1 + self.errmodules.append((filename[:-3], 1, 1)) + + def skip_module(self, filename): + self.modulescount += 1 + self.ran += 1 + 
self.errmodules.append((filename[:-3], 0, 0)) + + def __str__(self): + """this is just presentation stuff""" + line1 = ['Ran %s test cases in %.2fs (%.2fs CPU)' + % (self.ran, self.ttime, self.ctime)] + if self.errors: + line1.append('%s errors' % self.errors) + if self.failures: + line1.append('%s failures' % self.failures) + if self.skipped: + line1.append('%s skipped' % self.skipped) + modulesok = self.modulescount - len(self.errmodules) + if self.errors or self.failures: + line2 = '%s modules OK (%s failed)' % (modulesok, + len(self.errmodules)) + descr = ', '.join(['%s [%s/%s]' % info for info in self.errmodules]) + line3 = '\nfailures: %s' % descr + elif modulesok: + line2 = 'All %s modules OK' % modulesok + line3 = '' + else: + return '' + return '%s\n%s%s' % (', '.join(line1), line2, line3) + + + +def remove_local_modules_from_sys(testdir): + """remove all modules from cache that come from `testdir` + + This is used to avoid strange side-effects when using the + testall() mode of pytest. 
def get_errcode(self):
    """Exit code for the whole run: an explicitly stored value wins,
    otherwise failures + errors from the global report."""
    explicit = self._errcode
    if explicit is None:
        return self.report.failures + self.report.errors
    return explicit

def set_errcode(self, errcode):
    """Store an explicit exit code (e.g. propagated from SystemExit);
    it overrides the computed one."""
    self._errcode = errcode
errcode = property(get_errcode, set_errcode)
+ if not self.testonedir(dirname, exitfirst): + break + dirs[:] = [] + if self.report.ran == 0: + print("no test dir found testing here:", here) + # if no test was found during the visit, consider + # the local directory as a test directory even if + # it doesn't have a traditional test directory name + self.testonedir(here) + + def testonedir(self, testdir, exitfirst=False): + """finds each testfile in the `testdir` and runs it + + return true when all tests has been executed, false if exitfirst and + some test has failed. + """ + files = abspath_listdir(testdir) + shuffle(files) + for filename in files: + if this_is_a_testfile(filename): + if self.options.exitfirst and not self.options.restart: + # overwrite restart file + try: + restartfile = open(FILE_RESTART, "w") + restartfile.close() + except Exception: + print("Error while overwriting succeeded test file :", + osp.join(os.getcwd(), FILE_RESTART), + file=sys.__stderr__) + raise + # run test and collect information + prog = self.testfile(filename, batchmode=True) + if exitfirst and (prog is None or not prog.result.wasSuccessful()): + return False + self.firstwrite = True + # clean local modules + remove_local_modules_from_sys(testdir) + return True + + def testfile(self, filename, batchmode=False): + """runs every test in `filename` + + :param filename: an absolute path pointing to a unittest file + """ + here = os.getcwd() + dirname = osp.dirname(filename) + if dirname: + os.chdir(dirname) + # overwrite restart file if it has not been done already + if self.options.exitfirst and not self.options.restart and self.firstwrite: + try: + restartfile = open(FILE_RESTART, "w") + restartfile.close() + except Exception: + print("Error while overwriting succeeded test file :", + osp.join(os.getcwd(), FILE_RESTART), file=sys.__stderr__) + raise + modname = osp.basename(filename)[:-3] + print((' %s ' % osp.basename(filename)).center(70, '='), + file=sys.__stderr__) + try: + tstart, cstart = time(), clock() + try: + 
testprog = SkipAwareTestProgram(modname, batchmode=batchmode, cvg=self.cvg, + options=self.options, outstream=sys.stderr) + except KeyboardInterrupt: + raise + except SystemExit as exc: + self.errcode = exc.code + raise + except testlib.SkipTest: + print("Module skipped:", filename) + self.report.skip_module(filename) + return None + except Exception: + self.report.failed_to_test_module(filename) + print('unhandled exception occurred while testing', modname, + file=sys.stderr) + import traceback + traceback.print_exc(file=sys.stderr) + return None + + tend, cend = time(), clock() + ttime, ctime = (tend - tstart), (cend - cstart) + self.report.feed(filename, testprog.result, ttime, ctime) + return testprog + finally: + if dirname: + os.chdir(here) + + + +class DjangoTester(PyTester): + + def load_django_settings(self, dirname): + """try to find project's setting and load it""" + curdir = osp.abspath(dirname) + previousdir = curdir + while not osp.isfile(osp.join(curdir, 'settings.py')) and \ + osp.isfile(osp.join(curdir, '__init__.py')): + newdir = osp.normpath(osp.join(curdir, os.pardir)) + if newdir == curdir: + raise AssertionError('could not find settings.py') + previousdir = curdir + curdir = newdir + # late django initialization + settings = load_module_from_modpath(modpath_from_file(osp.join(curdir, 'settings.py'))) + from django.core.management import setup_environ + setup_environ(settings) + settings.DEBUG = False + self.settings = settings + # add settings dir to pythonpath since it's the project's root + if curdir not in sys.path: + sys.path.insert(1, curdir) + + def before_testfile(self): + # Those imports must be done **after** setup_environ was called + from django.test.utils import setup_test_environment + from django.test.utils import create_test_db + setup_test_environment() + create_test_db(verbosity=0) + self.dbname = self.settings.TEST_DATABASE_NAME + + def after_testfile(self): + # Those imports must be done **after** setup_environ was called + 
from django.test.utils import teardown_test_environment + from django.test.utils import destroy_test_db + teardown_test_environment() + print('destroying', self.dbname) + destroy_test_db(self.dbname, verbosity=0) + + def testall(self, exitfirst=False): + """walks through current working directory, finds something + which can be considered as a testdir and runs every test there + """ + for dirname, dirs, files in os.walk(os.getcwd()): + for skipped in ('CVS', '.svn', '.hg'): + if skipped in dirs: + dirs.remove(skipped) + if 'tests.py' in files: + if not self.testonedir(dirname, exitfirst): + break + dirs[:] = [] + else: + basename = osp.basename(dirname) + if basename in ('test', 'tests'): + print("going into", dirname) + # we found a testdir, let's explore it ! + if not self.testonedir(dirname, exitfirst): + break + dirs[:] = [] + + def testonedir(self, testdir, exitfirst=False): + """finds each testfile in the `testdir` and runs it + + return true when all tests has been executed, false if exitfirst and + some test has failed. 
+ """ + # special django behaviour : if tests are splitted in several files, + # remove the main tests.py file and tests each test file separately + testfiles = [fpath for fpath in abspath_listdir(testdir) + if this_is_a_testfile(fpath)] + if len(testfiles) > 1: + try: + testfiles.remove(osp.join(testdir, 'tests.py')) + except ValueError: + pass + for filename in testfiles: + # run test and collect information + prog = self.testfile(filename, batchmode=True) + if exitfirst and (prog is None or not prog.result.wasSuccessful()): + return False + # clean local modules + remove_local_modules_from_sys(testdir) + return True + + def testfile(self, filename, batchmode=False): + """runs every test in `filename` + + :param filename: an absolute path pointing to a unittest file + """ + here = os.getcwd() + dirname = osp.dirname(filename) + if dirname: + os.chdir(dirname) + self.load_django_settings(dirname) + modname = osp.basename(filename)[:-3] + print((' %s ' % osp.basename(filename)).center(70, '='), + file=sys.stderr) + try: + try: + tstart, cstart = time(), clock() + self.before_testfile() + testprog = SkipAwareTestProgram(modname, batchmode=batchmode, cvg=self.cvg) + tend, cend = time(), clock() + ttime, ctime = (tend - tstart), (cend - cstart) + self.report.feed(filename, testprog.result, ttime, ctime) + return testprog + except SystemExit: + raise + except Exception as exc: + import traceback + traceback.print_exc() + self.report.failed_to_test_module(filename) + print('unhandled exception occurred while testing', modname) + print('error: %s' % exc) + return None + finally: + self.after_testfile() + if dirname: + os.chdir(here) + + +def make_parser(): + """creates the OptionParser instance + """ + from optparse import OptionParser + parser = OptionParser(usage=PYTEST_DOC) + + parser.newargs = [] + def rebuild_cmdline(option, opt, value, parser): + """carry the option to unittest_main""" + parser.newargs.append(opt) + + def rebuild_and_store(option, opt, value, 
parser): + """carry the option to unittest_main and store + the value on current parser + """ + parser.newargs.append(opt) + setattr(parser.values, option.dest, True) + + def capture_and_rebuild(option, opt, value, parser): + warnings.simplefilter('ignore', DeprecationWarning) + rebuild_cmdline(option, opt, value, parser) + + # pytest options + parser.add_option('-t', dest='testdir', default=None, + help="directory where the tests will be found") + parser.add_option('-d', dest='dbc', default=False, + action="store_true", help="enable design-by-contract") + # unittest_main options provided and passed through pytest + parser.add_option('-v', '--verbose', callback=rebuild_cmdline, + action="callback", help="Verbose output") + parser.add_option('-i', '--pdb', callback=rebuild_and_store, + dest="pdb", action="callback", + help="Enable test failure inspection") + parser.add_option('-x', '--exitfirst', callback=rebuild_and_store, + dest="exitfirst", default=False, + action="callback", help="Exit on first failure " + "(only make sense when pytest run one test file)") + parser.add_option('-R', '--restart', callback=rebuild_and_store, + dest="restart", default=False, + action="callback", + help="Restart tests from where it failed (implies exitfirst) " + "(only make sense if tests previously ran with exitfirst only)") + parser.add_option('--color', callback=rebuild_cmdline, + action="callback", + help="colorize tracebacks") + parser.add_option('-s', '--skip', + # XXX: I wish I could use the callback action but it + # doesn't seem to be able to get the value + # associated to the option + action="store", dest="skipped", default=None, + help="test names matching this name will be skipped " + "to skip several patterns, use commas") + parser.add_option('-q', '--quiet', callback=rebuild_cmdline, + action="callback", help="Minimal output") + parser.add_option('-P', '--profile', default=None, dest='profile', + help="Profile execution and store data in the given file") + 
parser.add_option('-m', '--match', default=None, dest='tags_pattern', + help="only execute test whose tag match the current pattern") + + if DJANGO_FOUND: + parser.add_option('-J', '--django', dest='django', default=False, + action="store_true", + help='use pytest for django test cases') + return parser + + +def parseargs(parser): + """Parse the command line and return (options processed), (options to pass to + unittest_main()), (explicitfile or None). + """ + # parse the command line + options, args = parser.parse_args() + filenames = [arg for arg in args if arg.endswith('.py')] + if filenames: + if len(filenames) > 1: + parser.error("only one filename is acceptable") + explicitfile = filenames[0] + args.remove(explicitfile) + else: + explicitfile = None + # someone wants DBC + testlib.ENABLE_DBC = options.dbc + newargs = parser.newargs + if options.skipped: + newargs.extend(['--skip', options.skipped]) + # restart implies exitfirst + if options.restart: + options.exitfirst = True + # append additional args to the new sys.argv and let unittest_main + # do the rest + newargs += args + return options, explicitfile + + + +def run(): + parser = make_parser() + rootdir, testercls = project_root(parser) + options, explicitfile = parseargs(parser) + # mock a new command line + sys.argv[1:] = parser.newargs + cvg = None + if not '' in sys.path: + sys.path.insert(0, '') + if DJANGO_FOUND and options.django: + tester = DjangoTester(cvg, options) + else: + tester = testercls(cvg, options) + if explicitfile: + cmd, args = tester.testfile, (explicitfile,) + elif options.testdir: + cmd, args = tester.testonedir, (options.testdir, options.exitfirst) + else: + cmd, args = tester.testall, (options.exitfirst,) + try: + try: + if options.profile: + import hotshot + prof = hotshot.Profile(options.profile) + prof.runcall(cmd, *args) + prof.close() + print('profile data saved in', options.profile) + else: + cmd(*args) + except SystemExit: + raise + except: + import traceback + 
traceback.print_exc() + finally: + tester.show_report() + sys.exit(tester.errcode) + +class SkipAwareTestProgram(unittest.TestProgram): + # XXX: don't try to stay close to unittest.py, use optparse + USAGE = """\ +Usage: %(progName)s [options] [test] [...] + +Options: + -h, --help Show this message + -v, --verbose Verbose output + -i, --pdb Enable test failure inspection + -x, --exitfirst Exit on first failure + -s, --skip skip test matching this pattern (no regexp for now) + -q, --quiet Minimal output + --color colorize tracebacks + + -m, --match Run only test whose tag match this pattern + + -P, --profile FILE: Run the tests using cProfile and saving results + in FILE + +Examples: + %(progName)s - run default set of tests + %(progName)s MyTestSuite - run suite 'MyTestSuite' + %(progName)s MyTestCase.testSomething - run MyTestCase.testSomething + %(progName)s MyTestCase - run all 'test*' test methods + in MyTestCase +""" + def __init__(self, module='__main__', defaultTest=None, batchmode=False, + cvg=None, options=None, outstream=sys.stderr): + self.batchmode = batchmode + self.cvg = cvg + self.options = options + self.outstream = outstream + super(SkipAwareTestProgram, self).__init__( + module=module, defaultTest=defaultTest, + testLoader=NonStrictTestLoader()) + + def parseArgs(self, argv): + self.pdbmode = False + self.exitfirst = False + self.skipped_patterns = [] + self.test_pattern = None + self.tags_pattern = None + self.colorize = False + self.profile_name = None + import getopt + try: + options, args = getopt.getopt(argv[1:], 'hHvixrqcp:s:m:P:', + ['help', 'verbose', 'quiet', 'pdb', + 'exitfirst', 'restart', + 'skip=', 'color', 'match=', 'profile=']) + for opt, value in options: + if opt in ('-h', '-H', '--help'): + self.usageExit() + if opt in ('-i', '--pdb'): + self.pdbmode = True + if opt in ('-x', '--exitfirst'): + self.exitfirst = True + if opt in ('-r', '--restart'): + self.restart = True + self.exitfirst = True + if opt in ('-q', '--quiet'): + 
self.verbosity = 0 + if opt in ('-v', '--verbose'): + self.verbosity = 2 + if opt in ('-s', '--skip'): + self.skipped_patterns = [pat.strip() for pat in + value.split(', ')] + if opt == '--color': + self.colorize = True + if opt in ('-m', '--match'): + #self.tags_pattern = value + self.options["tag_pattern"] = value + if opt in ('-P', '--profile'): + self.profile_name = value + self.testLoader.skipped_patterns = self.skipped_patterns + if len(args) == 0 and self.defaultTest is None: + suitefunc = getattr(self.module, 'suite', None) + if isinstance(suitefunc, (types.FunctionType, + types.MethodType)): + self.test = self.module.suite() + else: + self.test = self.testLoader.loadTestsFromModule(self.module) + return + if len(args) > 0: + self.test_pattern = args[0] + self.testNames = args + else: + self.testNames = (self.defaultTest, ) + self.createTests() + except getopt.error as msg: + self.usageExit(msg) + + def runTests(self): + if self.profile_name: + import cProfile + cProfile.runctx('self._runTests()', globals(), locals(), self.profile_name ) + else: + return self._runTests() + + def _runTests(self): + self.testRunner = SkipAwareTextTestRunner(verbosity=self.verbosity, + stream=self.outstream, + exitfirst=self.exitfirst, + pdbmode=self.pdbmode, + cvg=self.cvg, + test_pattern=self.test_pattern, + skipped_patterns=self.skipped_patterns, + colorize=self.colorize, + batchmode=self.batchmode, + options=self.options) + + def removeSucceededTests(obj, succTests): + """ Recursive function that removes succTests from + a TestSuite or TestCase + """ + if isinstance(obj, unittest.TestSuite): + removeSucceededTests(obj._tests, succTests) + if isinstance(obj, list): + for el in obj[:]: + if isinstance(el, unittest.TestSuite): + removeSucceededTests(el, succTests) + elif isinstance(el, unittest.TestCase): + descr = '.'.join((el.__class__.__module__, + el.__class__.__name__, + el._testMethodName)) + if descr in succTests: + obj.remove(el) + # take care, self.options may be 
None + if getattr(self.options, 'restart', False): + # retrieve succeeded tests from FILE_RESTART + try: + restartfile = open(FILE_RESTART, 'r') + try: + succeededtests = list(elem.rstrip('\n\r') for elem in + restartfile.readlines()) + removeSucceededTests(self.test, succeededtests) + finally: + restartfile.close() + except Exception as ex: + raise Exception("Error while reading succeeded tests into %s: %s" + % (osp.join(os.getcwd(), FILE_RESTART), ex)) + + result = self.testRunner.run(self.test) + # help garbage collection: we want TestSuite, which hold refs to every + # executed TestCase, to be gc'ed + del self.test + if getattr(result, "debuggers", None) and \ + getattr(self, "pdbmode", None): + start_interactive_mode(result) + if not getattr(self, "batchmode", None): + sys.exit(not result.wasSuccessful()) + self.result = result + + +class SkipAwareTextTestRunner(unittest.TextTestRunner): + + def __init__(self, stream=sys.stderr, verbosity=1, + exitfirst=False, pdbmode=False, cvg=None, test_pattern=None, + skipped_patterns=(), colorize=False, batchmode=False, + options=None): + super(SkipAwareTextTestRunner, self).__init__(stream=stream, + verbosity=verbosity) + self.exitfirst = exitfirst + self.pdbmode = pdbmode + self.cvg = cvg + self.test_pattern = test_pattern + self.skipped_patterns = skipped_patterns + self.colorize = colorize + self.batchmode = batchmode + self.options = options + + def _this_is_skipped(self, testedname): + return any([(pat in testedname) for pat in self.skipped_patterns]) + + def _runcondition(self, test, skipgenerator=True): + if isinstance(test, testlib.InnerTest): + testname = test.name + else: + if isinstance(test, testlib.TestCase): + meth = test._get_test_method() + testname = '%s.%s' % (test.__name__, meth.__name__) + elif isinstance(test, types.FunctionType): + func = test + testname = func.__name__ + elif isinstance(test, types.MethodType): + cls = test.__self__.__class__ + testname = '%s.%s' % (cls.__name__, test.__name__) + 
else: + return True # Not sure when this happens + if isgeneratorfunction(test) and skipgenerator: + return self.does_match_tags(test) # Let inner tests decide at run time + if self._this_is_skipped(testname): + return False # this was explicitly skipped + if self.test_pattern is not None: + try: + classpattern, testpattern = self.test_pattern.split('.') + klass, name = testname.split('.') + if classpattern not in klass or testpattern not in name: + return False + except ValueError: + if self.test_pattern not in testname: + return False + + return self.does_match_tags(test) + + def does_match_tags(self, test): + if self.options is not None: + tags_pattern = getattr(self.options, 'tags_pattern', None) + if tags_pattern is not None: + tags = getattr(test, 'tags', testlib.Tags()) + if tags.inherit and isinstance(test, types.MethodType): + tags = tags | getattr(test.__self__.__class__, 'tags', testlib.Tags()) + return tags.match(tags_pattern) + return True # no pattern + + def _makeResult(self): + return testlib.SkipAwareTestResult(self.stream, self.descriptions, + self.verbosity, self.exitfirst, + self.pdbmode, self.cvg, self.colorize) + + def run(self, test): + "Run the given test case or test suite." 
+ result = self._makeResult() + startTime = time() + test(result, runcondition=self._runcondition, options=self.options) + stopTime = time() + timeTaken = stopTime - startTime + result.printErrors() + if not self.batchmode: + self.stream.writeln(result.separator2) + run = result.testsRun + self.stream.writeln("Ran %d test%s in %.3fs" % + (run, run != 1 and "s" or "", timeTaken)) + self.stream.writeln() + if not result.wasSuccessful(): + if self.colorize: + self.stream.write(textutils.colorize_ansi("FAILED", color='red')) + else: + self.stream.write("FAILED") + else: + if self.colorize: + self.stream.write(textutils.colorize_ansi("OK", color='green')) + else: + self.stream.write("OK") + failed, errored, skipped = map(len, (result.failures, + result.errors, + result.skipped)) + + det_results = [] + for name, value in (("failures", result.failures), + ("errors",result.errors), + ("skipped", result.skipped)): + if value: + det_results.append("%s=%i" % (name, len(value))) + if det_results: + self.stream.write(" (") + self.stream.write(', '.join(det_results)) + self.stream.write(")") + self.stream.writeln("") + return result + +class NonStrictTestLoader(unittest.TestLoader): + """ + Overrides default testloader to be able to omit classname when + specifying tests to run on command line. + + For example, if the file test_foo.py contains :: + + class FooTC(TestCase): + def test_foo1(self): # ... + def test_foo2(self): # ... + def test_bar1(self): # ... + + class BarTC(TestCase): + def test_bar2(self): # ... 
+ + 'python test_foo.py' will run the 3 tests in FooTC + 'python test_foo.py FooTC' will run the 3 tests in FooTC + 'python test_foo.py test_foo' will run test_foo1 and test_foo2 + 'python test_foo.py test_foo1' will run test_foo1 + 'python test_foo.py test_bar' will run FooTC.test_bar1 and BarTC.test_bar2 + """ + + def __init__(self): + self.skipped_patterns = () + + # some magic here to accept empty list by extending + # and to provide callable capability + def loadTestsFromNames(self, names, module=None): + suites = [] + for name in names: + suites.extend(self.loadTestsFromName(name, module)) + return self.suiteClass(suites) + + def _collect_tests(self, module): + tests = {} + for obj in vars(module).values(): + if isclass(obj) and issubclass(obj, unittest.TestCase): + classname = obj.__name__ + if classname[0] == '_' or self._this_is_skipped(classname): + continue + methodnames = [] + # obj is a TestCase class + for attrname in dir(obj): + if attrname.startswith(self.testMethodPrefix): + attr = getattr(obj, attrname) + if callable(attr): + methodnames.append(attrname) + # keep track of class (obj) for convenience + tests[classname] = (obj, methodnames) + return tests + + def loadTestsFromSuite(self, module, suitename): + try: + suite = getattr(module, suitename)() + except AttributeError: + return [] + assert hasattr(suite, '_tests'), \ + "%s.%s is not a valid TestSuite" % (module.__name__, suitename) + # python2.3 does not implement __iter__ on suites, we need to return + # _tests explicitly + return suite._tests + + def loadTestsFromName(self, name, module=None): + parts = name.split('.') + if module is None or len(parts) > 2: + # let the base class do its job here + return [super(NonStrictTestLoader, self).loadTestsFromName(name)] + tests = self._collect_tests(module) + collected = [] + if len(parts) == 1: + pattern = parts[0] + if callable(getattr(module, pattern, None) + ) and pattern not in tests: + # consider it as a suite + return 
self.loadTestsFromSuite(module, pattern) + if pattern in tests: + # case python unittest_foo.py MyTestTC + klass, methodnames = tests[pattern] + for methodname in methodnames: + collected = [klass(methodname) + for methodname in methodnames] + else: + # case python unittest_foo.py something + for klass, methodnames in tests.values(): + # skip methodname if matched by skipped_patterns + for skip_pattern in self.skipped_patterns: + methodnames = [methodname + for methodname in methodnames + if skip_pattern not in methodname] + collected += [klass(methodname) + for methodname in methodnames + if pattern in methodname] + elif len(parts) == 2: + # case "MyClass.test_1" + classname, pattern = parts + klass, methodnames = tests.get(classname, (None, [])) + for methodname in methodnames: + collected = [klass(methodname) for methodname in methodnames + if pattern in methodname] + return collected + + def _this_is_skipped(self, testedname): + return any([(pat in testedname) for pat in self.skipped_patterns]) + + def getTestCaseNames(self, testCaseClass): + """Return a sorted sequence of method names found within testCaseClass + """ + is_skipped = self._this_is_skipped + classname = testCaseClass.__name__ + if classname[0] == '_' or is_skipped(classname): + return [] + testnames = super(NonStrictTestLoader, self).getTestCaseNames( + testCaseClass) + return [testname for testname in testnames if not is_skipped(testname)] + + +# The 2 functions below are modified versions of the TestSuite.run method +# that is provided with unittest2 for python 2.6, in unittest2/suite.py +# It is used to monkeypatch the original implementation to support +# extra runcondition and options arguments (see in testlib.py) + +def _ts_run(self, result, runcondition=None, options=None): + self._wrapped_run(result, runcondition=runcondition, options=options) + self._tearDownPreviousClass(None, result) + self._handleModuleTearDown(result) + return result + +def _ts_wrapped_run(self, result, debug=False, 
runcondition=None, options=None): + for test in self: + if result.shouldStop: + break + if unittest_suite._isnotsuite(test): + self._tearDownPreviousClass(test, result) + self._handleModuleFixture(test, result) + self._handleClassSetUp(test, result) + result._previousTestClass = test.__class__ + if (getattr(test.__class__, '_classSetupFailed', False) or + getattr(result, '_moduleSetUpFailed', False)): + continue + + # --- modifications to deal with _wrapped_run --- + # original code is: + # + # if not debug: + # test(result) + # else: + # test.debug() + if hasattr(test, '_wrapped_run'): + try: + test._wrapped_run(result, debug, runcondition=runcondition, options=options) + except TypeError: + test._wrapped_run(result, debug) + elif not debug: + try: + test(result, runcondition, options) + except TypeError: + test(result) + else: + test.debug() + # --- end of modifications to deal with _wrapped_run --- + return result + +if sys.version_info >= (2, 7): + # The function below implements a modified version of the + # TestSuite.run method that is provided with python 2.7, in + # unittest/suite.py + def _ts_run(self, result, debug=False, runcondition=None, options=None): + topLevel = False + if getattr(result, '_testRunEntered', False) is False: + result._testRunEntered = topLevel = True + + self._wrapped_run(result, debug, runcondition, options) + + if topLevel: + self._tearDownPreviousClass(None, result) + self._handleModuleTearDown(result) + result._testRunEntered = False + return result + + +def enable_dbc(*args): + """ + Without arguments, return True if contracts can be enabled and should be + enabled (see option -d), return False otherwise. + + With arguments, return False if contracts can't or shouldn't be enabled, + otherwise weave ContractAspect with items passed as arguments. 
+ """ + if not ENABLE_DBC: + return False + try: + from logilab.aspects.weaver import weaver + from logilab.aspects.lib.contracts import ContractAspect + except ImportError: + sys.stderr.write( + 'Warning: logilab.aspects is not available. Contracts disabled.') + return False + for arg in args: + weaver.weave_module(arg, ContractAspect) + return True + + +# monkeypatch unittest and doctest (ouch !) +unittest._TextTestResult = testlib.SkipAwareTestResult +unittest.TextTestRunner = SkipAwareTextTestRunner +unittest.TestLoader = NonStrictTestLoader +unittest.TestProgram = SkipAwareTestProgram + +if sys.version_info >= (2, 4): + doctest.DocTestCase.__bases__ = (testlib.TestCase,) + # XXX check python2.6 compatibility + #doctest.DocTestCase._cleanups = [] + #doctest.DocTestCase._out = [] +else: + unittest.FunctionTestCase.__bases__ = (testlib.TestCase,) +unittest.TestSuite.run = _ts_run +unittest.TestSuite._wrapped_run = _ts_wrapped_run diff --git a/pymode/libs/logilab/common/registry.py b/pymode/libs/logilab/common/registry.py new file mode 100644 index 00000000..86a85f94 --- /dev/null +++ b/pymode/libs/logilab/common/registry.py @@ -0,0 +1,1125 @@ +# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved. +# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr +# +# This file is part of Logilab-common. +# +# Logilab-common is free software: you can redistribute it and/or modify it +# under the terms of the GNU Lesser General Public License as published by the +# Free Software Foundation, either version 2.1 of the License, or (at your +# option) any later version. +# +# Logilab-common is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License along +# with Logilab-common. If not, see . 
+"""This module provides bases for predicates dispatching (the pattern in use +here is similar to what's refered as multi-dispatch or predicate-dispatch in the +literature, though a bit different since the idea is to select across different +implementation 'e.g. classes), not to dispatch a message to a function or +method. It contains the following classes: + +* :class:`RegistryStore`, the top level object which loads implementation + objects and stores them into registries. You'll usually use it to access + registries and their contained objects; + +* :class:`Registry`, the base class which contains objects semantically grouped + (for instance, sharing a same API, hence the 'implementation' name). You'll + use it to select the proper implementation according to a context. Notice you + may use registries on their own without using the store. + +.. Note:: + + implementation objects are usually designed to be accessed through the + registry and not by direct instantiation, besides to use it as base classe. + +The selection procedure is delegated to a selector, which is responsible for +scoring the object according to some context. At the end of the selection, if an +implementation has been found, an instance of this class is returned. A selector +is built from one or more predicates combined together using AND, OR, NOT +operators (actually `&`, `|` and `~`). You'll thus find some base classes to +build predicates: + +* :class:`Predicate`, the abstract base predicate class + +* :class:`AndPredicate`, :class:`OrPredicate`, :class:`NotPredicate`, which you + shouldn't have to use directly. You'll use `&`, `|` and '~' operators between + predicates directly + +* :func:`objectify_predicate` + +You'll eventually find one concrete predicate: :class:`yes` + +.. autoclass:: RegistryStore +.. autoclass:: Registry + +Predicates +---------- +.. autoclass:: Predicate +.. autofunc:: objectify_predicate +.. autoclass:: yes + +Debugging +--------- +.. 
autoclass:: traced_selection + +Exceptions +---------- +.. autoclass:: RegistryException +.. autoclass:: RegistryNotFound +.. autoclass:: ObjectNotFound +.. autoclass:: NoSelectableObject +""" + +from __future__ import print_function + +__docformat__ = "restructuredtext en" + +import sys +import types +import weakref +import traceback as tb +from os import listdir, stat +from os.path import join, isdir, exists +from logging import getLogger +from warnings import warn + +from six import string_types, add_metaclass + +from logilab.common.modutils import modpath_from_file +from logilab.common.logging_ext import set_log_methods +from logilab.common.decorators import classproperty + + +class RegistryException(Exception): + """Base class for registry exception.""" + +class RegistryNotFound(RegistryException): + """Raised when an unknown registry is requested. + + This is usually a programming/typo error. + """ + +class ObjectNotFound(RegistryException): + """Raised when an unregistered object is requested. + + This may be a programming/typo or a misconfiguration error. + """ + +class NoSelectableObject(RegistryException): + """Raised when no object is selectable for a given context.""" + def __init__(self, args, kwargs, objects): + self.args = args + self.kwargs = kwargs + self.objects = objects + + def __str__(self): + return ('args: %s, kwargs: %s\ncandidates: %s' + % (self.args, self.kwargs.keys(), self.objects)) + +class SelectAmbiguity(RegistryException): + """Raised when several objects compete at selection time with an equal + score. + + """ + + +def _modname_from_path(path, extrapath=None): + modpath = modpath_from_file(path, extrapath) + # omit '__init__' from package's name to avoid loading that module + # once for each name when it is imported by some other object + # module. This supposes import in modules are done as:: + # + # from package import something + # + # not:: + # + # from package.__init__ import something + # + # which seems quite correct. 
+    if modpath[-1] == '__init__':
+        modpath.pop()
+    return '.'.join(modpath)
+
+
+def _toload_info(path, extrapath, _toload=None):
+    """Return a dictionary of <modname>: <filepath> and an ordered list of
+    (file, module name) to load
+    """
+    if _toload is None:
+        assert isinstance(path, list)
+        _toload = {}, []
+    for fileordir in path:
+        if isdir(fileordir) and exists(join(fileordir, '__init__.py')):
+            subfiles = [join(fileordir, fname) for fname in listdir(fileordir)]
+            _toload_info(subfiles, extrapath, _toload)
+        elif fileordir[-3:] == '.py':
+            modname = _modname_from_path(fileordir, extrapath)
+            _toload[0][modname] = fileordir
+            _toload[1].append((fileordir, modname))
+    return _toload
+
+
+class RegistrableObject(object):
+    """This is the base class for registrable objects which are selected
+    according to a context.
+
+    :attr:`__registry__`
+      name of the registry for this object (string like 'views',
+      'templates'...). You may want to define `__registries__` directly if your
+      object should be registered in several registries.
+
+    :attr:`__regid__`
+      object's identifier in the registry (string like 'main',
+      'primary', 'folder_box')
+
+    :attr:`__select__`
+      class' selector
+
+    Moreover, the `__abstract__` attribute may be set to True to indicate that a
+    class is abstract and should not be registered.
+
+    You don't have to inherit from this class to put it in a registry (having
+    `__regid__` and `__select__` is enough), though this is needed for classes
+    that should be automatically registered.
+    """
+
+    __registry__ = None
+    __regid__ = None
+    __select__ = None
+    __abstract__ = True # see doc snippets below (in Registry class)
+
+    @classproperty
+    def __registries__(cls):
+        if cls.__registry__ is None:
+            return ()
+        return (cls.__registry__,)
+
+
+class RegistrableInstance(RegistrableObject):
+    """Inherit this class if you want instances of the classes to be
+    automatically registered.
+ """ + + def __new__(cls, *args, **kwargs): + """Add a __module__ attribute telling the module where the instance was + created, for automatic registration. + """ + obj = super(RegistrableInstance, cls).__new__(cls) + # XXX subclass must no override __new__ + filepath = tb.extract_stack(limit=2)[0][0] + obj.__module__ = _modname_from_path(filepath) + return obj + + +class Registry(dict): + """The registry store a set of implementations associated to identifier: + + * to each identifier are associated a list of implementations + + * to select an implementation of a given identifier, you should use one of the + :meth:`select` or :meth:`select_or_none` method + + * to select a list of implementations for a context, you should use the + :meth:`possible_objects` method + + * dictionary like access to an identifier will return the bare list of + implementations for this identifier. + + To be usable in a registry, the only requirement is to have a `__select__` + attribute. + + At the end of the registration process, the :meth:`__registered__` + method is called on each registered object which have them, given the + registry in which it's registered as argument. + + Registration methods: + + .. automethod: register + .. automethod: unregister + + Selection methods: + + .. automethod: select + .. automethod: select_or_none + .. automethod: possible_objects + .. 
automethod: object_by_id + """ + def __init__(self, debugmode): + super(Registry, self).__init__() + self.debugmode = debugmode + + def __getitem__(self, name): + """return the registry (list of implementation objects) associated to + this name + """ + try: + return super(Registry, self).__getitem__(name) + except KeyError: + exc = ObjectNotFound(name) + exc.__traceback__ = sys.exc_info()[-1] + raise exc + + @classmethod + def objid(cls, obj): + """returns a unique identifier for an object stored in the registry""" + return '%s.%s' % (obj.__module__, cls.objname(obj)) + + @classmethod + def objname(cls, obj): + """returns a readable name for an object stored in the registry""" + return getattr(obj, '__name__', id(obj)) + + def initialization_completed(self): + """call method __registered__() on registered objects when the callback + is defined""" + for objects in self.values(): + for objectcls in objects: + registered = getattr(objectcls, '__registered__', None) + if registered: + registered(self) + if self.debugmode: + wrap_predicates(_lltrace) + + def register(self, obj, oid=None, clear=False): + """base method to add an object in the registry""" + assert not '__abstract__' in obj.__dict__, obj + assert obj.__select__, obj + oid = oid or obj.__regid__ + assert oid, ('no explicit name supplied to register object %s, ' + 'which has no __regid__ set' % obj) + if clear: + objects = self[oid] = [] + else: + objects = self.setdefault(oid, []) + assert not obj in objects, 'object %s is already registered' % obj + objects.append(obj) + + def register_and_replace(self, obj, replaced): + """remove and register """ + # XXXFIXME this is a duplication of unregister() + # remove register_and_replace in favor of unregister + register + # or simplify by calling unregister then register here + if not isinstance(replaced, string_types): + replaced = self.objid(replaced) + # prevent from misspelling + assert obj is not replaced, 'replacing an object by itself: %s' % obj + 
registered_objs = self.get(obj.__regid__, ()) + for index, registered in enumerate(registered_objs): + if self.objid(registered) == replaced: + del registered_objs[index] + break + else: + self.warning('trying to replace %s that is not registered with %s', + replaced, obj) + self.register(obj) + + def unregister(self, obj): + """remove object from this registry""" + objid = self.objid(obj) + oid = obj.__regid__ + for registered in self.get(oid, ()): + # use self.objid() to compare objects because vreg will probably + # have its own version of the object, loaded through execfile + if self.objid(registered) == objid: + self[oid].remove(registered) + break + else: + self.warning('can\'t remove %s, no id %s in the registry', + objid, oid) + + def all_objects(self): + """return a list containing all objects in this registry. + """ + result = [] + for objs in self.values(): + result += objs + return result + + # dynamic selection methods ################################################ + + def object_by_id(self, oid, *args, **kwargs): + """return object with the `oid` identifier. Only one object is expected + to be found. + + raise :exc:`ObjectNotFound` if there are no object with id `oid` in this + registry + + raise :exc:`AssertionError` if there is more than one object there + """ + objects = self[oid] + assert len(objects) == 1, objects + return objects[0](*args, **kwargs) + + def select(self, __oid, *args, **kwargs): + """return the most specific object among those with the given oid + according to the given context. 
+ + raise :exc:`ObjectNotFound` if there is no object with id `oid` in this + registry + + raise :exc:`NoSelectableObject` if no object can be selected + """ + obj = self._select_best(self[__oid], *args, **kwargs) + if obj is None: + raise NoSelectableObject(args, kwargs, self[__oid] ) + return obj + + def select_or_none(self, __oid, *args, **kwargs): + """return the most specific object among those with the given oid + according to the given context, or None if no object applies. + """ + try: + return self._select_best(self[__oid], *args, **kwargs) + except ObjectNotFound: + return None + + def possible_objects(self, *args, **kwargs): + """return an iterator on possible objects in this registry for the given + context + """ + for objects in self.values(): + obj = self._select_best(objects, *args, **kwargs) + if obj is None: + continue + yield obj + + def _select_best(self, objects, *args, **kwargs): + """return an instance of the most specific object according + to parameters + + return None if no object applies (don't raise `NoSelectableObject` since + it's costly when searching objects using `possible_objects` + (e.g. searching for hooks)).
+ """ + score, winners = 0, None + for obj in objects: + objectscore = obj.__select__(obj, *args, **kwargs) + if objectscore > score: + score, winners = objectscore, [obj] + elif objectscore > 0 and objectscore == score: + winners.append(obj) + if winners is None: + return None + if len(winners) > 1: + # log in production environement / test, error while debugging + msg = 'select ambiguity: %s\n(args: %s, kwargs: %s)' + if self.debugmode: + # raise bare exception in debug mode + raise SelectAmbiguity(msg % (winners, args, kwargs.keys())) + self.error(msg, winners, args, kwargs.keys()) + # return the result of calling the object + return self.selected(winners[0], args, kwargs) + + def selected(self, winner, args, kwargs): + """override here if for instance you don't want "instanciation" + """ + return winner(*args, **kwargs) + + # these are overridden by set_log_methods below + # only defining here to prevent pylint from complaining + info = warning = error = critical = exception = debug = lambda msg, *a, **kw: None + + +def obj_registries(cls, registryname=None): + """return a tuple of registry names (see __registries__)""" + if registryname: + return (registryname,) + return cls.__registries__ + + +class RegistryStore(dict): + """This class is responsible for loading objects and storing them + in their registry which is created on the fly as needed. + + It handles dynamic registration of objects and provides a + convenient api to access them. To be recognized as an object that + should be stored into one of the store's registry + (:class:`Registry`), an object must provide the following + attributes, used control how they interact with the registry: + + :attr:`__registries__` + list of registry names (string like 'views', 'templates'...) 
into which + the object should be registered + + :attr:`__regid__` + object identifier in the registry (string like 'main', + 'primary', 'folder_box') + + :attr:`__select__` + the object predicate selectors + + Moreover, the :attr:`__abstract__` attribute may be set to `True` + to indicate that an object is abstract and should not be registered + (such inherited attributes not considered). + + .. Note:: + + When using the store to load objects dynamically, you *always* have + to use **super()** to get the methods and attributes of the + superclasses, and not use the class identifier. If not, you'll get into + trouble at reload time. + + For example, instead of writing:: + + class Thing(Parent): + __regid__ = 'athing' + __select__ = yes() + + def f(self, arg1): + Parent.f(self, arg1) + + You must write:: + + class Thing(Parent): + __regid__ = 'athing' + __select__ = yes() + + def f(self, arg1): + super(Thing, self).f(arg1) + + Controlling object registration + ------------------------------- + + Dynamic loading is triggered by calling the + :meth:`register_objects` method, given a list of directories to + inspect for python modules. + + .. automethod: register_objects + + For each module, by default, all compatible objects are registered + automatically. However if some objects come as replacement of + other objects, or have to be included only if some condition is + met, you'll have to define a `registration_callback(vreg)` + function in the module and explicitly register **all objects** in + this module, using the api defined below. + + + .. automethod:: RegistryStore.register_all + .. automethod:: RegistryStore.register_and_replace + .. automethod:: RegistryStore.register + .. automethod:: RegistryStore.unregister + + .. Note:: + Once the function `registration_callback(vreg)` is implemented in a + module, all the objects from this module have to be explicitly + registered as it disables the automatic object registration. + + + Examples: + + .. 
sourcecode:: python + + def registration_callback(store): + # register everything in the module except BabarClass + store.register_all(globals().values(), __name__, (BabarClass,)) + + # conditionally register BabarClass + if 'babar_relation' in store.schema: + store.register(BabarClass) + + In this example, we register all application object classes defined in the module + except `BabarClass`. This class is then registered only if the 'babar_relation' + relation type is defined in the instance schema. + + .. sourcecode:: python + + def registration_callback(store): + store.register(Elephant) + # replace Babar by Celeste + store.register_and_replace(Celeste, Babar) + + In this example, we explicitly register classes one by one: + + * the `Elephant` class + * the `Celeste` to replace `Babar` + + If at some point we register a new appobject class in this module, it won't be + registered at all without modification to the `registration_callback` + implementation. The first example will register it though, thanks to the call + to the `register_all` method. + + Controlling registry instantiation + ---------------------------------- + + The `REGISTRY_FACTORY` class dictionary allows to specify which class should + be instantiated for a given registry name. The class associated to `None` + key will be the class used when there is no specific class for a name. 
+ """ + + def __init__(self, debugmode=False): + super(RegistryStore, self).__init__() + self.debugmode = debugmode + + def reset(self): + """clear all registries managed by this store""" + # don't use self.clear, we want to keep existing subdictionaries + for subdict in self.values(): + subdict.clear() + self._lastmodifs = {} + + def __getitem__(self, name): + """return the registry (dictionary of class objects) associated to + this name + """ + try: + return super(RegistryStore, self).__getitem__(name) + except KeyError: + exc = RegistryNotFound(name) + exc.__traceback__ = sys.exc_info()[-1] + raise exc + + # methods for explicit (un)registration ################################### + + # default class, when no specific class set + REGISTRY_FACTORY = {None: Registry} + + def registry_class(self, regid): + """return existing registry named regid or use factory to create one and + return it""" + try: + return self.REGISTRY_FACTORY[regid] + except KeyError: + return self.REGISTRY_FACTORY[None] + + def setdefault(self, regid): + try: + return self[regid] + except RegistryNotFound: + self[regid] = self.registry_class(regid)(self.debugmode) + return self[regid] + + def register_all(self, objects, modname, butclasses=()): + """register registrable objects into `objects`. + + Registrable objects are properly configured subclasses of + :class:`RegistrableObject`. Objects which are not defined in the module + `modname` or which are in `butclasses` won't be registered. + + Typical usage is: + + .. sourcecode:: python + + store.register_all(globals().values(), __name__, (ClassIWantToRegisterExplicitly,)) + + So you get partially automatic registration, keeping manual registration + for some object (to use + :meth:`~logilab.common.registry.RegistryStore.register_and_replace` for + instance). 
+ """ + assert isinstance(modname, string_types), \ + 'modname expected to be a module name (ie string), got %r' % modname + for obj in objects: + if self.is_registrable(obj) and obj.__module__ == modname and not obj in butclasses: + if isinstance(obj, type): + self._load_ancestors_then_object(modname, obj, butclasses) + else: + self.register(obj) + + def register(self, obj, registryname=None, oid=None, clear=False): + """register `obj` implementation into `registryname` or + `obj.__registries__` if not specified, with identifier `oid` or + `obj.__regid__` if not specified. + + If `clear` is true, all objects with the same identifier will be + previously unregistered. + """ + assert not obj.__dict__.get('__abstract__'), obj + for registryname in obj_registries(obj, registryname): + registry = self.setdefault(registryname) + registry.register(obj, oid=oid, clear=clear) + self.debug("register %s in %s['%s']", + registry.objname(obj), registryname, oid or obj.__regid__) + self._loadedmods.setdefault(obj.__module__, {})[registry.objid(obj)] = obj + + def unregister(self, obj, registryname=None): + """unregister `obj` object from the registry `registryname` or + `obj.__registries__` if not specified. + """ + for registryname in obj_registries(obj, registryname): + registry = self[registryname] + registry.unregister(obj) + self.debug("unregister %s from %s['%s']", + registry.objname(obj), registryname, obj.__regid__) + + def register_and_replace(self, obj, replaced, registryname=None): + """register `obj` object into `registryname` or + `obj.__registries__` if not specified. If found, the `replaced` object + will be unregistered first (else a warning will be issued as it is + generally unexpected). 
+ """ + for registryname in obj_registries(obj, registryname): + registry = self[registryname] + registry.register_and_replace(obj, replaced) + self.debug("register %s in %s['%s'] instead of %s", + registry.objname(obj), registryname, obj.__regid__, + registry.objname(replaced)) + + # initialization methods ################################################### + + def init_registration(self, path, extrapath=None): + """reset registry and walk down path to return list of (path, name) + file modules to be loaded""" + # XXX make this private by renaming it to _init_registration ? + self.reset() + # compute list of all modules that have to be loaded + self._toloadmods, filemods = _toload_info(path, extrapath) + # XXX is _loadedmods still necessary ? It seems like it's useful + # to avoid loading same module twice, especially with the + # _load_ancestors_then_object logic but this needs to be checked + self._loadedmods = {} + return filemods + + def register_objects(self, path, extrapath=None): + """register all objects found walking down """ + # load views from each directory in the instance's path + # XXX inline init_registration ? + filemods = self.init_registration(path, extrapath) + for filepath, modname in filemods: + self.load_file(filepath, modname) + self.initialization_completed() + + def initialization_completed(self): + """call initialization_completed() on all known registries""" + for reg in self.values(): + reg.initialization_completed() + + def _mdate(self, filepath): + """ return the modification date of a file path """ + try: + return stat(filepath)[-2] + except OSError: + # this typically happens on emacs backup files (.#foo.py) + self.warning('Unable to load %s. 
It is likely to be a backup file', + filepath) + return None + + def is_reload_needed(self, path): + """return True if something module changed and the registry should be + reloaded + """ + lastmodifs = self._lastmodifs + for fileordir in path: + if isdir(fileordir) and exists(join(fileordir, '__init__.py')): + if self.is_reload_needed([join(fileordir, fname) + for fname in listdir(fileordir)]): + return True + elif fileordir[-3:] == '.py': + mdate = self._mdate(fileordir) + if mdate is None: + continue # backup file, see _mdate implementation + elif "flymake" in fileordir: + # flymake + pylint in use, don't consider these they will corrupt the registry + continue + if fileordir not in lastmodifs or lastmodifs[fileordir] < mdate: + self.info('File %s changed since last visit', fileordir) + return True + return False + + def load_file(self, filepath, modname): + """ load registrable objects (if any) from a python file """ + from logilab.common.modutils import load_module_from_name + if modname in self._loadedmods: + return + self._loadedmods[modname] = {} + mdate = self._mdate(filepath) + if mdate is None: + return # backup file, see _mdate implementation + elif "flymake" in filepath: + # flymake + pylint in use, don't consider these they will corrupt the registry + return + # set update time before module loading, else we get some reloading + # weirdness in case of syntax error or other error while importing the + # module + self._lastmodifs[filepath] = mdate + # load the module + module = load_module_from_name(modname) + self.load_module(module) + + def load_module(self, module): + """Automatically handle module objects registration. 
+ + Instances are registered as soon as they are hashable and have the + following attributes: + + * __regid__ (a string) + * __select__ (a callable) + * __registries__ (a tuple/list of string) + + For classes this is a bit more complicated : + + - first ensure parent classes are already registered + + - class with __abstract__ == True in their local dictionary are skipped + + - object class needs to have registries and identifier properly set to a + non empty string to be registered. + """ + self.info('loading %s from %s', module.__name__, module.__file__) + if hasattr(module, 'registration_callback'): + module.registration_callback(self) + else: + self.register_all(vars(module).values(), module.__name__) + + def _load_ancestors_then_object(self, modname, objectcls, butclasses=()): + """handle class registration according to rules defined in + :meth:`load_module` + """ + # backward compat, we used to allow whatever else than classes + if not isinstance(objectcls, type): + if self.is_registrable(objectcls) and objectcls.__module__ == modname: + self.register(objectcls) + return + # imported classes + objmodname = objectcls.__module__ + if objmodname != modname: + # The module of the object is not the same as the currently + # worked on module, or this is actually an instance, which + # has no module at all + if objmodname in self._toloadmods: + # if this is still scheduled for loading, let's proceed immediately, + # but using the object module + self.load_file(self._toloadmods[objmodname], objmodname) + return + # ensure object hasn't been already processed + clsid = '%s.%s' % (modname, objectcls.__name__) + if clsid in self._loadedmods[modname]: + return + self._loadedmods[modname][clsid] = objectcls + # ensure ancestors are registered + for parent in objectcls.__bases__: + self._load_ancestors_then_object(modname, parent, butclasses) + # ensure object is registrable + if objectcls in butclasses or not self.is_registrable(objectcls): + return + # backward compat + 
reg = self.setdefault(obj_registries(objectcls)[0]) + if reg.objname(objectcls)[0] == '_': + warn("[lgc 0.59] object whose name start with '_' won't be " + "skipped anymore at some point, use __abstract__ = True " + "instead (%s)" % objectcls, DeprecationWarning) + return + # register, finally + self.register(objectcls) + + @classmethod + def is_registrable(cls, obj): + """ensure `obj` should be registered + + as arbitrary stuff may be registered, do a lot of check and warn about + weird cases (think to dumb proxy objects) + """ + if isinstance(obj, type): + if not issubclass(obj, RegistrableObject): + # ducktyping backward compat + if not (getattr(obj, '__registries__', None) + and getattr(obj, '__regid__', None) + and getattr(obj, '__select__', None)): + return False + elif issubclass(obj, RegistrableInstance): + return False + elif not isinstance(obj, RegistrableInstance): + return False + if not obj.__regid__: + return False # no regid + registries = obj.__registries__ + if not registries: + return False # no registries + selector = obj.__select__ + if not selector: + return False # no selector + if obj.__dict__.get('__abstract__', False): + return False + # then detect potential problems that should be warned + if not isinstance(registries, (tuple, list)): + cls.warning('%s has __registries__ which is not a list or tuple', obj) + return False + if not callable(selector): + cls.warning('%s has not callable __select__', obj) + return False + return True + + # these are overridden by set_log_methods below + # only defining here to prevent pylint from complaining + info = warning = error = critical = exception = debug = lambda msg, *a, **kw: None + + +# init logging +set_log_methods(RegistryStore, getLogger('registry.store')) +set_log_methods(Registry, getLogger('registry')) + + +# helpers for debugging selectors +TRACED_OIDS = None + +def _trace_selector(cls, selector, args, ret): + vobj = args[0] + if TRACED_OIDS == 'all' or vobj.__regid__ in TRACED_OIDS: + 
print('%s -> %s for %s(%s)' % (cls, ret, vobj, vobj.__regid__)) + +def _lltrace(selector): + """use this decorator on your predicates so they become traceable with + :class:`traced_selection` + """ + def traced(cls, *args, **kwargs): + ret = selector(cls, *args, **kwargs) + if TRACED_OIDS is not None: + _trace_selector(cls, selector, args, ret) + return ret + traced.__name__ = selector.__name__ + traced.__doc__ = selector.__doc__ + return traced + +class traced_selection(object): # pylint: disable=C0103 + """ + Typical usage is : + + .. sourcecode:: python + + >>> from logilab.common.registry import traced_selection + >>> with traced_selection(): + ... # some code in which you want to debug selectors + ... # for all objects + + This will yield lines like this in the logs:: + + selector one_line_rset returned 0 for + + You can also give to :class:`traced_selection` the identifiers of objects on + which you want to debug selection ('oid1' and 'oid2' in the example above). + + .. sourcecode:: python + + >>> with traced_selection( ('regid1', 'regid2') ): + ... # some code in which you want to debug selectors + ... # for objects with __regid__ 'regid1' and 'regid2' + + A potentially useful point to set up such a tracing function is + the `logilab.common.registry.Registry.select` method body. + """ + + def __init__(self, traced='all'): + self.traced = traced + + def __enter__(self): + global TRACED_OIDS + TRACED_OIDS = self.traced + + def __exit__(self, exctype, exc, traceback): + global TRACED_OIDS + TRACED_OIDS = None + return traceback is None + +# selector base classes and operations ######################################## + +def objectify_predicate(selector_func): + """Most of the time, a simple score function is enough to build a selector. 
+ The :func:`objectify_predicate` decorator turn it into a proper selector + class:: + + @objectify_predicate + def one(cls, req, rset=None, **kwargs): + return 1 + + class MyView(View): + __select__ = View.__select__ & one() + + """ + return type(selector_func.__name__, (Predicate,), + {'__doc__': selector_func.__doc__, + '__call__': lambda self, *a, **kw: selector_func(*a, **kw)}) + + +_PREDICATES = {} + +def wrap_predicates(decorator): + for predicate in _PREDICATES.values(): + if not '_decorators' in predicate.__dict__: + predicate._decorators = set() + if decorator in predicate._decorators: + continue + predicate._decorators.add(decorator) + predicate.__call__ = decorator(predicate.__call__) + +class PredicateMetaClass(type): + def __new__(mcs, *args, **kwargs): + # use __new__ so subclasses doesn't have to call Predicate.__init__ + inst = type.__new__(mcs, *args, **kwargs) + proxy = weakref.proxy(inst, lambda p: _PREDICATES.pop(id(p))) + _PREDICATES[id(proxy)] = proxy + return inst + + +@add_metaclass(PredicateMetaClass) +class Predicate(object): + """base class for selector classes providing implementation + for operators ``&``, ``|`` and ``~`` + + This class is only here to give access to binary operators, the selector + logic itself should be implemented in the :meth:`__call__` method. Notice it + should usually accept any arbitrary arguments (the context), though that may + vary depending on your usage of the registry. + + a selector is called to help choosing the correct object for a + particular context by returning a score (`int`) telling how well + the implementation given as first argument fit to the given context. + + 0 score means that the class doesn't apply. + """ + + @property + def func_name(self): + # backward compatibility + return self.__class__.__name__ + + def search_selector(self, selector): + """search for the given selector, selector instance or tuple of + selectors in the selectors tree. Return None if not found. 
+ """ + if self is selector: + return self + if (isinstance(selector, type) or isinstance(selector, tuple)) and \ + isinstance(self, selector): + return self + return None + + def __str__(self): + return self.__class__.__name__ + + def __and__(self, other): + return AndPredicate(self, other) + def __rand__(self, other): + return AndPredicate(other, self) + def __iand__(self, other): + return AndPredicate(self, other) + def __or__(self, other): + return OrPredicate(self, other) + def __ror__(self, other): + return OrPredicate(other, self) + def __ior__(self, other): + return OrPredicate(self, other) + + def __invert__(self): + return NotPredicate(self) + + # XXX (function | function) or (function & function) not managed yet + + def __call__(self, cls, *args, **kwargs): + return NotImplementedError("selector %s must implement its logic " + "in its __call__ method" % self.__class__) + + def __repr__(self): + return u'' % (self.__class__.__name__, id(self)) + + +class MultiPredicate(Predicate): + """base class for compound selector classes""" + + def __init__(self, *selectors): + self.selectors = self.merge_selectors(selectors) + + def __str__(self): + return '%s(%s)' % (self.__class__.__name__, + ','.join(str(s) for s in self.selectors)) + + @classmethod + def merge_selectors(cls, selectors): + """deal with selector instanciation when necessary and merge + multi-selectors if possible: + + AndPredicate(AndPredicate(sel1, sel2), AndPredicate(sel3, sel4)) + ==> AndPredicate(sel1, sel2, sel3, sel4) + """ + merged_selectors = [] + for selector in selectors: + # XXX do we really want magic-transformations below? + # if so, wanna warn about them? 
+ if isinstance(selector, types.FunctionType): + selector = objectify_predicate(selector)() + if isinstance(selector, type) and issubclass(selector, Predicate): + selector = selector() + assert isinstance(selector, Predicate), selector + if isinstance(selector, cls): + merged_selectors += selector.selectors + else: + merged_selectors.append(selector) + return merged_selectors + + def search_selector(self, selector): + """search for the given selector or selector instance (or tuple of + selectors) in the selectors tree. Return None if not found + """ + for childselector in self.selectors: + if childselector is selector: + return childselector + found = childselector.search_selector(selector) + if found is not None: + return found + # if not found in children, maybe we are looking for self? + return super(MultiPredicate, self).search_selector(selector) + + +class AndPredicate(MultiPredicate): + """and-chained selectors""" + def __call__(self, cls, *args, **kwargs): + score = 0 + for selector in self.selectors: + partscore = selector(cls, *args, **kwargs) + if not partscore: + return 0 + score += partscore + return score + + +class OrPredicate(MultiPredicate): + """or-chained selectors""" + def __call__(self, cls, *args, **kwargs): + for selector in self.selectors: + partscore = selector(cls, *args, **kwargs) + if partscore: + return partscore + return 0 + +class NotPredicate(Predicate): + """negation selector""" + def __init__(self, selector): + self.selector = selector + + def __call__(self, cls, *args, **kwargs): + score = self.selector(cls, *args, **kwargs) + return int(not score) + + def __str__(self): + return 'NOT(%s)' % self.selector + + +class yes(Predicate): # pylint: disable=C0103 + """Return the score given as parameter, with a default score of 0.5 so any + other selector take precedence. + + Usually used for objects which can be selected whatever the context, or + also sometimes to add arbitrary points to a score. 
+ + Take care, `yes(0)` could be named 'no'... + """ + def __init__(self, score=0.5): + self.score = score + + def __call__(self, *args, **kwargs): + return self.score + + +# deprecated stuff ############################################################# + +from logilab.common.deprecation import deprecated + +@deprecated('[lgc 0.59] use Registry.objid class method instead') +def classid(cls): + return '%s.%s' % (cls.__module__, cls.__name__) + +@deprecated('[lgc 0.59] use obj_registries function instead') +def class_registries(cls, registryname): + return obj_registries(cls, registryname) + diff --git a/pymode/libs/logilab/common/shellutils.py b/pymode/libs/logilab/common/shellutils.py new file mode 100644 index 00000000..4e689560 --- /dev/null +++ b/pymode/libs/logilab/common/shellutils.py @@ -0,0 +1,462 @@ +# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved. +# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr +# +# This file is part of logilab-common. +# +# logilab-common is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 2.1 of the License, or (at your option) any +# later version. +# +# logilab-common is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License along +# with logilab-common. If not, see . +"""shell/term utilities, useful to write some python scripts instead of shell +scripts. 
+""" + +from __future__ import print_function + +__docformat__ = "restructuredtext en" + +import os +import glob +import shutil +import stat +import sys +import tempfile +import time +import fnmatch +import errno +import string +import random +import subprocess +from os.path import exists, isdir, islink, basename, join + +from six import string_types +from six.moves import range, input as raw_input + +from logilab.common import STD_BLACKLIST, _handle_blacklist +from logilab.common.compat import str_to_bytes +from logilab.common.deprecation import deprecated + +try: + from logilab.common.proc import ProcInfo, NoSuchProcess +except ImportError: + # windows platform + class NoSuchProcess(Exception): pass + + def ProcInfo(pid): + raise NoSuchProcess() + + +class tempdir(object): + + def __enter__(self): + self.path = tempfile.mkdtemp() + return self.path + + def __exit__(self, exctype, value, traceback): + # rmtree in all cases + shutil.rmtree(self.path) + return traceback is None + + +class pushd(object): + def __init__(self, directory): + self.directory = directory + + def __enter__(self): + self.cwd = os.getcwd() + os.chdir(self.directory) + return self.directory + + def __exit__(self, exctype, value, traceback): + os.chdir(self.cwd) + + +def chown(path, login=None, group=None): + """Same as `os.chown` function but accepting user login or group name as + argument. If login or group is omitted, it's left unchanged. + + Note: you must own the file to chown it (or be root). Otherwise OSError is raised. + """ + if login is None: + uid = -1 + else: + try: + uid = int(login) + except ValueError: + import pwd # Platforms: Unix + uid = pwd.getpwnam(login).pw_uid + if group is None: + gid = -1 + else: + try: + gid = int(group) + except ValueError: + import grp + gid = grp.getgrnam(group).gr_gid + os.chown(path, uid, gid) + +def mv(source, destination, _action=shutil.move): + """A shell-like mv, supporting wildcards. 
+ """ + sources = glob.glob(source) + if len(sources) > 1: + assert isdir(destination) + for filename in sources: + _action(filename, join(destination, basename(filename))) + else: + try: + source = sources[0] + except IndexError: + raise OSError('No file matching %s' % source) + if isdir(destination) and exists(destination): + destination = join(destination, basename(source)) + try: + _action(source, destination) + except OSError as ex: + raise OSError('Unable to move %r to %r (%s)' % ( + source, destination, ex)) + +def rm(*files): + """A shell-like rm, supporting wildcards. + """ + for wfile in files: + for filename in glob.glob(wfile): + if islink(filename): + os.remove(filename) + elif isdir(filename): + shutil.rmtree(filename) + else: + os.remove(filename) + +def cp(source, destination): + """A shell-like cp, supporting wildcards. + """ + mv(source, destination, _action=shutil.copy) + +def find(directory, exts, exclude=False, blacklist=STD_BLACKLIST): + """Recursively find files ending with the given extensions from the directory. 
+ + :type directory: str + :param directory: + directory where the search should start + + :type exts: basestring or list or tuple + :param exts: + extensions or lists or extensions to search + + :type exclude: boolean + :param exts: + if this argument is True, returning files NOT ending with the given + extensions + + :type blacklist: list or tuple + :param blacklist: + optional list of files or directory to ignore, default to the value of + `logilab.common.STD_BLACKLIST` + + :rtype: list + :return: + the list of all matching files + """ + if isinstance(exts, string_types): + exts = (exts,) + if exclude: + def match(filename, exts): + for ext in exts: + if filename.endswith(ext): + return False + return True + else: + def match(filename, exts): + for ext in exts: + if filename.endswith(ext): + return True + return False + files = [] + for dirpath, dirnames, filenames in os.walk(directory): + _handle_blacklist(blacklist, dirnames, filenames) + # don't append files if the directory is blacklisted + dirname = basename(dirpath) + if dirname in blacklist: + continue + files.extend([join(dirpath, f) for f in filenames if match(f, exts)]) + return files + + +def globfind(directory, pattern, blacklist=STD_BLACKLIST): + """Recursively finds files matching glob `pattern` under `directory`. + + This is an alternative to `logilab.common.shellutils.find`. + + :type directory: str + :param directory: + directory where the search should start + + :type pattern: basestring + :param pattern: + the glob pattern (e.g *.py, foo*.py, etc.) 
+ + :type blacklist: list or tuple + :param blacklist: + optional list of files or directory to ignore, default to the value of + `logilab.common.STD_BLACKLIST` + + :rtype: iterator + :return: + iterator over the list of all matching files + """ + for curdir, dirnames, filenames in os.walk(directory): + _handle_blacklist(blacklist, dirnames, filenames) + for fname in fnmatch.filter(filenames, pattern): + yield join(curdir, fname) + +def unzip(archive, destdir): + import zipfile + if not exists(destdir): + os.mkdir(destdir) + zfobj = zipfile.ZipFile(archive) + for name in zfobj.namelist(): + if name.endswith('/'): + os.mkdir(join(destdir, name)) + else: + outfile = open(join(destdir, name), 'wb') + outfile.write(zfobj.read(name)) + outfile.close() + + +class Execute: + """This is a deadlock safe version of popen2 (no stdin), that returns + an object with errorlevel, out and err. + """ + + def __init__(self, command): + cmd = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + self.out, self.err = cmd.communicate() + self.status = os.WEXITSTATUS(cmd.returncode) + +Execute = deprecated('Use subprocess.Popen instead')(Execute) + + +def acquire_lock(lock_file, max_try=10, delay=10, max_delay=3600): + """Acquire a lock represented by a file on the file system + + If the process written in lock file doesn't exist anymore, we remove the + lock file immediately + If age of the lock_file is greater than max_delay, then we raise a UserWarning + """ + count = abs(max_try) + while count: + try: + fd = os.open(lock_file, os.O_EXCL | os.O_RDWR | os.O_CREAT) + os.write(fd, str_to_bytes(str(os.getpid())) ) + os.close(fd) + return True + except OSError as e: + if e.errno == errno.EEXIST: + try: + fd = open(lock_file, "r") + pid = int(fd.readline()) + pi = ProcInfo(pid) + age = (time.time() - os.stat(lock_file)[stat.ST_MTIME]) + if age / max_delay > 1 : + raise UserWarning("Command '%s' (pid %s) has locked the " + "file '%s' for %s minutes" + % 
(pi.name(), pid, lock_file, age/60)) + except UserWarning: + raise + except NoSuchProcess: + os.remove(lock_file) + except Exception: + # The try block is not essential. can be skipped. + # Note: ProcInfo object is only available for linux + # process information are not accessible... + # or lock_file is no more present... + pass + else: + raise + count -= 1 + time.sleep(delay) + else: + raise Exception('Unable to acquire %s' % lock_file) + +def release_lock(lock_file): + """Release a lock represented by a file on the file system.""" + os.remove(lock_file) + + +class ProgressBar(object): + """A simple text progression bar.""" + + def __init__(self, nbops, size=20, stream=sys.stdout, title=''): + if title: + self._fstr = '\r%s [%%-%ss]' % (title, int(size)) + else: + self._fstr = '\r[%%-%ss]' % int(size) + self._stream = stream + self._total = nbops + self._size = size + self._current = 0 + self._progress = 0 + self._current_text = None + self._last_text_write_size = 0 + + def _get_text(self): + return self._current_text + + def _set_text(self, text=None): + if text != self._current_text: + self._current_text = text + self.refresh() + + def _del_text(self): + self.text = None + + text = property(_get_text, _set_text, _del_text) + + def update(self, offset=1, exact=False): + """Move FORWARD to new cursor position (cursor will never go backward). 
+ + :offset: fraction of ``size`` + + :exact: + + - False: offset relative to current cursor position if True + - True: offset as an asbsolute position + + """ + if exact: + self._current = offset + else: + self._current += offset + + progress = int((float(self._current)/float(self._total))*self._size) + if progress > self._progress: + self._progress = progress + self.refresh() + + def refresh(self): + """Refresh the progression bar display.""" + self._stream.write(self._fstr % ('=' * min(self._progress, self._size)) ) + if self._last_text_write_size or self._current_text: + template = ' %%-%is' % (self._last_text_write_size) + text = self._current_text + if text is None: + text = '' + self._stream.write(template % text) + self._last_text_write_size = len(text.rstrip()) + self._stream.flush() + + def finish(self): + self._stream.write('\n') + self._stream.flush() + + +class DummyProgressBar(object): + __slot__ = ('text',) + + def refresh(self): + pass + def update(self): + pass + def finish(self): + pass + + +_MARKER = object() +class progress(object): + + def __init__(self, nbops=_MARKER, size=_MARKER, stream=_MARKER, title=_MARKER, enabled=True): + self.nbops = nbops + self.size = size + self.stream = stream + self.title = title + self.enabled = enabled + + def __enter__(self): + if self.enabled: + kwargs = {} + for attr in ('nbops', 'size', 'stream', 'title'): + value = getattr(self, attr) + if value is not _MARKER: + kwargs[attr] = value + self.pb = ProgressBar(**kwargs) + else: + self.pb = DummyProgressBar() + return self.pb + + def __exit__(self, exc_type, exc_val, exc_tb): + self.pb.finish() + +class RawInput(object): + + def __init__(self, input=None, printer=None): + self._input = input or raw_input + self._print = printer + + def ask(self, question, options, default): + assert default in options + choices = [] + for option in options: + if option == default: + label = option[0].upper() + else: + label = option[0].lower() + if len(option) > 1: + label += 
'(%s)' % option[1:].lower() + choices.append((option, label)) + prompt = "%s [%s]: " % (question, + '/'.join([opt[1] for opt in choices])) + tries = 3 + while tries > 0: + answer = self._input(prompt).strip().lower() + if not answer: + return default + possible = [option for option, label in choices + if option.lower().startswith(answer)] + if len(possible) == 1: + return possible[0] + elif len(possible) == 0: + msg = '%s is not an option.' % answer + else: + msg = ('%s is an ambiguous answer, do you mean %s ?' % ( + answer, ' or '.join(possible))) + if self._print: + self._print(msg) + else: + print(msg) + tries -= 1 + raise Exception('unable to get a sensible answer') + + def confirm(self, question, default_is_yes=True): + default = default_is_yes and 'y' or 'n' + answer = self.ask(question, ('y', 'n'), default) + return answer == 'y' + +ASK = RawInput() + + +def getlogin(): + """avoid using os.getlogin() because of strange tty / stdin problems + (man 3 getlogin) + Another solution would be to use $LOGNAME, $USER or $USERNAME + """ + if sys.platform != 'win32': + import pwd # Platforms: Unix + return pwd.getpwuid(os.getuid())[0] + else: + return os.environ['USERNAME'] + +def generate_password(length=8, vocab=string.ascii_letters + string.digits): + """dumb password generation function""" + pwd = '' + for i in range(length): + pwd += random.choice(vocab) + return pwd diff --git a/pymode/libs/logilab/common/sphinx_ext.py b/pymode/libs/logilab/common/sphinx_ext.py new file mode 100644 index 00000000..a24608ce --- /dev/null +++ b/pymode/libs/logilab/common/sphinx_ext.py @@ -0,0 +1,87 @@ +# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved. +# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr +# +# This file is part of logilab-common. 
+# +# logilab-common is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 2.1 of the License, or (at your option) any +# later version. +# +# logilab-common is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License along +# with logilab-common. If not, see . +from logilab.common.decorators import monkeypatch + +from sphinx.ext import autodoc + +class DocstringOnlyModuleDocumenter(autodoc.ModuleDocumenter): + objtype = 'docstring' + def format_signature(self): + pass + def add_directive_header(self, sig): + pass + def document_members(self, all_members=False): + pass + + def resolve_name(self, modname, parents, path, base): + if modname is not None: + return modname, parents + [base] + return (path or '') + base, [] + + +#autodoc.add_documenter(DocstringOnlyModuleDocumenter) + +def setup(app): + app.add_autodocumenter(DocstringOnlyModuleDocumenter) + + + +from sphinx.ext.autodoc import (ViewList, Options, AutodocReporter, nodes, + assemble_option_dict, nested_parse_with_titles) + +@monkeypatch(autodoc.AutoDirective) +def run(self): + self.filename_set = set() # a set of dependent filenames + self.reporter = self.state.document.reporter + self.env = self.state.document.settings.env + self.warnings = [] + self.result = ViewList() + + # find out what documenter to call + objtype = self.name[4:] + doc_class = self._registry[objtype] + # process the options with the selected documenter's option_spec + self.genopt = Options(assemble_option_dict( + self.options.items(), doc_class.option_spec)) + # generate the output + documenter = doc_class(self, self.arguments[0]) + 
documenter.generate(more_content=self.content) + if not self.result: + return self.warnings + + # record all filenames as dependencies -- this will at least + # partially make automatic invalidation possible + for fn in self.filename_set: + self.env.note_dependency(fn) + + # use a custom reporter that correctly assigns lines to source + # filename/description and lineno + old_reporter = self.state.memo.reporter + self.state.memo.reporter = AutodocReporter(self.result, + self.state.memo.reporter) + if self.name in ('automodule', 'autodocstring'): + node = nodes.section() + # necessary so that the child nodes get the right source/line set + node.document = self.state.document + nested_parse_with_titles(self.state, self.result, node) + else: + node = nodes.paragraph() + node.document = self.state.document + self.state.nested_parse(self.result, 0, node) + self.state.memo.reporter = old_reporter + return self.warnings + node.children diff --git a/pymode/libs/logilab/common/sphinxutils.py b/pymode/libs/logilab/common/sphinxutils.py new file mode 100644 index 00000000..ab6e8a18 --- /dev/null +++ b/pymode/libs/logilab/common/sphinxutils.py @@ -0,0 +1,122 @@ +# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved. +# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr +# +# This file is part of logilab-common. +# +# logilab-common is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 2.1 of the License, or (at your option) any +# later version. +# +# logilab-common is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License along +# with logilab-common. If not, see . 
+"""Sphinx utils + +ModuleGenerator: Generate a file that lists all the modules of a list of +packages in order to pull all the docstring. +This should not be used in a makefile to systematically generate sphinx +documentation! + +Typical usage: + +>>> from logilab.common.sphinxutils import ModuleGenerator +>>> mgen = ModuleGenerator('logilab common', '/home/adim/src/logilab/common') +>>> mgen.generate('api_logilab_common.rst', exclude_dirs=('test',)) +""" + +import os, sys +import os.path as osp +import inspect + +from logilab.common import STD_BLACKLIST +from logilab.common.shellutils import globfind +from logilab.common.modutils import load_module_from_file, modpath_from_file + +def module_members(module): + members = [] + for name, value in inspect.getmembers(module): + if getattr(value, '__module__', None) == module.__name__: + members.append( (name, value) ) + return sorted(members) + + +def class_members(klass): + return sorted([name for name in vars(klass) + if name not in ('__doc__', '__module__', + '__dict__', '__weakref__')]) + +class ModuleGenerator: + file_header = """.. -*- coding: utf-8 -*-\n\n%s\n""" + module_def = """ +:mod:`%s` +=======%s + +.. automodule:: %s + :members: %s +""" + class_def = """ + +.. 
autoclass:: %s + :members: %s + +""" + + def __init__(self, project_title, code_dir): + self.title = project_title + self.code_dir = osp.abspath(code_dir) + + def generate(self, dest_file, exclude_dirs=STD_BLACKLIST): + """make the module file""" + self.fn = open(dest_file, 'w') + num = len(self.title) + 6 + title = "=" * num + "\n %s API\n" % self.title + "=" * num + self.fn.write(self.file_header % title) + self.gen_modules(exclude_dirs=exclude_dirs) + self.fn.close() + + def gen_modules(self, exclude_dirs): + """generate all modules""" + for module in self.find_modules(exclude_dirs): + modname = module.__name__ + classes = [] + modmembers = [] + for objname, obj in module_members(module): + if inspect.isclass(obj): + classmembers = class_members(obj) + classes.append( (objname, classmembers) ) + else: + modmembers.append(objname) + self.fn.write(self.module_def % (modname, '=' * len(modname), + modname, + ', '.join(modmembers))) + for klass, members in classes: + self.fn.write(self.class_def % (klass, ', '.join(members))) + + def find_modules(self, exclude_dirs): + basepath = osp.dirname(self.code_dir) + basedir = osp.basename(basepath) + osp.sep + if basedir not in sys.path: + sys.path.insert(1, basedir) + for filepath in globfind(self.code_dir, '*.py', exclude_dirs): + if osp.basename(filepath) in ('setup.py', '__pkginfo__.py'): + continue + try: + module = load_module_from_file(filepath) + except: # module might be broken or magic + dotted_path = modpath_from_file(filepath) + module = type('.'.join(dotted_path), (), {}) # mock it + yield module + + +if __name__ == '__main__': + # example : + title, code_dir, outfile = sys.argv[1:] + generator = ModuleGenerator(title, code_dir) + # XXX modnames = ['logilab'] + generator.generate(outfile, ('test', 'tests', 'examples', + 'data', 'doc', '.hg', 'migration')) diff --git a/pymode/libs/logilab/common/table.py b/pymode/libs/logilab/common/table.py new file mode 100644 index 00000000..2f3df694 --- /dev/null +++ 
b/pymode/libs/logilab/common/table.py @@ -0,0 +1,929 @@ +# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved. +# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr +# +# This file is part of logilab-common. +# +# logilab-common is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 2.1 of the License, or (at your option) any +# later version. +# +# logilab-common is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License along +# with logilab-common. If not, see . +"""Table management module.""" + +from __future__ import print_function + +__docformat__ = "restructuredtext en" + +from six.moves import range + +class Table(object): + """Table defines a data table with column and row names. 
+ inv: + len(self.data) <= len(self.row_names) + forall(self.data, lambda x: len(x) <= len(self.col_names)) + """ + + def __init__(self, default_value=0, col_names=None, row_names=None): + self.col_names = [] + self.row_names = [] + self.data = [] + self.default_value = default_value + if col_names: + self.create_columns(col_names) + if row_names: + self.create_rows(row_names) + + def _next_row_name(self): + return 'row%s' % (len(self.row_names)+1) + + def __iter__(self): + return iter(self.data) + + def __eq__(self, other): + if other is None: + return False + else: + return list(self) == list(other) + + __hash__ = object.__hash__ + + def __ne__(self, other): + return not self == other + + def __len__(self): + return len(self.row_names) + + ## Rows / Columns creation ################################################# + def create_rows(self, row_names): + """Appends row_names to the list of existing rows + """ + self.row_names.extend(row_names) + for row_name in row_names: + self.data.append([self.default_value]*len(self.col_names)) + + def create_columns(self, col_names): + """Appends col_names to the list of existing columns + """ + for col_name in col_names: + self.create_column(col_name) + + def create_row(self, row_name=None): + """Creates a rowname to the row_names list + """ + row_name = row_name or self._next_row_name() + self.row_names.append(row_name) + self.data.append([self.default_value]*len(self.col_names)) + + + def create_column(self, col_name): + """Creates a colname to the col_names list + """ + self.col_names.append(col_name) + for row in self.data: + row.append(self.default_value) + + ## Sort by column ########################################################## + def sort_by_column_id(self, col_id, method = 'asc'): + """Sorts the table (in-place) according to data stored in col_id + """ + try: + col_index = self.col_names.index(col_id) + self.sort_by_column_index(col_index, method) + except ValueError: + raise KeyError("Col (%s) not found in 
table" % (col_id)) + + + def sort_by_column_index(self, col_index, method = 'asc'): + """Sorts the table 'in-place' according to data stored in col_index + + method should be in ('asc', 'desc') + """ + sort_list = sorted([(row[col_index], row, row_name) + for row, row_name in zip(self.data, self.row_names)]) + # Sorting sort_list will sort according to col_index + # If we want reverse sort, then reverse list + if method.lower() == 'desc': + sort_list.reverse() + + # Rebuild data / row names + self.data = [] + self.row_names = [] + for val, row, row_name in sort_list: + self.data.append(row) + self.row_names.append(row_name) + + def groupby(self, colname, *others): + """builds indexes of data + :returns: nested dictionaries pointing to actual rows + """ + groups = {} + colnames = (colname,) + others + col_indexes = [self.col_names.index(col_id) for col_id in colnames] + for row in self.data: + ptr = groups + for col_index in col_indexes[:-1]: + ptr = ptr.setdefault(row[col_index], {}) + ptr = ptr.setdefault(row[col_indexes[-1]], + Table(default_value=self.default_value, + col_names=self.col_names)) + ptr.append_row(tuple(row)) + return groups + + def select(self, colname, value): + grouped = self.groupby(colname) + try: + return grouped[value] + except KeyError: + return [] + + def remove(self, colname, value): + col_index = self.col_names.index(colname) + for row in self.data[:]: + if row[col_index] == value: + self.data.remove(row) + + + ## The 'setter' part ####################################################### + def set_cell(self, row_index, col_index, data): + """sets value of cell 'row_indew', 'col_index' to data + """ + self.data[row_index][col_index] = data + + + def set_cell_by_ids(self, row_id, col_id, data): + """sets value of cell mapped by row_id and col_id to data + Raises a KeyError if row_id or col_id are not found in the table + """ + try: + row_index = self.row_names.index(row_id) + except ValueError: + raise KeyError("Row (%s) not found in table" 
% (row_id)) + else: + try: + col_index = self.col_names.index(col_id) + self.data[row_index][col_index] = data + except ValueError: + raise KeyError("Column (%s) not found in table" % (col_id)) + + + def set_row(self, row_index, row_data): + """sets the 'row_index' row + pre: + type(row_data) == types.ListType + len(row_data) == len(self.col_names) + """ + self.data[row_index] = row_data + + + def set_row_by_id(self, row_id, row_data): + """sets the 'row_id' column + pre: + type(row_data) == types.ListType + len(row_data) == len(self.row_names) + Raises a KeyError if row_id is not found + """ + try: + row_index = self.row_names.index(row_id) + self.set_row(row_index, row_data) + except ValueError: + raise KeyError('Row (%s) not found in table' % (row_id)) + + + def append_row(self, row_data, row_name=None): + """Appends a row to the table + pre: + type(row_data) == types.ListType + len(row_data) == len(self.col_names) + """ + row_name = row_name or self._next_row_name() + self.row_names.append(row_name) + self.data.append(row_data) + return len(self.data) - 1 + + def insert_row(self, index, row_data, row_name=None): + """Appends row_data before 'index' in the table. To make 'insert' + behave like 'list.insert', inserting in an out of range index will + insert row_data to the end of the list + pre: + type(row_data) == types.ListType + len(row_data) == len(self.col_names) + """ + row_name = row_name or self._next_row_name() + self.row_names.insert(index, row_name) + self.data.insert(index, row_data) + + + def delete_row(self, index): + """Deletes the 'index' row in the table, and returns it. + Raises an IndexError if index is out of range + """ + self.row_names.pop(index) + return self.data.pop(index) + + + def delete_row_by_id(self, row_id): + """Deletes the 'row_id' row in the table. + Raises a KeyError if row_id was not found. 
+ """ + try: + row_index = self.row_names.index(row_id) + self.delete_row(row_index) + except ValueError: + raise KeyError('Row (%s) not found in table' % (row_id)) + + + def set_column(self, col_index, col_data): + """sets the 'col_index' column + pre: + type(col_data) == types.ListType + len(col_data) == len(self.row_names) + """ + + for row_index, cell_data in enumerate(col_data): + self.data[row_index][col_index] = cell_data + + + def set_column_by_id(self, col_id, col_data): + """sets the 'col_id' column + pre: + type(col_data) == types.ListType + len(col_data) == len(self.col_names) + Raises a KeyError if col_id is not found + """ + try: + col_index = self.col_names.index(col_id) + self.set_column(col_index, col_data) + except ValueError: + raise KeyError('Column (%s) not found in table' % (col_id)) + + + def append_column(self, col_data, col_name): + """Appends the 'col_index' column + pre: + type(col_data) == types.ListType + len(col_data) == len(self.row_names) + """ + self.col_names.append(col_name) + for row_index, cell_data in enumerate(col_data): + self.data[row_index].append(cell_data) + + + def insert_column(self, index, col_data, col_name): + """Appends col_data before 'index' in the table. To make 'insert' + behave like 'list.insert', inserting in an out of range index will + insert col_data to the end of the list + pre: + type(col_data) == types.ListType + len(col_data) == len(self.row_names) + """ + self.col_names.insert(index, col_name) + for row_index, cell_data in enumerate(col_data): + self.data[row_index].insert(index, cell_data) + + + def delete_column(self, index): + """Deletes the 'index' column in the table, and returns it. + Raises an IndexError if index is out of range + """ + self.col_names.pop(index) + return [row.pop(index) for row in self.data] + + + def delete_column_by_id(self, col_id): + """Deletes the 'col_id' col in the table. + Raises a KeyError if col_id was not found. 
+ """ + try: + col_index = self.col_names.index(col_id) + self.delete_column(col_index) + except ValueError: + raise KeyError('Column (%s) not found in table' % (col_id)) + + + ## The 'getter' part ####################################################### + + def get_shape(self): + """Returns a tuple which represents the table's shape + """ + return len(self.row_names), len(self.col_names) + shape = property(get_shape) + + def __getitem__(self, indices): + """provided for convenience""" + rows, multirows = None, False + cols, multicols = None, False + if isinstance(indices, tuple): + rows = indices[0] + if len(indices) > 1: + cols = indices[1] + else: + rows = indices + # define row slice + if isinstance(rows, str): + try: + rows = self.row_names.index(rows) + except ValueError: + raise KeyError("Row (%s) not found in table" % (rows)) + if isinstance(rows, int): + rows = slice(rows, rows+1) + multirows = False + else: + rows = slice(None) + multirows = True + # define col slice + if isinstance(cols, str): + try: + cols = self.col_names.index(cols) + except ValueError: + raise KeyError("Column (%s) not found in table" % (cols)) + if isinstance(cols, int): + cols = slice(cols, cols+1) + multicols = False + else: + cols = slice(None) + multicols = True + # get sub-table + tab = Table() + tab.default_value = self.default_value + tab.create_rows(self.row_names[rows]) + tab.create_columns(self.col_names[cols]) + for idx, row in enumerate(self.data[rows]): + tab.set_row(idx, row[cols]) + if multirows : + if multicols: + return tab + else: + return [item[0] for item in tab.data] + else: + if multicols: + return tab.data[0] + else: + return tab.data[0][0] + + def get_cell_by_ids(self, row_id, col_id): + """Returns the element at [row_id][col_id] + """ + try: + row_index = self.row_names.index(row_id) + except ValueError: + raise KeyError("Row (%s) not found in table" % (row_id)) + else: + try: + col_index = self.col_names.index(col_id) + except ValueError: + raise 
KeyError("Column (%s) not found in table" % (col_id)) + return self.data[row_index][col_index] + + def get_row_by_id(self, row_id): + """Returns the 'row_id' row + """ + try: + row_index = self.row_names.index(row_id) + except ValueError: + raise KeyError("Row (%s) not found in table" % (row_id)) + return self.data[row_index] + + def get_column_by_id(self, col_id, distinct=False): + """Returns the 'col_id' col + """ + try: + col_index = self.col_names.index(col_id) + except ValueError: + raise KeyError("Column (%s) not found in table" % (col_id)) + return self.get_column(col_index, distinct) + + def get_columns(self): + """Returns all the columns in the table + """ + return [self[:, index] for index in range(len(self.col_names))] + + def get_column(self, col_index, distinct=False): + """get a column by index""" + col = [row[col_index] for row in self.data] + if distinct: + col = list(set(col)) + return col + + def apply_stylesheet(self, stylesheet): + """Applies the stylesheet to this table + """ + for instruction in stylesheet.instructions: + eval(instruction) + + + def transpose(self): + """Keeps the self object intact, and returns the transposed (rotated) + table. + """ + transposed = Table() + transposed.create_rows(self.col_names) + transposed.create_columns(self.row_names) + for col_index, column in enumerate(self.get_columns()): + transposed.set_row(col_index, column) + return transposed + + + def pprint(self): + """returns a string representing the table in a pretty + printed 'text' format. 
+ """ + # The maximum row name (to know the start_index of the first col) + max_row_name = 0 + for row_name in self.row_names: + if len(row_name) > max_row_name: + max_row_name = len(row_name) + col_start = max_row_name + 5 + + lines = [] + # Build the 'first' line <=> the col_names one + # The first cell <=> an empty one + col_names_line = [' '*col_start] + for col_name in self.col_names: + col_names_line.append(col_name + ' '*5) + lines.append('|' + '|'.join(col_names_line) + '|') + max_line_length = len(lines[0]) + + # Build the table + for row_index, row in enumerate(self.data): + line = [] + # First, build the row_name's cell + row_name = self.row_names[row_index] + line.append(row_name + ' '*(col_start-len(row_name))) + + # Then, build all the table's cell for this line. + for col_index, cell in enumerate(row): + col_name_length = len(self.col_names[col_index]) + 5 + data = str(cell) + line.append(data + ' '*(col_name_length - len(data))) + lines.append('|' + '|'.join(line) + '|') + if len(lines[-1]) > max_line_length: + max_line_length = len(lines[-1]) + + # Wrap the table with '-' to make a frame + lines.insert(0, '-'*max_line_length) + lines.append('-'*max_line_length) + return '\n'.join(lines) + + + def __repr__(self): + return repr(self.data) + + def as_text(self): + data = [] + # We must convert cells into strings before joining them + for row in self.data: + data.append([str(cell) for cell in row]) + lines = ['\t'.join(row) for row in data] + return '\n'.join(lines) + + + +class TableStyle: + """Defines a table's style + """ + + def __init__(self, table): + + self._table = table + self.size = dict([(col_name, '1*') for col_name in table.col_names]) + # __row_column__ is a special key to define the first column which + # actually has no name (<=> left most column <=> row names column) + self.size['__row_column__'] = '1*' + self.alignment = dict([(col_name, 'right') + for col_name in table.col_names]) + self.alignment['__row_column__'] = 'right' + + # We 
shouldn't have to create an entry for + # the 1st col (the row_column one) + self.units = dict([(col_name, '') for col_name in table.col_names]) + self.units['__row_column__'] = '' + + # XXX FIXME : params order should be reversed for all set() methods + def set_size(self, value, col_id): + """sets the size of the specified col_id to value + """ + self.size[col_id] = value + + def set_size_by_index(self, value, col_index): + """Allows to set the size according to the column index rather than + using the column's id. + BE CAREFUL : the '0' column is the '__row_column__' one ! + """ + if col_index == 0: + col_id = '__row_column__' + else: + col_id = self._table.col_names[col_index-1] + + self.size[col_id] = value + + + def set_alignment(self, value, col_id): + """sets the alignment of the specified col_id to value + """ + self.alignment[col_id] = value + + + def set_alignment_by_index(self, value, col_index): + """Allows to set the alignment according to the column index rather than + using the column's id. + BE CAREFUL : the '0' column is the '__row_column__' one ! + """ + if col_index == 0: + col_id = '__row_column__' + else: + col_id = self._table.col_names[col_index-1] + + self.alignment[col_id] = value + + + def set_unit(self, value, col_id): + """sets the unit of the specified col_id to value + """ + self.units[col_id] = value + + + def set_unit_by_index(self, value, col_index): + """Allows to set the unit according to the column index rather than + using the column's id. + BE CAREFUL : the '0' column is the '__row_column__' one ! 
+ (Note that in the 'unit' case, you shouldn't have to set a unit + for the 1st column (the __row__column__ one)) + """ + if col_index == 0: + col_id = '__row_column__' + else: + col_id = self._table.col_names[col_index-1] + + self.units[col_id] = value + + + def get_size(self, col_id): + """Returns the size of the specified col_id + """ + return self.size[col_id] + + + def get_size_by_index(self, col_index): + """Allows to get the size according to the column index rather than + using the column's id. + BE CAREFUL : the '0' column is the '__row_column__' one ! + """ + if col_index == 0: + col_id = '__row_column__' + else: + col_id = self._table.col_names[col_index-1] + + return self.size[col_id] + + + def get_alignment(self, col_id): + """Returns the alignment of the specified col_id + """ + return self.alignment[col_id] + + + def get_alignment_by_index(self, col_index): + """Allors to get the alignment according to the column index rather than + using the column's id. + BE CAREFUL : the '0' column is the '__row_column__' one ! + """ + if col_index == 0: + col_id = '__row_column__' + else: + col_id = self._table.col_names[col_index-1] + + return self.alignment[col_id] + + + def get_unit(self, col_id): + """Returns the unit of the specified col_id + """ + return self.units[col_id] + + + def get_unit_by_index(self, col_index): + """Allors to get the unit according to the column index rather than + using the column's id. + BE CAREFUL : the '0' column is the '__row_column__' one ! + """ + if col_index == 0: + col_id = '__row_column__' + else: + col_id = self._table.col_names[col_index-1] + + return self.units[col_id] + + +import re +CELL_PROG = re.compile("([0-9]+)_([0-9]+)") + +class TableStyleSheet: + """A simple Table stylesheet + Rules are expressions where cells are defined by the row_index + and col_index separated by an underscore ('_'). 
+ For example, suppose you want to say that the (2,5) cell must be + the sum of its two preceding cells in the row, you would create + the following rule : + 2_5 = 2_3 + 2_4 + You can also use all the math.* operations you want. For example: + 2_5 = sqrt(2_3**2 + 2_4**2) + """ + + def __init__(self, rules = None): + rules = rules or [] + self.rules = [] + self.instructions = [] + for rule in rules: + self.add_rule(rule) + + + def add_rule(self, rule): + """Adds a rule to the stylesheet rules + """ + try: + source_code = ['from math import *'] + source_code.append(CELL_PROG.sub(r'self.data[\1][\2]', rule)) + self.instructions.append(compile('\n'.join(source_code), + 'table.py', 'exec')) + self.rules.append(rule) + except SyntaxError: + print("Bad Stylesheet Rule : %s [skipped]" % rule) + + + def add_rowsum_rule(self, dest_cell, row_index, start_col, end_col): + """Creates and adds a rule to sum over the row at row_index from + start_col to end_col. + dest_cell is a tuple of two elements (x,y) of the destination cell + No check is done for indexes ranges. + pre: + start_col >= 0 + end_col > start_col + """ + cell_list = ['%d_%d'%(row_index, index) for index in range(start_col, + end_col + 1)] + rule = '%d_%d=' % dest_cell + '+'.join(cell_list) + self.add_rule(rule) + + + def add_rowavg_rule(self, dest_cell, row_index, start_col, end_col): + """Creates and adds a rule to make the row average (from start_col + to end_col) + dest_cell is a tuple of two elements (x,y) of the destination cell + No check is done for indexes ranges. + pre: + start_col >= 0 + end_col > start_col + """ + cell_list = ['%d_%d'%(row_index, index) for index in range(start_col, + end_col + 1)] + num = (end_col - start_col + 1) + rule = '%d_%d=' % dest_cell + '('+'+'.join(cell_list)+')/%f'%num + self.add_rule(rule) + + + def add_colsum_rule(self, dest_cell, col_index, start_row, end_row): + """Creates and adds a rule to sum over the col at col_index from + start_row to end_row. 
+ dest_cell is a tuple of two elements (x,y) of the destination cell + No check is done for indexes ranges. + pre: + start_row >= 0 + end_row > start_row + """ + cell_list = ['%d_%d'%(index, col_index) for index in range(start_row, + end_row + 1)] + rule = '%d_%d=' % dest_cell + '+'.join(cell_list) + self.add_rule(rule) + + + def add_colavg_rule(self, dest_cell, col_index, start_row, end_row): + """Creates and adds a rule to make the col average (from start_row + to end_row) + dest_cell is a tuple of two elements (x,y) of the destination cell + No check is done for indexes ranges. + pre: + start_row >= 0 + end_row > start_row + """ + cell_list = ['%d_%d'%(index, col_index) for index in range(start_row, + end_row + 1)] + num = (end_row - start_row + 1) + rule = '%d_%d=' % dest_cell + '('+'+'.join(cell_list)+')/%f'%num + self.add_rule(rule) + + + +class TableCellRenderer: + """Defines a simple text renderer + """ + + def __init__(self, **properties): + """keywords should be properties with an associated boolean as value. + For example : + renderer = TableCellRenderer(units = True, alignment = False) + An unspecified property will have a 'False' value by default. 
+ Possible properties are : + alignment, unit + """ + self.properties = properties + + + def render_cell(self, cell_coord, table, table_style): + """Renders the cell at 'cell_coord' in the table, using table_style + """ + row_index, col_index = cell_coord + cell_value = table.data[row_index][col_index] + final_content = self._make_cell_content(cell_value, + table_style, col_index +1) + return self._render_cell_content(final_content, + table_style, col_index + 1) + + + def render_row_cell(self, row_name, table, table_style): + """Renders the cell for 'row_id' row + """ + cell_value = row_name + return self._render_cell_content(cell_value, table_style, 0) + + + def render_col_cell(self, col_name, table, table_style): + """Renders the cell for 'col_id' row + """ + cell_value = col_name + col_index = table.col_names.index(col_name) + return self._render_cell_content(cell_value, table_style, col_index +1) + + + + def _render_cell_content(self, content, table_style, col_index): + """Makes the appropriate rendering for this cell content. 
+ Rendering properties will be searched using the + *table_style.get_xxx_by_index(col_index)' methods + + **This method should be overridden in the derived renderer classes.** + """ + return content + + + def _make_cell_content(self, cell_content, table_style, col_index): + """Makes the cell content (adds decoration data, like units for + example) + """ + final_content = cell_content + if 'skip_zero' in self.properties: + replacement_char = self.properties['skip_zero'] + else: + replacement_char = 0 + if replacement_char and final_content == 0: + return replacement_char + + try: + units_on = self.properties['units'] + if units_on: + final_content = self._add_unit( + cell_content, table_style, col_index) + except KeyError: + pass + + return final_content + + + def _add_unit(self, cell_content, table_style, col_index): + """Adds unit to the cell_content if needed + """ + unit = table_style.get_unit_by_index(col_index) + return str(cell_content) + " " + unit + + + +class DocbookRenderer(TableCellRenderer): + """Defines how to render a cell for a docboook table + """ + + def define_col_header(self, col_index, table_style): + """Computes the colspec element according to the style + """ + size = table_style.get_size_by_index(col_index) + return '\n' % \ + (col_index, size) + + + def _render_cell_content(self, cell_content, table_style, col_index): + """Makes the appropriate rendering for this cell content. + Rendering properties will be searched using the + table_style.get_xxx_by_index(col_index)' methods. 
+ """ + try: + align_on = self.properties['alignment'] + alignment = table_style.get_alignment_by_index(col_index) + if align_on: + return "%s\n" % \ + (alignment, cell_content) + except KeyError: + # KeyError <=> Default alignment + return "%s\n" % cell_content + + +class TableWriter: + """A class to write tables + """ + + def __init__(self, stream, table, style, **properties): + self._stream = stream + self.style = style or TableStyle(table) + self._table = table + self.properties = properties + self.renderer = None + + + def set_style(self, style): + """sets the table's associated style + """ + self.style = style + + + def set_renderer(self, renderer): + """sets the way to render cell + """ + self.renderer = renderer + + + def update_properties(self, **properties): + """Updates writer's properties (for cell rendering) + """ + self.properties.update(properties) + + + def write_table(self, title = ""): + """Writes the table + """ + raise NotImplementedError("write_table must be implemented !") + + + +class DocbookTableWriter(TableWriter): + """Defines an implementation of TableWriter to write a table in Docbook + """ + + def _write_headers(self): + """Writes col headers + """ + # Define col_headers (colstpec elements) + for col_index in range(len(self._table.col_names)+1): + self._stream.write(self.renderer.define_col_header(col_index, + self.style)) + + self._stream.write("\n\n") + # XXX FIXME : write an empty entry <=> the first (__row_column) column + self._stream.write('\n') + for col_name in self._table.col_names: + self._stream.write(self.renderer.render_col_cell( + col_name, self._table, + self.style)) + + self._stream.write("\n\n") + + + def _write_body(self): + """Writes the table body + """ + self._stream.write('\n') + + for row_index, row in enumerate(self._table.data): + self._stream.write('\n') + row_name = self._table.row_names[row_index] + # Write the first entry (row_name) + self._stream.write(self.renderer.render_row_cell(row_name, + self._table, 
+ self.style)) + + for col_index, cell in enumerate(row): + self._stream.write(self.renderer.render_cell( + (row_index, col_index), + self._table, self.style)) + + self._stream.write('\n') + + self._stream.write('\n') + + + def write_table(self, title = ""): + """Writes the table + """ + self._stream.write('\nCodestin Search App\n'%(title)) + self._stream.write( + '\n'% + (len(self._table.col_names)+1)) + self._write_headers() + self._write_body() + + self._stream.write('\n
\n') + + diff --git a/pymode/libs/logilab/common/tasksqueue.py b/pymode/libs/logilab/common/tasksqueue.py new file mode 100644 index 00000000..ed74cf5a --- /dev/null +++ b/pymode/libs/logilab/common/tasksqueue.py @@ -0,0 +1,101 @@ +# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved. +# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr +# +# This file is part of logilab-common. +# +# logilab-common is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 2.1 of the License, or (at your option) any +# later version. +# +# logilab-common is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License along +# with logilab-common. If not, see . 
+"""Prioritized tasks queue""" + +__docformat__ = "restructuredtext en" + +from bisect import insort_left + +from six.moves import queue + +LOW = 0 +MEDIUM = 10 +HIGH = 100 + +PRIORITY = { + 'LOW': LOW, + 'MEDIUM': MEDIUM, + 'HIGH': HIGH, + } +REVERSE_PRIORITY = dict((values, key) for key, values in PRIORITY.items()) + + + +class PrioritizedTasksQueue(queue.Queue): + + def _init(self, maxsize): + """Initialize the queue representation""" + self.maxsize = maxsize + # ordered list of task, from the lowest to the highest priority + self.queue = [] + + def _put(self, item): + """Put a new item in the queue""" + for i, task in enumerate(self.queue): + # equivalent task + if task == item: + # if new task has a higher priority, remove the one already + # queued so the new priority will be considered + if task < item: + item.merge(task) + del self.queue[i] + break + # else keep it so current order is kept + task.merge(item) + return + insort_left(self.queue, item) + + def _get(self): + """Get an item from the queue""" + return self.queue.pop() + + def __iter__(self): + return iter(self.queue) + + def remove(self, tid): + """remove a specific task from the queue""" + # XXX acquire lock + for i, task in enumerate(self): + if task.id == tid: + self.queue.pop(i) + return + raise ValueError('not task of id %s in queue' % tid) + +class Task(object): + def __init__(self, tid, priority=LOW): + # task id + self.id = tid + # task priority + self.priority = priority + + def __repr__(self): + return '' % (self.id, id(self)) + + def __cmp__(self, other): + return cmp(self.priority, other.priority) + + def __lt__(self, other): + return self.priority < other.priority + + def __eq__(self, other): + return self.id == other.id + + __hash__ = object.__hash__ + + def merge(self, other): + pass diff --git a/pymode/libs/logilab/common/testlib.py b/pymode/libs/logilab/common/testlib.py new file mode 100644 index 00000000..a6b4b1e1 --- /dev/null +++ b/pymode/libs/logilab/common/testlib.py @@ -0,0 
+1,1338 @@ +# -*- coding: utf-8 -*- +# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved. +# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr +# +# This file is part of logilab-common. +# +# logilab-common is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 2.1 of the License, or (at your option) any +# later version. +# +# logilab-common is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License along +# with logilab-common. If not, see . +"""Run tests. + +This will find all modules whose name match a given prefix in the test +directory, and run them. Various command line options provide +additional facilities. + +Command line options: + + -v verbose -- run tests in verbose mode with output to stdout + -q quiet -- don't print anything except if a test fails + -t testdir -- directory where the tests will be found + -x exclude -- add a test to exclude + -p profile -- profiled execution + -d dbc -- enable design-by-contract + -m match -- only run test matching the tag pattern which follow + +If no non-option arguments are present, prefixes used are 'test', +'regrtest', 'smoketest' and 'unittest'. 
+ +""" + +from __future__ import print_function + +__docformat__ = "restructuredtext en" +# modified copy of some functions from test/regrtest.py from PyXml +# disable camel case warning +# pylint: disable=C0103 + +import sys +import os, os.path as osp +import re +import traceback +import inspect +import difflib +import tempfile +import math +import warnings +from shutil import rmtree +from operator import itemgetter +from itertools import dropwhile +from inspect import isgeneratorfunction + +from six import string_types +from six.moves import builtins, range, configparser, input + +from logilab.common.deprecation import deprecated + +import unittest as unittest_legacy +if not getattr(unittest_legacy, "__package__", None): + try: + import unittest2 as unittest + from unittest2 import SkipTest + except ImportError: + raise ImportError("You have to install python-unittest2 to use %s" % __name__) +else: + import unittest + from unittest import SkipTest + +from functools import wraps + +from logilab.common.debugger import Debugger, colorize_source +from logilab.common.decorators import cached, classproperty +from logilab.common import textutils + + +__all__ = ['main', 'unittest_main', 'find_tests', 'run_test', 'spawn'] + +DEFAULT_PREFIXES = ('test', 'regrtest', 'smoketest', 'unittest', + 'func', 'validation') + +is_generator = deprecated('[lgc 0.63] use inspect.isgeneratorfunction')(isgeneratorfunction) + +# used by unittest to count the number of relevant levels in the traceback +__unittest = 1 + + +def with_tempdir(callable): + """A decorator ensuring no temporary file left when the function return + Work only for temporary file created with the tempfile module""" + if isgeneratorfunction(callable): + def proxy(*args, **kwargs): + old_tmpdir = tempfile.gettempdir() + new_tmpdir = tempfile.mkdtemp(prefix="temp-lgc-") + tempfile.tempdir = new_tmpdir + try: + for x in callable(*args, **kwargs): + yield x + finally: + try: + rmtree(new_tmpdir, ignore_errors=True) + 
finally: + tempfile.tempdir = old_tmpdir + return proxy + + @wraps(callable) + def proxy(*args, **kargs): + + old_tmpdir = tempfile.gettempdir() + new_tmpdir = tempfile.mkdtemp(prefix="temp-lgc-") + tempfile.tempdir = new_tmpdir + try: + return callable(*args, **kargs) + finally: + try: + rmtree(new_tmpdir, ignore_errors=True) + finally: + tempfile.tempdir = old_tmpdir + return proxy + +def in_tempdir(callable): + """A decorator moving the enclosed function inside the tempfile.tempfdir + """ + @wraps(callable) + def proxy(*args, **kargs): + + old_cwd = os.getcwd() + os.chdir(tempfile.tempdir) + try: + return callable(*args, **kargs) + finally: + os.chdir(old_cwd) + return proxy + +def within_tempdir(callable): + """A decorator run the enclosed function inside a tmpdir removed after execution + """ + proxy = with_tempdir(in_tempdir(callable)) + proxy.__name__ = callable.__name__ + return proxy + +def find_tests(testdir, + prefixes=DEFAULT_PREFIXES, suffix=".py", + excludes=(), + remove_suffix=True): + """ + Return a list of all applicable test modules. 
+ """ + tests = [] + for name in os.listdir(testdir): + if not suffix or name.endswith(suffix): + for prefix in prefixes: + if name.startswith(prefix): + if remove_suffix and name.endswith(suffix): + name = name[:-len(suffix)] + if name not in excludes: + tests.append(name) + tests.sort() + return tests + + +## PostMortem Debug facilities ##### +def start_interactive_mode(result): + """starts an interactive shell so that the user can inspect errors + """ + debuggers = result.debuggers + descrs = result.error_descrs + result.fail_descrs + if len(debuggers) == 1: + # don't ask for test name if there's only one failure + debuggers[0].start() + else: + while True: + testindex = 0 + print("Choose a test to debug:") + # order debuggers in the same way than errors were printed + print("\n".join(['\t%s : %s' % (i, descr) for i, (_, descr) + in enumerate(descrs)])) + print("Type 'exit' (or ^D) to quit") + print() + try: + todebug = input('Enter a test name: ') + if todebug.strip().lower() == 'exit': + print() + break + else: + try: + testindex = int(todebug) + debugger = debuggers[descrs[testindex][0]] + except (ValueError, IndexError): + print("ERROR: invalid test number %r" % (todebug, )) + else: + debugger.start() + except (EOFError, KeyboardInterrupt): + print() + break + + +# test utils ################################################################## + +class SkipAwareTestResult(unittest._TextTestResult): + + def __init__(self, stream, descriptions, verbosity, + exitfirst=False, pdbmode=False, cvg=None, colorize=False): + super(SkipAwareTestResult, self).__init__(stream, + descriptions, verbosity) + self.skipped = [] + self.debuggers = [] + self.fail_descrs = [] + self.error_descrs = [] + self.exitfirst = exitfirst + self.pdbmode = pdbmode + self.cvg = cvg + self.colorize = colorize + self.pdbclass = Debugger + self.verbose = verbosity > 1 + + def descrs_for(self, flavour): + return getattr(self, '%s_descrs' % flavour.lower()) + + def _create_pdb(self, test_descr, 
flavour): + self.descrs_for(flavour).append( (len(self.debuggers), test_descr) ) + if self.pdbmode: + self.debuggers.append(self.pdbclass(sys.exc_info()[2])) + + def _iter_valid_frames(self, frames): + """only consider non-testlib frames when formatting traceback""" + lgc_testlib = osp.abspath(__file__) + std_testlib = osp.abspath(unittest.__file__) + invalid = lambda fi: osp.abspath(fi[1]) in (lgc_testlib, std_testlib) + for frameinfo in dropwhile(invalid, frames): + yield frameinfo + + def _exc_info_to_string(self, err, test): + """Converts a sys.exc_info()-style tuple of values into a string. + + This method is overridden here because we want to colorize + lines if --color is passed, and display local variables if + --verbose is passed + """ + exctype, exc, tb = err + output = ['Traceback (most recent call last)'] + frames = inspect.getinnerframes(tb) + colorize = self.colorize + frames = enumerate(self._iter_valid_frames(frames)) + for index, (frame, filename, lineno, funcname, ctx, ctxindex) in frames: + filename = osp.abspath(filename) + if ctx is None: # pyc files or C extensions for instance + source = '' + else: + source = ''.join(ctx) + if colorize: + filename = textutils.colorize_ansi(filename, 'magenta') + source = colorize_source(source) + output.append(' File "%s", line %s, in %s' % (filename, lineno, funcname)) + output.append(' %s' % source.strip()) + if self.verbose: + output.append('%r == %r' % (dir(frame), test.__module__)) + output.append('') + output.append(' ' + ' local variables '.center(66, '-')) + for varname, value in sorted(frame.f_locals.items()): + output.append(' %s: %r' % (varname, value)) + if varname == 'self': # special handy processing for self + for varname, value in sorted(vars(value).items()): + output.append(' self.%s: %r' % (varname, value)) + output.append(' ' + '-' * 66) + output.append('') + output.append(''.join(traceback.format_exception_only(exctype, exc))) + return '\n'.join(output) + + def addError(self, test, err): + 
"""err -> (exc_type, exc, tcbk)""" + exc_type, exc, _ = err + if isinstance(exc, SkipTest): + assert exc_type == SkipTest + self.addSkip(test, exc) + else: + if self.exitfirst: + self.shouldStop = True + descr = self.getDescription(test) + super(SkipAwareTestResult, self).addError(test, err) + self._create_pdb(descr, 'error') + + def addFailure(self, test, err): + if self.exitfirst: + self.shouldStop = True + descr = self.getDescription(test) + super(SkipAwareTestResult, self).addFailure(test, err) + self._create_pdb(descr, 'fail') + + def addSkip(self, test, reason): + self.skipped.append((test, reason)) + if self.showAll: + self.stream.writeln("SKIPPED") + elif self.dots: + self.stream.write('S') + + def printErrors(self): + super(SkipAwareTestResult, self).printErrors() + self.printSkippedList() + + def printSkippedList(self): + # format (test, err) compatible with unittest2 + for test, err in self.skipped: + descr = self.getDescription(test) + self.stream.writeln(self.separator1) + self.stream.writeln("%s: %s" % ('SKIPPED', descr)) + self.stream.writeln("\t%s" % err) + + def printErrorList(self, flavour, errors): + for (_, descr), (test, err) in zip(self.descrs_for(flavour), errors): + self.stream.writeln(self.separator1) + self.stream.writeln("%s: %s" % (flavour, descr)) + self.stream.writeln(self.separator2) + self.stream.writeln(err) + self.stream.writeln('no stdout'.center(len(self.separator2))) + self.stream.writeln('no stderr'.center(len(self.separator2))) + +# Add deprecation warnings about new api used by module level fixtures in unittest2 +# http://www.voidspace.org.uk/python/articles/unittest2.shtml#setupmodule-and-teardownmodule +class _DebugResult(object): # simplify import statement among unittest flavors.. + "Used by the TestSuite to hold previous class when running in debug." 
+ _previousTestClass = None + _moduleSetUpFailed = False + shouldStop = False + +# backward compatibility: TestSuite might be imported from lgc.testlib +TestSuite = unittest.TestSuite + +class keywords(dict): + """Keyword args (**kwargs) support for generative tests.""" + +class starargs(tuple): + """Variable arguments (*args) for generative tests.""" + def __new__(cls, *args): + return tuple.__new__(cls, args) + +unittest_main = unittest.main + + +class InnerTestSkipped(SkipTest): + """raised when a test is skipped""" + pass + +def parse_generative_args(params): + args = [] + varargs = () + kwargs = {} + flags = 0 # 2 <=> starargs, 4 <=> kwargs + for param in params: + if isinstance(param, starargs): + varargs = param + if flags: + raise TypeError('found starargs after keywords !') + flags |= 2 + args += list(varargs) + elif isinstance(param, keywords): + kwargs = param + if flags & 4: + raise TypeError('got multiple keywords parameters') + flags |= 4 + elif flags & 2 or flags & 4: + raise TypeError('found parameters after kwargs or args') + else: + args.append(param) + + return args, kwargs + + +class InnerTest(tuple): + def __new__(cls, name, *data): + instance = tuple.__new__(cls, data) + instance.name = name + return instance + +class Tags(set): + """A set of tag able validate an expression""" + + def __init__(self, *tags, **kwargs): + self.inherit = kwargs.pop('inherit', True) + if kwargs: + raise TypeError("%s are an invalid keyword argument for this function" % kwargs.keys()) + + if len(tags) == 1 and not isinstance(tags[0], string_types): + tags = tags[0] + super(Tags, self).__init__(tags, **kwargs) + + def __getitem__(self, key): + return key in self + + def match(self, exp): + return eval(exp, {}, self) + + def __or__(self, other): + return Tags(*super(Tags, self).__or__(other)) + + +# duplicate definition from unittest2 of the _deprecate decorator +def _deprecate(original_func): + def deprecated_func(*args, **kwargs): + warnings.warn( + ('Please use %s 
instead.' % original_func.__name__), + DeprecationWarning, 2) + return original_func(*args, **kwargs) + return deprecated_func + +class TestCase(unittest.TestCase): + """A unittest.TestCase extension with some additional methods.""" + maxDiff = None + pdbclass = Debugger + tags = Tags() + + def __init__(self, methodName='runTest'): + super(TestCase, self).__init__(methodName) + self.__exc_info = sys.exc_info + self.__testMethodName = self._testMethodName + self._current_test_descr = None + self._options_ = None + + @classproperty + @cached + def datadir(cls): # pylint: disable=E0213 + """helper attribute holding the standard test's data directory + + NOTE: this is a logilab's standard + """ + mod = sys.modules[cls.__module__] + return osp.join(osp.dirname(osp.abspath(mod.__file__)), 'data') + # cache it (use a class method to cache on class since TestCase is + # instantiated for each test run) + + @classmethod + def datapath(cls, *fname): + """joins the object's datadir and `fname`""" + return osp.join(cls.datadir, *fname) + + def set_description(self, descr): + """sets the current test's description. 
+ This can be useful for generative tests because it allows to specify + a description per yield + """ + self._current_test_descr = descr + + # override default's unittest.py feature + def shortDescription(self): + """override default unittest shortDescription to handle correctly + generative tests + """ + if self._current_test_descr is not None: + return self._current_test_descr + return super(TestCase, self).shortDescription() + + def quiet_run(self, result, func, *args, **kwargs): + try: + func(*args, **kwargs) + except (KeyboardInterrupt, SystemExit): + raise + except unittest.SkipTest as e: + if hasattr(result, 'addSkip'): + result.addSkip(self, str(e)) + else: + warnings.warn("TestResult has no addSkip method, skips not reported", + RuntimeWarning, 2) + result.addSuccess(self) + return False + except: + result.addError(self, self.__exc_info()) + return False + return True + + def _get_test_method(self): + """return the test method""" + return getattr(self, self._testMethodName) + + def optval(self, option, default=None): + """return the option value or default if the option is not define""" + return getattr(self._options_, option, default) + + def __call__(self, result=None, runcondition=None, options=None): + """rewrite TestCase.__call__ to support generative tests + This is mostly a copy/paste from unittest.py (i.e same + variable names, same logic, except for the generative tests part) + """ + from logilab.common.pytest import FILE_RESTART + if result is None: + result = self.defaultTestResult() + result.pdbclass = self.pdbclass + self._options_ = options + # if result.cvg: + # result.cvg.start() + testMethod = self._get_test_method() + if (getattr(self.__class__, "__unittest_skip__", False) or + getattr(testMethod, "__unittest_skip__", False)): + # If the class or method was skipped. 
+ try: + skip_why = (getattr(self.__class__, '__unittest_skip_why__', '') + or getattr(testMethod, '__unittest_skip_why__', '')) + self._addSkip(result, skip_why) + finally: + result.stopTest(self) + return + if runcondition and not runcondition(testMethod): + return # test is skipped + result.startTest(self) + try: + if not self.quiet_run(result, self.setUp): + return + generative = isgeneratorfunction(testMethod) + # generative tests + if generative: + self._proceed_generative(result, testMethod, + runcondition) + else: + status = self._proceed(result, testMethod) + success = (status == 0) + if not self.quiet_run(result, self.tearDown): + return + if not generative and success: + if hasattr(options, "exitfirst") and options.exitfirst: + # add this test to restart file + try: + restartfile = open(FILE_RESTART, 'a') + try: + descr = '.'.join((self.__class__.__module__, + self.__class__.__name__, + self._testMethodName)) + restartfile.write(descr+os.linesep) + finally: + restartfile.close() + except Exception: + print("Error while saving succeeded test into", + osp.join(os.getcwd(), FILE_RESTART), + file=sys.__stderr__) + raise + result.addSuccess(self) + finally: + # if result.cvg: + # result.cvg.stop() + result.stopTest(self) + + def _proceed_generative(self, result, testfunc, runcondition=None): + # cancel startTest()'s increment + result.testsRun -= 1 + success = True + try: + for params in testfunc(): + if runcondition and not runcondition(testfunc, + skipgenerator=False): + if not (isinstance(params, InnerTest) + and runcondition(params)): + continue + if not isinstance(params, (tuple, list)): + params = (params, ) + func = params[0] + args, kwargs = parse_generative_args(params[1:]) + # increment test counter manually + result.testsRun += 1 + status = self._proceed(result, func, args, kwargs) + if status == 0: + result.addSuccess(self) + success = True + else: + success = False + # XXX Don't stop anymore if an error occured + #if status == 2: + # 
result.shouldStop = True + if result.shouldStop: # either on error or on exitfirst + error + break + except: + # if an error occurs between two yield + result.addError(self, self.__exc_info()) + success = False + return success + + def _proceed(self, result, testfunc, args=(), kwargs=None): + """proceed the actual test + returns 0 on success, 1 on failure, 2 on error + + Note: addSuccess can't be called here because we have to wait + for tearDown to be successfully executed to declare the test as + successful + """ + kwargs = kwargs or {} + try: + testfunc(*args, **kwargs) + except self.failureException: + result.addFailure(self, self.__exc_info()) + return 1 + except KeyboardInterrupt: + raise + except InnerTestSkipped as e: + result.addSkip(self, e) + return 1 + except SkipTest as e: + result.addSkip(self, e) + return 0 + except: + result.addError(self, self.__exc_info()) + return 2 + return 0 + + def defaultTestResult(self): + """return a new instance of the defaultTestResult""" + return SkipAwareTestResult() + + skip = _deprecate(unittest.TestCase.skipTest) + assertEquals = _deprecate(unittest.TestCase.assertEqual) + assertNotEquals = _deprecate(unittest.TestCase.assertNotEqual) + assertAlmostEquals = _deprecate(unittest.TestCase.assertAlmostEqual) + assertNotAlmostEquals = _deprecate(unittest.TestCase.assertNotAlmostEqual) + + def innerSkip(self, msg=None): + """mark a generative test as skipped for the reason""" + msg = msg or 'test was skipped' + raise InnerTestSkipped(msg) + + @deprecated('Please use assertDictEqual instead.') + def assertDictEquals(self, dict1, dict2, msg=None, context=None): + """compares two dicts + + If the two dict differ, the first difference is shown in the error + message + :param dict1: a Python Dictionary + :param dict2: a Python Dictionary + :param msg: custom message (String) in case of failure + """ + dict1 = dict(dict1) + msgs = [] + for key, value in dict2.items(): + try: + if dict1[key] != value: + msgs.append('%r != %r for 
key %r' % (dict1[key], value, + key)) + del dict1[key] + except KeyError: + msgs.append('missing %r key' % key) + if dict1: + msgs.append('dict2 is lacking %r' % dict1) + if msg: + self.failureException(msg) + elif msgs: + if context is not None: + base = '%s\n' % context + else: + base = '' + self.fail(base + '\n'.join(msgs)) + + @deprecated('Please use assertCountEqual instead.') + def assertUnorderedIterableEquals(self, got, expected, msg=None): + """compares two iterable and shows difference between both + + :param got: the unordered Iterable that we found + :param expected: the expected unordered Iterable + :param msg: custom message (String) in case of failure + """ + got, expected = list(got), list(expected) + self.assertSetEqual(set(got), set(expected), msg) + if len(got) != len(expected): + if msg is None: + msg = ['Iterable have the same elements but not the same number', + '\t\ti\t'] + got_count = {} + expected_count = {} + for element in got: + got_count[element] = got_count.get(element, 0) + 1 + for element in expected: + expected_count[element] = expected_count.get(element, 0) + 1 + # we know that got_count.key() == expected_count.key() + # because of assertSetEqual + for element, count in got_count.iteritems(): + other_count = expected_count[element] + if other_count != count: + msg.append('\t%s\t%s\t%s' % (element, other_count, count)) + + self.fail(msg) + + assertUnorderedIterableEqual = assertUnorderedIterableEquals + assertUnordIterEquals = assertUnordIterEqual = assertUnorderedIterableEqual + + @deprecated('Please use assertSetEqual instead.') + def assertSetEquals(self,got,expected, msg=None): + """compares two sets and shows difference between both + + Don't use it for iterables other than sets. 
+ + :param got: the Set that we found + :param expected: the second Set to be compared to the first one + :param msg: custom message (String) in case of failure + """ + + if not(isinstance(got, set) and isinstance(expected, set)): + warnings.warn("the assertSetEquals function if now intended for set only."\ + "use assertUnorderedIterableEquals instead.", + DeprecationWarning, 2) + return self.assertUnorderedIterableEquals(got, expected, msg) + + items={} + items['missing'] = expected - got + items['unexpected'] = got - expected + if any(items.itervalues()): + if msg is None: + msg = '\n'.join('%s:\n\t%s' % (key, "\n\t".join(str(value) for value in values)) + for key, values in items.iteritems() if values) + self.fail(msg) + + @deprecated('Please use assertListEqual instead.') + def assertListEquals(self, list_1, list_2, msg=None): + """compares two lists + + If the two list differ, the first difference is shown in the error + message + + :param list_1: a Python List + :param list_2: a second Python List + :param msg: custom message (String) in case of failure + """ + _l1 = list_1[:] + for i, value in enumerate(list_2): + try: + if _l1[0] != value: + from pprint import pprint + pprint(list_1) + pprint(list_2) + self.fail('%r != %r for index %d' % (_l1[0], value, i)) + del _l1[0] + except IndexError: + if msg is None: + msg = 'list_1 has only %d elements, not %s '\ + '(at least %r missing)'% (i, len(list_2), value) + self.fail(msg) + if _l1: + if msg is None: + msg = 'list_2 is lacking %r' % _l1 + self.fail(msg) + + @deprecated('Non-standard. Please use assertMultiLineEqual instead.') + def assertLinesEquals(self, string1, string2, msg=None, striplines=False): + """compare two strings and assert that the text lines of the strings + are equal. 
+ + :param string1: a String + :param string2: a String + :param msg: custom message (String) in case of failure + :param striplines: Boolean to trigger line stripping before comparing + """ + lines1 = string1.splitlines() + lines2 = string2.splitlines() + if striplines: + lines1 = [l.strip() for l in lines1] + lines2 = [l.strip() for l in lines2] + self.assertListEqual(lines1, lines2, msg) + assertLineEqual = assertLinesEquals + + @deprecated('Non-standard: please copy test method to your TestCase class') + def assertXMLWellFormed(self, stream, msg=None, context=2): + """asserts the XML stream is well-formed (no DTD conformance check) + + :param context: number of context lines in standard message + (show all data if negative). + Only available with element tree + """ + try: + from xml.etree.ElementTree import parse + self._assertETXMLWellFormed(stream, parse, msg) + except ImportError: + from xml.sax import make_parser, SAXParseException + parser = make_parser() + try: + parser.parse(stream) + except SAXParseException as ex: + if msg is None: + stream.seek(0) + for _ in range(ex.getLineNumber()): + line = stream.readline() + pointer = ('' * (ex.getLineNumber() - 1)) + '^' + msg = 'XML stream not well formed: %s\n%s%s' % (ex, line, pointer) + self.fail(msg) + + @deprecated('Non-standard: please copy test method to your TestCase class') + def assertXMLStringWellFormed(self, xml_string, msg=None, context=2): + """asserts the XML string is well-formed (no DTD conformance check) + + :param context: number of context lines in standard message + (show all data if negative). 
+ Only available with element tree + """ + try: + from xml.etree.ElementTree import fromstring + except ImportError: + from elementtree.ElementTree import fromstring + self._assertETXMLWellFormed(xml_string, fromstring, msg) + + def _assertETXMLWellFormed(self, data, parse, msg=None, context=2): + """internal function used by /assertXML(String)?WellFormed/ functions + + :param data: xml_data + :param parse: appropriate parser function for this data + :param msg: error message + :param context: number of context lines in standard message + (show all data if negative). + Only available with element tree + """ + from xml.parsers.expat import ExpatError + try: + from xml.etree.ElementTree import ParseError + except ImportError: + # compatibility for 1: + if len(tup)<=1: + self.fail( "tuple %s has no attributes (%s expected)"%(tup, + dict(element.attrib))) + self.assertDictEqual(element.attrib, tup[1]) + # check children + if len(element) or len(tup)>2: + if len(tup)<=2: + self.fail( "tuple %s has no children (%i expected)"%(tup, + len(element))) + if len(element) != len(tup[2]): + self.fail( "tuple %s has %i children%s (%i expected)"%(tup, + len(tup[2]), + ('', 's')[len(tup[2])>1], len(element))) + for index in range(len(tup[2])): + self.assertXMLEqualsTuple(element[index], tup[2][index]) + #check text + if element.text or len(tup)>3: + if len(tup)<=3: + self.fail( "tuple %s has no text value (%r expected)"%(tup, + element.text)) + self.assertTextEquals(element.text, tup[3]) + #check tail + if element.tail or len(tup)>4: + if len(tup)<=4: + self.fail( "tuple %s has no tail value (%r expected)"%(tup, + element.tail)) + self.assertTextEquals(element.tail, tup[4]) + + def _difftext(self, lines1, lines2, junk=None, msg_prefix='Texts differ'): + junk = junk or (' ', '\t') + # result is a generator + result = difflib.ndiff(lines1, lines2, charjunk=lambda x: x in junk) + read = [] + for line in result: + read.append(line) + # lines that don't start with a ' ' are diff ones + 
if not line.startswith(' '): + self.fail('\n'.join(['%s\n'%msg_prefix]+read + list(result))) + + @deprecated('Non-standard. Please use assertMultiLineEqual instead.') + def assertTextEquals(self, text1, text2, junk=None, + msg_prefix='Text differ', striplines=False): + """compare two multiline strings (using difflib and splitlines()) + + :param text1: a Python BaseString + :param text2: a second Python Basestring + :param junk: List of Caracters + :param msg_prefix: String (message prefix) + :param striplines: Boolean to trigger line stripping before comparing + """ + msg = [] + if not isinstance(text1, string_types): + msg.append('text1 is not a string (%s)'%(type(text1))) + if not isinstance(text2, string_types): + msg.append('text2 is not a string (%s)'%(type(text2))) + if msg: + self.fail('\n'.join(msg)) + lines1 = text1.strip().splitlines(True) + lines2 = text2.strip().splitlines(True) + if striplines: + lines1 = [line.strip() for line in lines1] + lines2 = [line.strip() for line in lines2] + self._difftext(lines1, lines2, junk, msg_prefix) + assertTextEqual = assertTextEquals + + @deprecated('Non-standard: please copy test method to your TestCase class') + def assertStreamEquals(self, stream1, stream2, junk=None, + msg_prefix='Stream differ'): + """compare two streams (using difflib and readlines())""" + # if stream2 is stream2, readlines() on stream1 will also read lines + # in stream2, so they'll appear different, although they're not + if stream1 is stream2: + return + # make sure we compare from the beginning of the stream + stream1.seek(0) + stream2.seek(0) + # compare + self._difftext(stream1.readlines(), stream2.readlines(), junk, + msg_prefix) + + assertStreamEqual = assertStreamEquals + + @deprecated('Non-standard: please copy test method to your TestCase class') + def assertFileEquals(self, fname1, fname2, junk=(' ', '\t')): + """compares two files using difflib""" + self.assertStreamEqual(open(fname1), open(fname2), junk, + msg_prefix='Files 
differs\n-:%s\n+:%s\n'%(fname1, fname2)) + + assertFileEqual = assertFileEquals + + @deprecated('Non-standard: please copy test method to your TestCase class') + def assertDirEquals(self, path_a, path_b): + """compares two files using difflib""" + assert osp.exists(path_a), "%s doesn't exists" % path_a + assert osp.exists(path_b), "%s doesn't exists" % path_b + + all_a = [ (ipath[len(path_a):].lstrip('/'), idirs, ifiles) + for ipath, idirs, ifiles in os.walk(path_a)] + all_a.sort(key=itemgetter(0)) + + all_b = [ (ipath[len(path_b):].lstrip('/'), idirs, ifiles) + for ipath, idirs, ifiles in os.walk(path_b)] + all_b.sort(key=itemgetter(0)) + + iter_a, iter_b = iter(all_a), iter(all_b) + partial_iter = True + ipath_a, idirs_a, ifiles_a = data_a = None, None, None + while True: + try: + ipath_a, idirs_a, ifiles_a = datas_a = next(iter_a) + partial_iter = False + ipath_b, idirs_b, ifiles_b = datas_b = next(iter_b) + partial_iter = True + + + self.assertTrue(ipath_a == ipath_b, + "unexpected %s in %s while looking %s from %s" % + (ipath_a, path_a, ipath_b, path_b)) + + + errors = {} + sdirs_a = set(idirs_a) + sdirs_b = set(idirs_b) + errors["unexpected directories"] = sdirs_a - sdirs_b + errors["missing directories"] = sdirs_b - sdirs_a + + sfiles_a = set(ifiles_a) + sfiles_b = set(ifiles_b) + errors["unexpected files"] = sfiles_a - sfiles_b + errors["missing files"] = sfiles_b - sfiles_a + + + msgs = [ "%s: %s"% (name, items) + for name, items in errors.items() if items] + + if msgs: + msgs.insert(0, "%s and %s differ :" % ( + osp.join(path_a, ipath_a), + osp.join(path_b, ipath_b), + )) + self.fail("\n".join(msgs)) + + for files in (ifiles_a, ifiles_b): + files.sort() + + for index, path in enumerate(ifiles_a): + self.assertFileEquals(osp.join(path_a, ipath_a, path), + osp.join(path_b, ipath_b, ifiles_b[index])) + + except StopIteration: + break + + assertDirEqual = assertDirEquals + + def assertIsInstance(self, obj, klass, msg=None, strict=False): + """check if an 
object is an instance of a class + + :param obj: the Python Object to be checked + :param klass: the target class + :param msg: a String for a custom message + :param strict: if True, check that the class of is ; + else check with 'isinstance' + """ + if strict: + warnings.warn('[API] Non-standard. Strict parameter has vanished', + DeprecationWarning, stacklevel=2) + if msg is None: + if strict: + msg = '%r is not of class %s but of %s' + else: + msg = '%r is not an instance of %s but of %s' + msg = msg % (obj, klass, type(obj)) + if strict: + self.assertTrue(obj.__class__ is klass, msg) + else: + self.assertTrue(isinstance(obj, klass), msg) + + @deprecated('Please use assertIsNone instead.') + def assertNone(self, obj, msg=None): + """assert obj is None + + :param obj: Python Object to be tested + """ + if msg is None: + msg = "reference to %r when None expected"%(obj,) + self.assertTrue( obj is None, msg ) + + @deprecated('Please use assertIsNotNone instead.') + def assertNotNone(self, obj, msg=None): + """assert obj is not None""" + if msg is None: + msg = "unexpected reference to None" + self.assertTrue( obj is not None, msg ) + + @deprecated('Non-standard. Please use assertAlmostEqual instead.') + def assertFloatAlmostEquals(self, obj, other, prec=1e-5, + relative=False, msg=None): + """compares if two floats have a distance smaller than expected + precision. + + :param obj: a Float + :param other: another Float to be comparted to + :param prec: a Float describing the precision + :param relative: boolean switching to relative/absolute precision + :param msg: a String for a custom message + """ + if msg is None: + msg = "%r != %r" % (obj, other) + if relative: + prec = prec*math.fabs(obj) + self.assertTrue(math.fabs(obj - other) < prec, msg) + + def failUnlessRaises(self, excClass, callableObj=None, *args, **kwargs): + """override default failUnlessRaises method to return the raised + exception instance. 
+ + Fail unless an exception of class excClass is thrown + by callableObj when invoked with arguments args and keyword + arguments kwargs. If a different type of exception is + thrown, it will not be caught, and the test case will be + deemed to have suffered an error, exactly as for an + unexpected exception. + + CAUTION! There are subtle differences between Logilab and unittest2 + - exc is not returned in standard version + - context capabilities in standard version + - try/except/else construction (minor) + + :param excClass: the Exception to be raised + :param callableObj: a callable Object which should raise + :param args: a List of arguments for + :param kwargs: a List of keyword arguments for + """ + # XXX cube vcslib : test_branches_from_app + if callableObj is None: + _assert = super(TestCase, self).assertRaises + return _assert(excClass, callableObj, *args, **kwargs) + try: + callableObj(*args, **kwargs) + except excClass as exc: + class ProxyException: + def __init__(self, obj): + self._obj = obj + def __getattr__(self, attr): + warn_msg = ("This exception was retrieved with the old testlib way " + "`exc = self.assertRaises(Exc, callable)`, please use " + "the context manager instead'") + warnings.warn(warn_msg, DeprecationWarning, 2) + return self._obj.__getattribute__(attr) + return ProxyException(exc) + else: + if hasattr(excClass, '__name__'): + excName = excClass.__name__ + else: + excName = str(excClass) + raise self.failureException("%s not raised" % excName) + + assertRaises = failUnlessRaises + + if sys.version_info >= (3,2): + assertItemsEqual = unittest.TestCase.assertCountEqual + else: + assertCountEqual = unittest.TestCase.assertItemsEqual + if sys.version_info < (2,7): + def assertIsNotNone(self, value, *args, **kwargs): + self.assertNotEqual(None, value, *args, **kwargs) + +TestCase.assertItemsEqual = deprecated('assertItemsEqual is deprecated, use assertCountEqual')( + TestCase.assertItemsEqual) + +import doctest + +class 
SkippedSuite(unittest.TestSuite): + def test(self): + """just there to trigger test execution""" + self.skipped_test('doctest module has no DocTestSuite class') + + +class DocTestFinder(doctest.DocTestFinder): + + def __init__(self, *args, **kwargs): + self.skipped = kwargs.pop('skipped', ()) + doctest.DocTestFinder.__init__(self, *args, **kwargs) + + def _get_test(self, obj, name, module, globs, source_lines): + """override default _get_test method to be able to skip tests + according to skipped attribute's value + """ + if getattr(obj, '__name__', '') in self.skipped: + return None + return doctest.DocTestFinder._get_test(self, obj, name, module, + globs, source_lines) + + +class DocTest(TestCase): + """trigger module doctest + I don't know how to make unittest.main consider the DocTestSuite instance + without this hack + """ + skipped = () + def __call__(self, result=None, runcondition=None, options=None):\ + # pylint: disable=W0613 + try: + finder = DocTestFinder(skipped=self.skipped) + suite = doctest.DocTestSuite(self.module, test_finder=finder) + # XXX iirk + doctest.DocTestCase._TestCase__exc_info = sys.exc_info + except AttributeError: + suite = SkippedSuite() + # doctest may gork the builtins dictionnary + # This happen to the "_" entry used by gettext + old_builtins = builtins.__dict__.copy() + try: + return suite.run(result) + finally: + builtins.__dict__.clear() + builtins.__dict__.update(old_builtins) + run = __call__ + + def test(self): + """just there to trigger test execution""" + +MAILBOX = None + +class MockSMTP: + """fake smtplib.SMTP""" + + def __init__(self, host, port): + self.host = host + self.port = port + global MAILBOX + self.reveived = MAILBOX = [] + + def set_debuglevel(self, debuglevel): + """ignore debug level""" + + def sendmail(self, fromaddr, toaddres, body): + """push sent mail in the mailbox""" + self.reveived.append((fromaddr, toaddres, body)) + + def quit(self): + """ignore quit""" + + +class 
MockConfigParser(configparser.ConfigParser): + """fake ConfigParser.ConfigParser""" + + def __init__(self, options): + configparser.ConfigParser.__init__(self) + for section, pairs in options.iteritems(): + self.add_section(section) + for key, value in pairs.iteritems(): + self.set(section, key, value) + def write(self, _): + raise NotImplementedError() + + +class MockConnection: + """fake DB-API 2.0 connexion AND cursor (i.e. cursor() return self)""" + + def __init__(self, results): + self.received = [] + self.states = [] + self.results = results + + def cursor(self): + """Mock cursor method""" + return self + def execute(self, query, args=None): + """Mock execute method""" + self.received.append( (query, args) ) + def fetchone(self): + """Mock fetchone method""" + return self.results[0] + def fetchall(self): + """Mock fetchall method""" + return self.results + def commit(self): + """Mock commiy method""" + self.states.append( ('commit', len(self.received)) ) + def rollback(self): + """Mock rollback method""" + self.states.append( ('rollback', len(self.received)) ) + def close(self): + """Mock close method""" + pass + + +def mock_object(**params): + """creates an object using params to set attributes + >>> option = mock_object(verbose=False, index=range(5)) + >>> option.verbose + False + >>> option.index + [0, 1, 2, 3, 4] + """ + return type('Mock', (), params)() + + +def create_files(paths, chroot): + """Creates directories and files found in . 
+ + :param paths: list of relative paths to files or directories + :param chroot: the root directory in which paths will be created + + >>> from os.path import isdir, isfile + >>> isdir('/tmp/a') + False + >>> create_files(['a/b/foo.py', 'a/b/c/', 'a/b/c/d/e.py'], '/tmp') + >>> isdir('/tmp/a') + True + >>> isdir('/tmp/a/b/c') + True + >>> isfile('/tmp/a/b/c/d/e.py') + True + >>> isfile('/tmp/a/b/foo.py') + True + """ + dirs, files = set(), set() + for path in paths: + path = osp.join(chroot, path) + filename = osp.basename(path) + # path is a directory path + if filename == '': + dirs.add(path) + # path is a filename path + else: + dirs.add(osp.dirname(path)) + files.add(path) + for dirpath in dirs: + if not osp.isdir(dirpath): + os.makedirs(dirpath) + for filepath in files: + open(filepath, 'w').close() + + +class AttrObject: # XXX cf mock_object + def __init__(self, **kwargs): + self.__dict__.update(kwargs) + +def tag(*args, **kwargs): + """descriptor adding tag to a function""" + def desc(func): + assert not hasattr(func, 'tags') + func.tags = Tags(*args, **kwargs) + return func + return desc + +def require_version(version): + """ Compare version of python interpreter to the given one. Skip the test + if older. + """ + def check_require_version(f): + version_elements = version.split('.') + try: + compare = tuple([int(v) for v in version_elements]) + except ValueError: + raise ValueError('%s is not a correct version : should be X.Y[.Z].' % version) + current = sys.version_info[:3] + if current < compare: + def new_f(self, *args, **kwargs): + self.skipTest('Need at least %s version of python. Current version is %s.' % (version, '.'.join([str(element) for element in current]))) + new_f.__name__ = f.__name__ + return new_f + else: + return f + return check_require_version + +def require_module(module): + """ Check if the given module is loaded. Skip the test if not. 
+ """ + def check_require_module(f): + try: + __import__(module) + return f + except ImportError: + def new_f(self, *args, **kwargs): + self.skipTest('%s can not be imported.' % module) + new_f.__name__ = f.__name__ + return new_f + return check_require_module + diff --git a/pymode/libs/pylama/lint/pylama_pylint/logilab/common/textutils.py b/pymode/libs/logilab/common/textutils.py similarity index 99% rename from pymode/libs/pylama/lint/pylama_pylint/logilab/common/textutils.py rename to pymode/libs/logilab/common/textutils.py index f55c0040..9046f975 100644 --- a/pymode/libs/pylama/lint/pylama_pylint/logilab/common/textutils.py +++ b/pymode/libs/logilab/common/textutils.py @@ -284,11 +284,14 @@ def text_to_dict(text): dict of {'key': 'value'}. When the same key is encountered multiple time, value is turned into a list containing all values. - >>> text_to_dict('''multiple=1 + >>> d = text_to_dict('''multiple=1 ... multiple= 2 ... single =3 ... ''') - {'single': '3', 'multiple': ['1', '2']} + >>> d['single'] + '3' + >>> d['multiple'] + ['1', '2'] """ res = {} diff --git a/pymode/libs/pylama/lint/pylama_pylint/logilab/common/tree.py b/pymode/libs/logilab/common/tree.py similarity index 100% rename from pymode/libs/pylama/lint/pylama_pylint/logilab/common/tree.py rename to pymode/libs/logilab/common/tree.py diff --git a/pymode/libs/logilab/common/umessage.py b/pymode/libs/logilab/common/umessage.py new file mode 100644 index 00000000..a5e47995 --- /dev/null +++ b/pymode/libs/logilab/common/umessage.py @@ -0,0 +1,194 @@ +# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved. +# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr +# +# This file is part of logilab-common. +# +# logilab-common is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 2.1 of the License, or (at your option) any +# later version. 
+# +# logilab-common is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License along +# with logilab-common. If not, see . +"""Unicode email support (extends email from stdlib)""" + +__docformat__ = "restructuredtext en" + +import email +from encodings import search_function +import sys +if sys.version_info >= (2, 5): + from email.utils import parseaddr, parsedate + from email.header import decode_header +else: + from email.Utils import parseaddr, parsedate + from email.Header import decode_header + +from datetime import datetime + +try: + from mx.DateTime import DateTime +except ImportError: + DateTime = datetime + +import logilab.common as lgc + + +def decode_QP(string): + parts = [] + for decoded, charset in decode_header(string): + if not charset : + charset = 'iso-8859-15' + parts.append(decoded.decode(charset, 'replace')) + + if sys.version_info < (3, 3): + # decoding was non-RFC compliant wrt to whitespace handling + # see http://bugs.python.org/issue1079 + return u' '.join(parts) + return u''.join(parts) + +def message_from_file(fd): + try: + return UMessage(email.message_from_file(fd)) + except email.Errors.MessageParseError: + return '' + +def message_from_string(string): + try: + return UMessage(email.message_from_string(string)) + except email.Errors.MessageParseError: + return '' + +class UMessage: + """Encapsulates an email.Message instance and returns only unicode objects. 
+ """ + + def __init__(self, message): + self.message = message + + # email.Message interface ################################################# + + def get(self, header, default=None): + value = self.message.get(header, default) + if value: + return decode_QP(value) + return value + + def __getitem__(self, header): + return self.get(header) + + def get_all(self, header, default=()): + return [decode_QP(val) for val in self.message.get_all(header, default) + if val is not None] + + def is_multipart(self): + return self.message.is_multipart() + + def get_boundary(self): + return self.message.get_boundary() + + def walk(self): + for part in self.message.walk(): + yield UMessage(part) + + if sys.version_info < (3, 0): + + def get_payload(self, index=None, decode=False): + message = self.message + if index is None: + payload = message.get_payload(index, decode) + if isinstance(payload, list): + return [UMessage(msg) for msg in payload] + if message.get_content_maintype() != 'text': + return payload + + charset = message.get_content_charset() or 'iso-8859-1' + if search_function(charset) is None: + charset = 'iso-8859-1' + return unicode(payload or '', charset, "replace") + else: + payload = UMessage(message.get_payload(index, decode)) + return payload + + def get_content_maintype(self): + return unicode(self.message.get_content_maintype()) + + def get_content_type(self): + return unicode(self.message.get_content_type()) + + def get_filename(self, failobj=None): + value = self.message.get_filename(failobj) + if value is failobj: + return value + try: + return unicode(value) + except UnicodeDecodeError: + return u'error decoding filename' + + else: + + def get_payload(self, index=None, decode=False): + message = self.message + if index is None: + payload = message.get_payload(index, decode) + if isinstance(payload, list): + return [UMessage(msg) for msg in payload] + return payload + else: + payload = UMessage(message.get_payload(index, decode)) + return payload + + def 
get_content_maintype(self): + return self.message.get_content_maintype() + + def get_content_type(self): + return self.message.get_content_type() + + def get_filename(self, failobj=None): + return self.message.get_filename(failobj) + + # other convenience methods ############################################### + + def headers(self): + """return an unicode string containing all the message's headers""" + values = [] + for header in self.message.keys(): + values.append(u'%s: %s' % (header, self.get(header))) + return '\n'.join(values) + + def multi_addrs(self, header): + """return a list of 2-uple (name, address) for the given address (which + is expected to be an header containing address such as from, to, cc...) + """ + persons = [] + for person in self.get_all(header, ()): + name, mail = parseaddr(person) + persons.append((name, mail)) + return persons + + def date(self, alternative_source=False, return_str=False): + """return a datetime object for the email's date or None if no date is + set or if it can't be parsed + """ + value = self.get('date') + if value is None and alternative_source: + unix_from = self.message.get_unixfrom() + if unix_from is not None: + try: + value = unix_from.split(" ", 2)[2] + except IndexError: + pass + if value is not None: + datetuple = parsedate(value) + if datetuple: + if lgc.USE_MX_DATETIME: + return DateTime(*datetuple[:6]) + return datetime(*datetuple[:6]) + elif not return_str: + return None + return value diff --git a/pymode/libs/pylama/lint/pylama_pylint/logilab/common/ureports/__init__.py b/pymode/libs/logilab/common/ureports/__init__.py similarity index 93% rename from pymode/libs/pylama/lint/pylama_pylint/logilab/common/ureports/__init__.py rename to pymode/libs/logilab/common/ureports/__init__.py index dcffcfa3..d76ebe52 100644 --- a/pymode/libs/pylama/lint/pylama_pylint/logilab/common/ureports/__init__.py +++ b/pymode/libs/logilab/common/ureports/__init__.py @@ -20,13 +20,11 @@ A way to create simple reports using 
python objects, primarily designed to be formatted as text and html. """ -from __future__ import generators __docformat__ = "restructuredtext en" import sys -from cStringIO import StringIO -from StringIO import StringIO as UStringIO +from logilab.common.compat import StringIO from logilab.common.textutils import linesep @@ -44,13 +42,13 @@ def layout_title(layout): """ for child in layout.children: if isinstance(child, Title): - return ' '.join([node.data for node in get_nodes(child, Text)]) + return u' '.join([node.data for node in get_nodes(child, Text)]) def build_summary(layout, level=1): """make a summary for the report, including X level""" assert level > 0 level -= 1 - summary = List(klass='summary') + summary = List(klass=u'summary') for child in layout.children: if not isinstance(child, Section): continue @@ -59,7 +57,7 @@ def build_summary(layout, level=1): continue if not child.id: child.id = label.replace(' ', '-') - node = Link('#'+child.id, label=label or child.id) + node = Link(u'#'+child.id, label=label or child.id) # FIXME: Three following lines produce not very compliant # docbook: there are some useless . 
They might be # replaced by the three commented lines but this then produces @@ -101,7 +99,7 @@ def format_children(self, layout): for child in getattr(layout, 'children', ()): child.accept(self) - def writeln(self, string=''): + def writeln(self, string=u''): """write a line in the output buffer""" self.write(string + linesep) @@ -134,7 +132,7 @@ def get_table_content(self, table): result[-1].append(cell) # fill missing cells while len(result[-1]) < cols: - result[-1].append('') + result[-1].append(u'') return result def compute_content(self, layout): @@ -149,7 +147,7 @@ def write(data): stream.write(data) except UnicodeEncodeError: stream.write(data.encode(self.encoding)) - def writeln(data=''): + def writeln(data=u''): try: stream.write(data+linesep) except UnicodeEncodeError: @@ -158,7 +156,7 @@ def writeln(data=''): self.writeln = writeln self.__compute_funcs.append((write, writeln)) for child in layout.children: - stream = UStringIO() + stream = StringIO() child.accept(self) yield stream.getvalue() self.__compute_funcs.pop() diff --git a/pymode/libs/pylama/lint/pylama_pylint/logilab/common/ureports/docbook_writer.py b/pymode/libs/logilab/common/ureports/docbook_writer.py similarity index 99% rename from pymode/libs/pylama/lint/pylama_pylint/logilab/common/ureports/docbook_writer.py rename to pymode/libs/logilab/common/ureports/docbook_writer.py index e75cbe09..857068c8 100644 --- a/pymode/libs/pylama/lint/pylama_pylint/logilab/common/ureports/docbook_writer.py +++ b/pymode/libs/logilab/common/ureports/docbook_writer.py @@ -16,9 +16,10 @@ # You should have received a copy of the GNU Lesser General Public License along # with logilab-common. If not, see . 
"""HTML formatting drivers for ureports""" -from __future__ import generators __docformat__ = "restructuredtext en" +from six.moves import range + from logilab.common.ureports import HTMLWriter class DocbookWriter(HTMLWriter): diff --git a/pymode/libs/pylama/lint/pylama_pylint/logilab/common/ureports/html_writer.py b/pymode/libs/logilab/common/ureports/html_writer.py similarity index 66% rename from pymode/libs/pylama/lint/pylama_pylint/logilab/common/ureports/html_writer.py rename to pymode/libs/logilab/common/ureports/html_writer.py index 1d095034..eba34ea4 100644 --- a/pymode/libs/pylama/lint/pylama_pylint/logilab/common/ureports/html_writer.py +++ b/pymode/libs/logilab/common/ureports/html_writer.py @@ -20,6 +20,8 @@ from cgi import escape +from six.moves import range + from logilab.common.ureports import BaseWriter @@ -32,100 +34,100 @@ def __init__(self, snippet=None): def handle_attrs(self, layout): """get an attribute string from layout member attributes""" - attrs = '' + attrs = u'' klass = getattr(layout, 'klass', None) if klass: - attrs += ' class="%s"' % klass + attrs += u' class="%s"' % klass nid = getattr(layout, 'id', None) if nid: - attrs += ' id="%s"' % nid + attrs += u' id="%s"' % nid return attrs def begin_format(self, layout): """begin to format a layout""" super(HTMLWriter, self).begin_format(layout) if self.snippet is None: - self.writeln('') - self.writeln('') + self.writeln(u'') + self.writeln(u'') def end_format(self, layout): """finished to format a layout""" if self.snippet is None: - self.writeln('') - self.writeln('') + self.writeln(u'') + self.writeln(u'') def visit_section(self, layout): """display a section as html, using div + h[section level]""" self.section += 1 - self.writeln('' % self.handle_attrs(layout)) + self.writeln(u'' % self.handle_attrs(layout)) self.format_children(layout) - self.writeln('') + self.writeln(u'') self.section -= 1 def visit_title(self, layout): """display a title using """ - self.write('' % (self.section, 
self.handle_attrs(layout))) + self.write(u'' % (self.section, self.handle_attrs(layout))) self.format_children(layout) - self.writeln('' % self.section) + self.writeln(u'' % self.section) def visit_table(self, layout): """display a table as html""" - self.writeln('' % self.handle_attrs(layout)) + self.writeln(u'' % self.handle_attrs(layout)) table_content = self.get_table_content(layout) for i in range(len(table_content)): row = table_content[i] if i == 0 and layout.rheaders: - self.writeln('') + self.writeln(u'') elif i+1 == len(table_content) and layout.rrheaders: - self.writeln('') + self.writeln(u'') else: - self.writeln('' % (i%2 and 'even' or 'odd')) + self.writeln(u'' % (i%2 and 'even' or 'odd')) for j in range(len(row)): - cell = row[j] or ' ' + cell = row[j] or u' ' if (layout.rheaders and i == 0) or \ (layout.cheaders and j == 0) or \ (layout.rrheaders and i+1 == len(table_content)) or \ (layout.rcheaders and j+1 == len(row)): - self.writeln('%s' % cell) + self.writeln(u'%s' % cell) else: - self.writeln('%s' % cell) - self.writeln('') - self.writeln('') + self.writeln(u'%s' % cell) + self.writeln(u'') + self.writeln(u'') def visit_list(self, layout): """display a list as html""" - self.writeln('' % self.handle_attrs(layout)) + self.writeln(u'' % self.handle_attrs(layout)) for row in list(self.compute_content(layout)): - self.writeln('
  • %s
  • ' % row) - self.writeln('') + self.writeln(u'
  • %s
  • ' % row) + self.writeln(u'') def visit_paragraph(self, layout): """display links (using

    )""" - self.write('

    ') + self.write(u'

    ') self.format_children(layout) - self.write('

    ') + self.write(u'

    ') def visit_span(self, layout): """display links (using

    )""" - self.write('' % self.handle_attrs(layout)) + self.write(u'' % self.handle_attrs(layout)) self.format_children(layout) - self.write('') + self.write(u'') def visit_link(self, layout): """display links (using )""" - self.write(' %s' % (layout.url, - self.handle_attrs(layout), - layout.label)) + self.write(u' %s' % (layout.url, + self.handle_attrs(layout), + layout.label)) def visit_verbatimtext(self, layout): """display verbatim text (using

    )"""
    -        self.write('
    ')
    -        self.write(layout.data.replace('&', '&').replace('<', '<'))
    -        self.write('
    ') + self.write(u'
    ')
    +        self.write(layout.data.replace(u'&', u'&').replace(u'<', u'<'))
    +        self.write(u'
    ') def visit_text(self, layout): """add some text""" data = layout.data if layout.escaped: - data = data.replace('&', '&').replace('<', '<') + data = data.replace(u'&', u'&').replace(u'<', u'<') self.write(data) diff --git a/pymode/libs/pylama/lint/pylama_pylint/logilab/common/ureports/nodes.py b/pymode/libs/logilab/common/ureports/nodes.py similarity index 98% rename from pymode/libs/pylama/lint/pylama_pylint/logilab/common/ureports/nodes.py rename to pymode/libs/logilab/common/ureports/nodes.py index d63b5828..a9585b30 100644 --- a/pymode/libs/pylama/lint/pylama_pylint/logilab/common/ureports/nodes.py +++ b/pymode/libs/logilab/common/ureports/nodes.py @@ -23,6 +23,8 @@ from logilab.common.tree import VNode +from six import string_types + class BaseComponent(VNode): """base report component @@ -79,7 +81,7 @@ def __init__(self, data, escaped=True, **kwargs): super(Text, self).__init__(**kwargs) #if isinstance(data, unicode): # data = data.encode('ascii') - assert isinstance(data, (str, unicode)), data.__class__ + assert isinstance(data, string_types), data.__class__ self.escaped = escaped self.data = data diff --git a/pymode/libs/pylama/lint/pylama_pylint/logilab/common/ureports/text_writer.py b/pymode/libs/logilab/common/ureports/text_writer.py similarity index 82% rename from pymode/libs/pylama/lint/pylama_pylint/logilab/common/ureports/text_writer.py rename to pymode/libs/logilab/common/ureports/text_writer.py index 04c8f263..c87613c9 100644 --- a/pymode/libs/pylama/lint/pylama_pylint/logilab/common/ureports/text_writer.py +++ b/pymode/libs/logilab/common/ureports/text_writer.py @@ -16,14 +16,19 @@ # You should have received a copy of the GNU Lesser General Public License along # with logilab-common. If not, see . 
"""Text formatting drivers for ureports""" + +from __future__ import print_function + __docformat__ = "restructuredtext en" +from six.moves import range + from logilab.common.textutils import linesep from logilab.common.ureports import BaseWriter -TITLE_UNDERLINES = ['', '=', '-', '`', '.', '~', '^'] -BULLETS = ['*', '-'] +TITLE_UNDERLINES = [u'', u'=', u'-', u'`', u'.', u'~', u'^'] +BULLETS = [u'*', u'-'] class TextWriter(BaseWriter): """format layouts as text @@ -43,18 +48,18 @@ def visit_section(self, layout): if self.pending_urls: self.writeln() for label, url in self.pending_urls: - self.writeln('.. _`%s`: %s' % (label, url)) + self.writeln(u'.. _`%s`: %s' % (label, url)) self.pending_urls = [] self.section -= 1 self.writeln() def visit_title(self, layout): - title = ''.join(list(self.compute_content(layout))) + title = u''.join(list(self.compute_content(layout))) self.writeln(title) try: self.writeln(TITLE_UNDERLINES[self.section] * len(title)) except IndexError: - print "FIXME TITLE TOO DEEP. TURNING TITLE INTO TEXT" + print("FIXME TITLE TOO DEEP. 
TURNING TITLE INTO TEXT") def visit_paragraph(self, layout): """enter a paragraph""" @@ -83,19 +88,19 @@ def visit_table(self, layout): def default_table(self, layout, table_content, cols_width): """format a table""" cols_width = [size+1 for size in cols_width] - format_strings = ' '.join(['%%-%ss'] * len(cols_width)) + format_strings = u' '.join([u'%%-%ss'] * len(cols_width)) format_strings = format_strings % tuple(cols_width) format_strings = format_strings.split(' ') - table_linesep = '\n+' + '+'.join(['-'*w for w in cols_width]) + '+\n' - headsep = '\n+' + '+'.join(['='*w for w in cols_width]) + '+\n' + table_linesep = u'\n+' + u'+'.join([u'-'*w for w in cols_width]) + u'+\n' + headsep = u'\n+' + u'+'.join([u'='*w for w in cols_width]) + u'+\n' # FIXME: layout.cheaders self.write(table_linesep) for i in range(len(table_content)): - self.write('|') + self.write(u'|') line = table_content[i] for j in range(len(line)): self.write(format_strings[j] % line[j]) - self.write('|') + self.write(u'|') if i == 0 and layout.rheaders: self.write(headsep) else: @@ -104,7 +109,7 @@ def default_table(self, layout, table_content, cols_width): def field_table(self, layout, table_content, cols_width): """special case for field table""" assert layout.cols == 2 - format_string = '%s%%-%ss: %%s' % (linesep, cols_width[0]) + format_string = u'%s%%-%ss: %%s' % (linesep, cols_width[0]) for field, value in table_content: self.write(format_string % (field, value)) @@ -115,14 +120,14 @@ def visit_list(self, layout): indent = ' ' * self.list_level self.list_level += 1 for child in layout.children: - self.write('%s%s%s ' % (linesep, indent, bullet)) + self.write(u'%s%s%s ' % (linesep, indent, bullet)) child.accept(self) self.list_level -= 1 def visit_link(self, layout): """add a hyperlink""" if layout.label != layout.url: - self.write('`%s`_' % layout.label) + self.write(u'`%s`_' % layout.label) self.pending_urls.append( (layout.label, layout.url) ) else: self.write(layout.url) @@ -130,11 
+135,11 @@ def visit_link(self, layout): def visit_verbatimtext(self, layout): """display a verbatim layout as text (so difficult ;) """ - self.writeln('::\n') + self.writeln(u'::\n') for line in layout.data.splitlines(): - self.writeln(' ' + line) + self.writeln(u' ' + line) self.writeln() def visit_text(self, layout): """add some text""" - self.write(layout.data) + self.write(u'%s' % layout.data) diff --git a/pymode/libs/logilab/common/urllib2ext.py b/pymode/libs/logilab/common/urllib2ext.py new file mode 100644 index 00000000..339aec06 --- /dev/null +++ b/pymode/libs/logilab/common/urllib2ext.py @@ -0,0 +1,89 @@ +from __future__ import print_function + +import logging +import urllib2 + +import kerberos as krb + +class GssapiAuthError(Exception): + """raised on error during authentication process""" + +import re +RGX = re.compile('(?:.*,)*\s*Negotiate\s*([^,]*),?', re.I) + +def get_negociate_value(headers): + for authreq in headers.getheaders('www-authenticate'): + match = RGX.search(authreq) + if match: + return match.group(1) + +class HTTPGssapiAuthHandler(urllib2.BaseHandler): + """Negotiate HTTP authentication using context from GSSAPI""" + + handler_order = 400 # before Digest Auth + + def __init__(self): + self._reset() + + def _reset(self): + self._retried = 0 + self._context = None + + def clean_context(self): + if self._context is not None: + krb.authGSSClientClean(self._context) + + def http_error_401(self, req, fp, code, msg, headers): + try: + if self._retried > 5: + raise urllib2.HTTPError(req.get_full_url(), 401, + "negotiate auth failed", headers, None) + self._retried += 1 + logging.debug('gssapi handler, try %s' % self._retried) + negotiate = get_negociate_value(headers) + if negotiate is None: + logging.debug('no negociate found in a www-authenticate header') + return None + logging.debug('HTTPGssapiAuthHandler: negotiate 1 is %r' % negotiate) + result, self._context = krb.authGSSClientInit("HTTP@%s" % req.get_host()) + if result < 1: + raise 
GssapiAuthError("HTTPGssapiAuthHandler: init failed with %d" % result) + result = krb.authGSSClientStep(self._context, negotiate) + if result < 0: + raise GssapiAuthError("HTTPGssapiAuthHandler: step 1 failed with %d" % result) + client_response = krb.authGSSClientResponse(self._context) + logging.debug('HTTPGssapiAuthHandler: client response is %s...' % client_response[:10]) + req.add_unredirected_header('Authorization', "Negotiate %s" % client_response) + server_response = self.parent.open(req) + negotiate = get_negociate_value(server_response.info()) + if negotiate is None: + logging.warning('HTTPGssapiAuthHandler: failed to authenticate server') + else: + logging.debug('HTTPGssapiAuthHandler negotiate 2: %s' % negotiate) + result = krb.authGSSClientStep(self._context, negotiate) + if result < 1: + raise GssapiAuthError("HTTPGssapiAuthHandler: step 2 failed with %d" % result) + return server_response + except GssapiAuthError as exc: + logging.error(repr(exc)) + finally: + self.clean_context() + self._reset() + +if __name__ == '__main__': + import sys + # debug + import httplib + httplib.HTTPConnection.debuglevel = 1 + httplib.HTTPSConnection.debuglevel = 1 + # debug + import logging + logging.basicConfig(level=logging.DEBUG) + # handle cookies + import cookielib + cj = cookielib.CookieJar() + ch = urllib2.HTTPCookieProcessor(cj) + # test with url sys.argv[1] + h = HTTPGssapiAuthHandler() + response = urllib2.build_opener(h, ch).open(sys.argv[1]) + print('\nresponse: %s\n--------------\n' % response.code, response.info()) diff --git a/pymode/libs/logilab/common/vcgutils.py b/pymode/libs/logilab/common/vcgutils.py new file mode 100644 index 00000000..9cd2acda --- /dev/null +++ b/pymode/libs/logilab/common/vcgutils.py @@ -0,0 +1,216 @@ +# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved. +# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr +# +# This file is part of logilab-common. 
+# +# logilab-common is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 2.1 of the License, or (at your option) any +# later version. +# +# logilab-common is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License along +# with logilab-common. If not, see . +"""Functions to generate files readable with Georg Sander's vcg +(Visualization of Compiler Graphs). + +You can download vcg at http://rw4.cs.uni-sb.de/~sander/html/gshome.html +Note that vcg exists as a debian package. + +See vcg's documentation for explanation about the different values that +maybe used for the functions parameters. + + + + +""" +__docformat__ = "restructuredtext en" + +import string + +ATTRS_VAL = { + 'algos': ('dfs', 'tree', 'minbackward', + 'left_to_right', 'right_to_left', + 'top_to_bottom', 'bottom_to_top', + 'maxdepth', 'maxdepthslow', 'mindepth', 'mindepthslow', + 'mindegree', 'minindegree', 'minoutdegree', + 'maxdegree', 'maxindegree', 'maxoutdegree'), + 'booleans': ('yes', 'no'), + 'colors': ('black', 'white', 'blue', 'red', 'green', 'yellow', + 'magenta', 'lightgrey', + 'cyan', 'darkgrey', 'darkblue', 'darkred', 'darkgreen', + 'darkyellow', 'darkmagenta', 'darkcyan', 'gold', + 'lightblue', 'lightred', 'lightgreen', 'lightyellow', + 'lightmagenta', 'lightcyan', 'lilac', 'turquoise', + 'aquamarine', 'khaki', 'purple', 'yellowgreen', 'pink', + 'orange', 'orchid'), + 'shapes': ('box', 'ellipse', 'rhomb', 'triangle'), + 'textmodes': ('center', 'left_justify', 'right_justify'), + 'arrowstyles': ('solid', 'line', 'none'), + 'linestyles': ('continuous', 'dashed', 'dotted', 'invisible'), + } + +# meaning of 
possible values: +# O -> string +# 1 -> int +# list -> value in list +GRAPH_ATTRS = { + 'title': 0, + 'label': 0, + 'color': ATTRS_VAL['colors'], + 'textcolor': ATTRS_VAL['colors'], + 'bordercolor': ATTRS_VAL['colors'], + 'width': 1, + 'height': 1, + 'borderwidth': 1, + 'textmode': ATTRS_VAL['textmodes'], + 'shape': ATTRS_VAL['shapes'], + 'shrink': 1, + 'stretch': 1, + 'orientation': ATTRS_VAL['algos'], + 'vertical_order': 1, + 'horizontal_order': 1, + 'xspace': 1, + 'yspace': 1, + 'layoutalgorithm': ATTRS_VAL['algos'], + 'late_edge_labels': ATTRS_VAL['booleans'], + 'display_edge_labels': ATTRS_VAL['booleans'], + 'dirty_edge_labels': ATTRS_VAL['booleans'], + 'finetuning': ATTRS_VAL['booleans'], + 'manhattan_edges': ATTRS_VAL['booleans'], + 'smanhattan_edges': ATTRS_VAL['booleans'], + 'port_sharing': ATTRS_VAL['booleans'], + 'edges': ATTRS_VAL['booleans'], + 'nodes': ATTRS_VAL['booleans'], + 'splines': ATTRS_VAL['booleans'], + } +NODE_ATTRS = { + 'title': 0, + 'label': 0, + 'color': ATTRS_VAL['colors'], + 'textcolor': ATTRS_VAL['colors'], + 'bordercolor': ATTRS_VAL['colors'], + 'width': 1, + 'height': 1, + 'borderwidth': 1, + 'textmode': ATTRS_VAL['textmodes'], + 'shape': ATTRS_VAL['shapes'], + 'shrink': 1, + 'stretch': 1, + 'vertical_order': 1, + 'horizontal_order': 1, + } +EDGE_ATTRS = { + 'sourcename': 0, + 'targetname': 0, + 'label': 0, + 'linestyle': ATTRS_VAL['linestyles'], + 'class': 1, + 'thickness': 0, + 'color': ATTRS_VAL['colors'], + 'textcolor': ATTRS_VAL['colors'], + 'arrowcolor': ATTRS_VAL['colors'], + 'backarrowcolor': ATTRS_VAL['colors'], + 'arrowsize': 1, + 'backarrowsize': 1, + 'arrowstyle': ATTRS_VAL['arrowstyles'], + 'backarrowstyle': ATTRS_VAL['arrowstyles'], + 'textmode': ATTRS_VAL['textmodes'], + 'priority': 1, + 'anchor': 1, + 'horizontal_order': 1, + } + + +# Misc utilities ############################################################### + +def latin_to_vcg(st): + """Convert latin characters using vcg escape sequence. 
+ """ + for char in st: + if char not in string.ascii_letters: + try: + num = ord(char) + if num >= 192: + st = st.replace(char, r'\fi%d'%ord(char)) + except: + pass + return st + + +class VCGPrinter: + """A vcg graph writer. + """ + + def __init__(self, output_stream): + self._stream = output_stream + self._indent = '' + + def open_graph(self, **args): + """open a vcg graph + """ + self._stream.write('%sgraph:{\n'%self._indent) + self._inc_indent() + self._write_attributes(GRAPH_ATTRS, **args) + + def close_graph(self): + """close a vcg graph + """ + self._dec_indent() + self._stream.write('%s}\n'%self._indent) + + + def node(self, title, **args): + """draw a node + """ + self._stream.write('%snode: {title:"%s"' % (self._indent, title)) + self._write_attributes(NODE_ATTRS, **args) + self._stream.write('}\n') + + + def edge(self, from_node, to_node, edge_type='', **args): + """draw an edge from a node to another. + """ + self._stream.write( + '%s%sedge: {sourcename:"%s" targetname:"%s"' % ( + self._indent, edge_type, from_node, to_node)) + self._write_attributes(EDGE_ATTRS, **args) + self._stream.write('}\n') + + + # private ################################################################## + + def _write_attributes(self, attributes_dict, **args): + """write graph, node or edge attributes + """ + for key, value in args.items(): + try: + _type = attributes_dict[key] + except KeyError: + raise Exception('''no such attribute %s +possible attributes are %s''' % (key, attributes_dict.keys())) + + if not _type: + self._stream.write('%s%s:"%s"\n' % (self._indent, key, value)) + elif _type == 1: + self._stream.write('%s%s:%s\n' % (self._indent, key, + int(value))) + elif value in _type: + self._stream.write('%s%s:%s\n' % (self._indent, key, value)) + else: + raise Exception('''value %s isn\'t correct for attribute %s +correct values are %s''' % (value, key, _type)) + + def _inc_indent(self): + """increment indentation + """ + self._indent = ' %s' % self._indent + + def 
_dec_indent(self): + """decrement indentation + """ + self._indent = self._indent[:-2] diff --git a/pymode/libs/pylama/lint/pylama_pylint/logilab/common/visitor.py b/pymode/libs/logilab/common/visitor.py similarity index 97% rename from pymode/libs/pylama/lint/pylama_pylint/logilab/common/visitor.py rename to pymode/libs/logilab/common/visitor.py index 802d2bef..ed2b70f9 100644 --- a/pymode/libs/pylama/lint/pylama_pylint/logilab/common/visitor.py +++ b/pymode/libs/logilab/common/visitor.py @@ -35,12 +35,14 @@ def __init__(self, node, list_func, filter_func=None): filter_func = no_filter self._list = list_func(node, filter_func) - def next(self): + def __next__(self): try: return self._list.pop(0) except : return None + next = __next__ + # Base Visitor ################################################################ class Visitor(object): @@ -61,10 +63,10 @@ def visit(self, node, *args, **kargs): def _visit(self, node): iterator = self._get_iterator(node) - n = iterator.next() + n = next(iterator) while n: result = n.accept(self) - n = iterator.next() + n = next(iterator) return result def _get_iterator(self, node): diff --git a/pymode/libs/logilab/common/xmlutils.py b/pymode/libs/logilab/common/xmlutils.py new file mode 100644 index 00000000..d383b9d5 --- /dev/null +++ b/pymode/libs/logilab/common/xmlutils.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- +# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved. +# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr +# +# This file is part of logilab-common. +# +# logilab-common is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 2.1 of the License, or (at your option) any +# later version. 
+# +# logilab-common is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License along +# with logilab-common. If not, see . +"""XML utilities. + +This module contains useful functions for parsing and using XML data. For the +moment, there is only one function that can parse the data inside a processing +instruction and return a Python dictionary. + + + + +""" +__docformat__ = "restructuredtext en" + +import re + +RE_DOUBLE_QUOTE = re.compile('([\w\-\.]+)="([^"]+)"') +RE_SIMPLE_QUOTE = re.compile("([\w\-\.]+)='([^']+)'") + +def parse_pi_data(pi_data): + """ + Utility function that parses the data contained in an XML + processing instruction and returns a dictionary of keywords and their + associated values (most of the time, the processing instructions contain + data like ``keyword="value"``, if a keyword is not associated to a value, + for example ``keyword``, it will be associated to ``None``). + + :param pi_data: data contained in an XML processing instruction. + :type pi_data: unicode + + :returns: Dictionary of the keywords (Unicode strings) associated to + their values (Unicode strings) as they were defined in the + data. 
+ :rtype: dict + """ + results = {} + for elt in pi_data.split(): + if RE_DOUBLE_QUOTE.match(elt): + kwd, val = RE_DOUBLE_QUOTE.match(elt).groups() + elif RE_SIMPLE_QUOTE.match(elt): + kwd, val = RE_SIMPLE_QUOTE.match(elt).groups() + else: + kwd, val = elt, None + results[kwd] = val + return results diff --git a/pymode/libs/logilab_common-1.0.2-py2.7-nspkg.pth b/pymode/libs/logilab_common-1.0.2-py2.7-nspkg.pth new file mode 100644 index 00000000..d268b884 --- /dev/null +++ b/pymode/libs/logilab_common-1.0.2-py2.7-nspkg.pth @@ -0,0 +1 @@ +import sys, types, os;p = os.path.join(sys._getframe(1).f_locals['sitedir'], *('logilab',));ie = os.path.exists(os.path.join(p,'__init__.py'));m = not ie and sys.modules.setdefault('logilab', types.ModuleType('logilab'));mp = (m or []) and m.__dict__.setdefault('__path__',[]);(p not in mp) and mp.append(p) diff --git a/pymode/libs/logilab_common-1.0.2.dist-info/DESCRIPTION.rst b/pymode/libs/logilab_common-1.0.2.dist-info/DESCRIPTION.rst new file mode 100644 index 00000000..6b483af3 --- /dev/null +++ b/pymode/libs/logilab_common-1.0.2.dist-info/DESCRIPTION.rst @@ -0,0 +1,153 @@ +Logilab's common library +======================== + +What's this ? +------------- + +This package contains some modules used by different Logilab projects. + +It is released under the GNU Lesser General Public License. + +There is no documentation available yet but the source code should be clean and +well documented. + +Designed to ease: + +* handling command line options and configuration files +* writing interactive command line tools +* manipulation of files and character strings +* manipulation of common structures such as graph, tree, and pattern such as visitor +* generating text and HTML reports +* more... 
+ + +Installation +------------ + +Extract the tarball, jump into the created directory and run :: + + python setup.py install + +For installation options, see :: + + python setup.py install --help + + +Provided modules +---------------- + +Here is a brief description of the available modules. + +Modules providing high-level features +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +* `cache`, a cache implementation with a least recently used algorithm. + +* `changelog`, a tiny library to manipulate our simplified ChangeLog file format. + +* `clcommands`, high-level classes to define command line programs handling + different subcommands. It is based on `configuration` to get easy command line + / configuration file handling. + +* `configuration`, some classes to handle unified configuration from both + command line (using optparse) and configuration file (using ConfigParser). + +* `proc`, interface to Linux /proc. + +* `umessage`, unicode email support. + +* `ureports`, micro-reports, a way to create simple reports using python objects + without care of the final formatting. ReST and html formatters are provided. + + +Modules providing low-level functions and structures +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +* `compat`, provides a transparent compatibility layer between different python + versions. + +* `date`, a set of date manipulation functions. + +* `daemon`, a daemon function and mix-in class to properly start an Unix daemon + process. + +* `decorators`, function decorators such as cached, timed... + +* `deprecation`, decorator, metaclass & all to mark functions / classes as + deprecated or moved + +* `fileutils`, some file / file path manipulation utilities. + +* `graph`, graph manipulations functions such as cycle detection, bases for dot + file generation. + +* `modutils`, python module manipulation functions. + +* `shellutils`, some powerful shell like functions to replace shell scripts with + python scripts. 
+ +* `tasksqueue`, a prioritized tasks queue implementation. + +* `textutils`, some text manipulation functions (ansi colorization, line wrapping, + rest support...). + +* `tree`, base class to represent tree structure, and some others to make it + works with the visitor implementation (see below). + +* `visitor`, a generic visitor pattern implementation. + + +Modules extending some standard modules +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +* `debugger`, `pdb` customization. + +* `logging_ext`, extensions to `logging` module such as a colorized formatter + and an easier initialization function. + +* `optik_ext`, defines some new option types (regexp, csv, color, date, etc.) + for `optik` / `optparse` + + +Modules extending some external modules +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +* `sphinx_ext`, Sphinx_ plugin defining a `autodocstring` directive. + +* `vcgutils` , utilities functions to generate file readable with Georg Sander's + vcg tool (Visualization of Compiler Graphs). + + +To be deprecated modules +~~~~~~~~~~~~~~~~~~~~~~~~ + +Those `logilab.common` modules will much probably be deprecated in future +versions: + +* `testlib`: use `unittest2`_ instead +* `pytest`: use `discover`_ instead +* `interface`: use `zope.interface`_ if you really want this +* `table`, `xmlutils`: is that used? +* `sphinxutils`: we won't go that way imo (i == syt) + + +Comments, support, bug reports +------------------------------ + +Project page https://www.logilab.org/project/logilab-common + +Use the python-projects@lists.logilab.org mailing list. + +You can subscribe to this mailing list at +https://lists.logilab.org/mailman/listinfo/python-projects + +Archives are available at +https://lists.logilab.org/pipermail/python-projects/ + + +.. _Sphinx: http://sphinx.pocoo.org/ +.. _`unittest2`: http://pypi.python.org/pypi/unittest2 +.. _`discover`: http://pypi.python.org/pypi/discover +.. 
_`zope.interface`: http://pypi.python.org/pypi/zope.interface + + diff --git a/pymode/libs/logilab_common-1.0.2.dist-info/METADATA b/pymode/libs/logilab_common-1.0.2.dist-info/METADATA new file mode 100644 index 00000000..9a00a498 --- /dev/null +++ b/pymode/libs/logilab_common-1.0.2.dist-info/METADATA @@ -0,0 +1,169 @@ +Metadata-Version: 2.0 +Name: logilab-common +Version: 1.0.2 +Summary: collection of low-level Python packages and modules used by Logilab projects +Home-page: http://www.logilab.org/project/logilab-common +Author: Logilab +Author-email: contact@logilab.fr +License: LGPL +Platform: UNKNOWN +Classifier: Topic :: Utilities +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 3 +Requires-Dist: setuptools +Requires-Dist: six (>=1.4.0) + +Logilab's common library +======================== + +What's this ? +------------- + +This package contains some modules used by different Logilab projects. + +It is released under the GNU Lesser General Public License. + +There is no documentation available yet but the source code should be clean and +well documented. + +Designed to ease: + +* handling command line options and configuration files +* writing interactive command line tools +* manipulation of files and character strings +* manipulation of common structures such as graph, tree, and pattern such as visitor +* generating text and HTML reports +* more... + + +Installation +------------ + +Extract the tarball, jump into the created directory and run :: + + python setup.py install + +For installation options, see :: + + python setup.py install --help + + +Provided modules +---------------- + +Here is a brief description of the available modules. + +Modules providing high-level features +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +* `cache`, a cache implementation with a least recently used algorithm. 
+ +* `changelog`, a tiny library to manipulate our simplified ChangeLog file format. + +* `clcommands`, high-level classes to define command line programs handling + different subcommands. It is based on `configuration` to get easy command line + / configuration file handling. + +* `configuration`, some classes to handle unified configuration from both + command line (using optparse) and configuration file (using ConfigParser). + +* `proc`, interface to Linux /proc. + +* `umessage`, unicode email support. + +* `ureports`, micro-reports, a way to create simple reports using python objects + without care of the final formatting. ReST and html formatters are provided. + + +Modules providing low-level functions and structures +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +* `compat`, provides a transparent compatibility layer between different python + versions. + +* `date`, a set of date manipulation functions. + +* `daemon`, a daemon function and mix-in class to properly start an Unix daemon + process. + +* `decorators`, function decorators such as cached, timed... + +* `deprecation`, decorator, metaclass & all to mark functions / classes as + deprecated or moved + +* `fileutils`, some file / file path manipulation utilities. + +* `graph`, graph manipulations functions such as cycle detection, bases for dot + file generation. + +* `modutils`, python module manipulation functions. + +* `shellutils`, some powerful shell like functions to replace shell scripts with + python scripts. + +* `tasksqueue`, a prioritized tasks queue implementation. + +* `textutils`, some text manipulation functions (ansi colorization, line wrapping, + rest support...). + +* `tree`, base class to represent tree structure, and some others to make it + works with the visitor implementation (see below). + +* `visitor`, a generic visitor pattern implementation. + + +Modules extending some standard modules +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +* `debugger`, `pdb` customization. 
+ +* `logging_ext`, extensions to `logging` module such as a colorized formatter + and an easier initialization function. + +* `optik_ext`, defines some new option types (regexp, csv, color, date, etc.) + for `optik` / `optparse` + + +Modules extending some external modules +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +* `sphinx_ext`, Sphinx_ plugin defining a `autodocstring` directive. + +* `vcgutils` , utilities functions to generate file readable with Georg Sander's + vcg tool (Visualization of Compiler Graphs). + + +To be deprecated modules +~~~~~~~~~~~~~~~~~~~~~~~~ + +Those `logilab.common` modules will much probably be deprecated in future +versions: + +* `testlib`: use `unittest2`_ instead +* `pytest`: use `discover`_ instead +* `interface`: use `zope.interface`_ if you really want this +* `table`, `xmlutils`: is that used? +* `sphinxutils`: we won't go that way imo (i == syt) + + +Comments, support, bug reports +------------------------------ + +Project page https://www.logilab.org/project/logilab-common + +Use the python-projects@lists.logilab.org mailing list. + +You can subscribe to this mailing list at +https://lists.logilab.org/mailman/listinfo/python-projects + +Archives are available at +https://lists.logilab.org/pipermail/python-projects/ + + +.. _Sphinx: http://sphinx.pocoo.org/ +.. _`unittest2`: http://pypi.python.org/pypi/unittest2 +.. _`discover`: http://pypi.python.org/pypi/discover +.. 
_`zope.interface`: http://pypi.python.org/pypi/zope.interface + + diff --git a/pymode/libs/logilab_common-1.0.2.dist-info/RECORD b/pymode/libs/logilab_common-1.0.2.dist-info/RECORD new file mode 100644 index 00000000..e6e4730a --- /dev/null +++ b/pymode/libs/logilab_common-1.0.2.dist-info/RECORD @@ -0,0 +1,87 @@ +logilab_common-1.0.2-py2.7-nspkg.pth,sha256=ZY-Jf8tK2WQu_mjLvZuFpvpX9uwdpX3yDS1AuRncCZA,308 +logilab/common/__init__.py,sha256=UiR9rv7f7WsAnIHsxa3UApVCJGTzXbZoC-c4EQJpcvg,5390 +logilab/common/cache.py,sha256=wmY87WSoyERDhAlfIKKUipYavlZPpm3sGAQMpzbDHTM,3621 +logilab/common/changelog.py,sha256=Ea_4j22rWJJ33VSCj4Lz0pBGP0wP7LMP2Zo4DR7iZIo,8075 +logilab/common/clcommands.py,sha256=abMNAsB6ADT7Ns5MsxNtAMOlTQGJLCMO9MUkNYdsVG8,11237 +logilab/common/compat.py,sha256=rMGytWS1DCo35MdKUocU1LfLbZA0RyK79Gyu7lvd6Rg,2593 +logilab/common/configuration.py,sha256=s4rg7Qa1_4bpWlTg-bEaHYUcrgvuoDt75ZJgRnlFsME,42160 +logilab/common/daemon.py,sha256=Eqwo_oKjrHtS9SLrtSfeghRTCjqvveGho43s7vMkd7A,3337 +logilab/common/date.py,sha256=nnUN-4onEaWSR8r4PvtmJyn5ukfFzasjEcOGzEdrvqQ,11230 +logilab/common/debugger.py,sha256=Bw2-yI9KrvSgPLDksda4F8nuK_DvxnSCS-ymPSVc778,7094 +logilab/common/decorators.py,sha256=4DD3iNgEQPVz5hPp-SbbgD-ZObXhaeazGqKleyHdXaw,8868 +logilab/common/deprecation.py,sha256=MAxc_Ds9H_j6C7d4VQqMQPB1j-Ib8vy7iBWoQa8aRHs,7417 +logilab/common/fileutils.py,sha256=kCk_8odmAKnYPHPhUruuV-6og8N9kT8fplV-pvwwd4A,12738 +logilab/common/graph.py,sha256=GTSN-kP40EHjnHXk1vxO-56rEszo-esu1S3hf-SOddw,10247 +logilab/common/interface.py,sha256=dXl6kiuXSpefxauu7J6CUv0soe09wjT4_vXbeWQFgJ8,2593 +logilab/common/logging_ext.py,sha256=Yi8k2fGqr_tt-YApT1JjroNpXETxfj84HKmgTgO22Nw,6975 +logilab/common/modutils.py,sha256=w2LVy_vzhGoyBRrKivx0hqx8n326KrtTUezelEwDAcc,24002 +logilab/common/optik_ext.py,sha256=_aZgWKTKCC8_vYIpstNCOk8wewwZ4jfrpvXWrmPzn5Y,13451 +logilab/common/optparser.py,sha256=QgDoAyVoRy7U1fG9BSZ0O7LQsyNayo1HAelZaKlb4kY,3386 
+logilab/common/proc.py,sha256=RGMlPuc11FfrIsqzqNFO3Q6buqt8dvMwXfXKXfwAHks,9352 +logilab/common/pytest.py,sha256=ac7hVpAb06TstSjPV586h1wW21Y__XH5bjrwX55dDOE,46736 +logilab/common/registry.py,sha256=0qIJfNJiqM1HkI-twKHfXiTPU5HKSGRrS-P0Dsj56qw,41550 +logilab/common/shellutils.py,sha256=ZFZ19eX0TCcDrsbOWiy7sr1oqnhQsLixv9n8HakcJiM,14363 +logilab/common/sphinx_ext.py,sha256=pbKN0ObMDY_jy9ehP_7NOKMo40LbQLjf0xntmxHnGr8,3329 +logilab/common/sphinxutils.py,sha256=piY1R04GNR-i1mIb4PRhbGbmbDZPhDsn1FBAiA_Bbrg,4444 +logilab/common/table.py,sha256=5NEx4Ju-jk2CV6W-jxTpOoYArt2BlRpaTZZUBGwu1kg,31408 +logilab/common/tasksqueue.py,sha256=wFE0C0FiuHGBoCnvU-_Kno1eM_Em6yYxYvND6emRN34,2987 +logilab/common/testlib.py,sha256=2Ra9OPs5QpQv7hoZod3M2yYCUdtqSaN3LAvVyiQyA1k,50506 +logilab/common/textutils.py,sha256=TgPGqkN3JsJuR7VxnkoWaOWfkwHiVNB9gpId_3S2xO4,17277 +logilab/common/tree.py,sha256=Y-sa_pfI17cCb-vkyJMaBW3XKVNrreexBgBMPpQJDy0,10606 +logilab/common/umessage.py,sha256=2BuxspHkPEXhlf-XVDye25Mt0RUELneay-K1KNLcS9c,6551 +logilab/common/urllib2ext.py,sha256=FOpxVrbAPtY_6ssq3Qui3zxzckAqLJe9kGkp8tLR0Ic,3416 +logilab/common/vcgutils.py,sha256=tNfi6jxZ4xdUvrjw1cKOodecRlcD0U3MQvTb5HrY5fE,7673 +logilab/common/visitor.py,sha256=5Oc9Y88Kx4wiZ6JAFYFeXwKrMS8jNph9ENVWG3oim1E,3444 +logilab/common/xmlutils.py,sha256=2e4FM-X1PLKBaTG6etLHsAIrtZQiDEA9U7WqM3KjNks,2273 +logilab/common/ureports/__init__.py,sha256=b3_8f4mAm6T3O_-klutleWZ99XjlR-AELfuLEyCbzQ8,6113 +logilab/common/ureports/docbook_writer.py,sha256=KSkIk0W4C4E6DR-Ul_Y9jgnd4_tgVVu15LnU8p2RoeM,5706 +logilab/common/ureports/html_writer.py,sha256=Ee_x9rXjx2NZp290e-0C7nu7VYuKpkCsrl79m4HLI5g,4956 +logilab/common/ureports/nodes.py,sha256=t2NQiL6LQV94D8ugitklVnZRVbz6kP5QkUrl8zGsmMQ,5838 +logilab/common/ureports/text_writer.py,sha256=cMBHbA36_1NrKKnx5LBKczGQmBRg4aObkpr1d581ORU,5212 +../../bin/pytest,sha256=vkYcOC21mDzGBrz4-ajilr8TGxa9tRabxQhyYyXeEDE,124 +logilab_common-1.0.2.dist-info/DESCRIPTION.rst,sha256=bMLyPRBRS-tSzW5zhchxcLlPbYHRv0XEMqs6Oln2z5U,4426 
+logilab_common-1.0.2.dist-info/METADATA,sha256=3_iFYhN84fXSjkdjzHv3grHBY2xIZVLSkmuBeTSnLQE,4934 +logilab_common-1.0.2.dist-info/metadata.json,sha256=dTwpZUieC7dZFkKiNdtgVExm2w1B44k4ZDSaCP3ASXo,742 +logilab_common-1.0.2.dist-info/namespace_packages.txt,sha256=xXemaIbd-285ANf3yiCDkMHRTZSuLvlqL_MTLEJKMuk,8 +logilab_common-1.0.2.dist-info/RECORD,, +logilab_common-1.0.2.dist-info/top_level.txt,sha256=xXemaIbd-285ANf3yiCDkMHRTZSuLvlqL_MTLEJKMuk,8 +logilab_common-1.0.2.dist-info/WHEEL,sha256=54bVun1KfEBTJ68SHUmbxNPj80VxlQ0sHi4gZdGZXEY,92 +logilab/common/logging_ext.pyc,, +logilab/common/date.pyc,, +logilab/common/modutils.pyc,, +logilab/common/ureports/__init__.pyc,, +logilab/common/sphinxutils.pyc,, +logilab/common/ureports/text_writer.pyc,, +logilab/common/optik_ext.pyc,, +logilab/common/visitor.pyc,, +logilab/common/debugger.pyc,, +logilab/common/compat.pyc,, +logilab/common/decorators.pyc,, +logilab/common/textutils.pyc,, +logilab/common/ureports/docbook_writer.pyc,, +logilab/common/shellutils.pyc,, +logilab/common/changelog.pyc,, +logilab/common/interface.pyc,, +logilab/common/ureports/nodes.pyc,, +logilab/common/pytest.pyc,, +logilab/common/sphinx_ext.pyc,, +logilab/common/xmlutils.pyc,, +logilab/common/__init__.pyc,, +logilab/common/tree.pyc,, +logilab/common/umessage.pyc,, +logilab/common/registry.pyc,, +logilab/common/proc.pyc,, +logilab/common/urllib2ext.pyc,, +logilab/common/testlib.pyc,, +logilab/common/clcommands.pyc,, +logilab/common/ureports/html_writer.pyc,, +logilab/common/vcgutils.pyc,, +logilab/common/daemon.pyc,, +logilab/common/table.pyc,, +logilab/common/optparser.pyc,, +logilab/common/deprecation.pyc,, +logilab/common/tasksqueue.pyc,, +logilab/common/fileutils.pyc,, +logilab/common/graph.pyc,, +logilab/common/cache.pyc,, +logilab/common/configuration.pyc,, diff --git a/pymode/libs/logilab_common-1.0.2.dist-info/WHEEL b/pymode/libs/logilab_common-1.0.2.dist-info/WHEEL new file mode 100644 index 00000000..45a0cd88 --- /dev/null +++ 
b/pymode/libs/logilab_common-1.0.2.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.24.0) +Root-Is-Purelib: true +Tag: py2-none-any + diff --git a/pymode/libs/logilab_common-1.0.2.dist-info/metadata.json b/pymode/libs/logilab_common-1.0.2.dist-info/metadata.json new file mode 100644 index 00000000..54212666 --- /dev/null +++ b/pymode/libs/logilab_common-1.0.2.dist-info/metadata.json @@ -0,0 +1 @@ +{"license": "LGPL", "name": "logilab-common", "metadata_version": "2.0", "generator": "bdist_wheel (0.24.0)", "test_requires": [{"requires": ["pytz"]}], "summary": "collection of low-level Python packages and modules used by Logilab projects", "run_requires": [{"requires": ["setuptools", "six (>=1.4.0)"]}], "version": "1.0.2", "extensions": {"python.details": {"project_urls": {"Home": "http://www.logilab.org/project/logilab-common"}, "document_names": {"description": "DESCRIPTION.rst"}, "contacts": [{"role": "author", "email": "contact@logilab.fr", "name": "Logilab"}]}}, "classifiers": ["Topic :: Utilities", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 3"], "extras": []} \ No newline at end of file diff --git a/pymode/libs/logilab_common-1.0.2.dist-info/namespace_packages.txt b/pymode/libs/logilab_common-1.0.2.dist-info/namespace_packages.txt new file mode 100644 index 00000000..3ac267a9 --- /dev/null +++ b/pymode/libs/logilab_common-1.0.2.dist-info/namespace_packages.txt @@ -0,0 +1 @@ +logilab diff --git a/pymode/libs/logilab_common-1.0.2.dist-info/top_level.txt b/pymode/libs/logilab_common-1.0.2.dist-info/top_level.txt new file mode 100644 index 00000000..3ac267a9 --- /dev/null +++ b/pymode/libs/logilab_common-1.0.2.dist-info/top_level.txt @@ -0,0 +1 @@ +logilab diff --git a/pymode/libs/pylama/lint/pylama_mccabe/mccabe.py b/pymode/libs/mccabe.py similarity index 86% rename from pymode/libs/pylama/lint/pylama_mccabe/mccabe.py rename to pymode/libs/mccabe.py index 
82bdf5f0..90bf19cd 100644 --- a/pymode/libs/pylama/lint/pylama_mccabe/mccabe.py +++ b/pymode/libs/mccabe.py @@ -14,7 +14,7 @@ except ImportError: # Python 2.5 from flake8.util import ast, iter_child_nodes -__version__ = '0.2.1' +__version__ = '0.3.1' class ASTVisitor(object): @@ -67,6 +67,8 @@ def __init__(self, name, entity, lineno): def connect(self, n1, n2): self.nodes[n1].append(n2) + # Ensure that the destination node is always counted. + self.nodes[n2] = [] def to_dot(self): print('subgraph {') @@ -160,34 +162,37 @@ def visitSimpleStatement(self, node): def visitLoop(self, node): name = "Loop %d" % node.lineno + self._subgraph(node, name) + visitFor = visitWhile = visitLoop + + def visitIf(self, node): + name = "If %d" % node.lineno + self._subgraph(node, name) + + def _subgraph(self, node, name, extra_blocks=()): + """create the subgraphs representing any `if` and `for` statements""" if self.graph is None: # global loop self.graph = PathGraph(name, name, node.lineno) pathnode = PathNode(name) - self.tail = pathnode - self.dispatch_list(node.body) + self._subgraph_parse(node, pathnode, extra_blocks) self.graphs["%s%s" % (self.classname, name)] = self.graph self.reset() else: pathnode = self.appendPathNode(name) - self.tail = pathnode - self.dispatch_list(node.body) - bottom = PathNode("", look='point') - self.graph.connect(self.tail, bottom) - self.graph.connect(pathnode, bottom) - self.tail = bottom - - # TODO: else clause in node.orelse + self._subgraph_parse(node, pathnode, extra_blocks) - visitFor = visitWhile = visitLoop - - def visitIf(self, node): - name = "If %d" % node.lineno - pathnode = self.appendPathNode(name) + def _subgraph_parse(self, node, pathnode, extra_blocks): + """parse the body and any `else` block of `if` and `for` statements""" loose_ends = [] + self.tail = pathnode self.dispatch_list(node.body) loose_ends.append(self.tail) + for extra in extra_blocks: + self.tail = pathnode + self.dispatch_list(extra.body) + 
loose_ends.append(self.tail) if node.orelse: self.tail = pathnode self.dispatch_list(node.orelse) @@ -202,19 +207,9 @@ def visitIf(self, node): def visitTryExcept(self, node): name = "TryExcept %d" % node.lineno - pathnode = self.appendPathNode(name) - loose_ends = [] - self.dispatch_list(node.body) - loose_ends.append(self.tail) - for handler in node.handlers: - self.tail = pathnode - self.dispatch_list(handler.body) - loose_ends.append(self.tail) - if pathnode: - bottom = PathNode("", look='point') - for le in loose_ends: - self.graph.connect(le, bottom) - self.tail = bottom + self._subgraph(node, name, extra_blocks=node.handlers) + + visitTry = visitTryExcept def visitWith(self, node): name = "With %d" % node.lineno @@ -241,7 +236,7 @@ def add_options(cls, parser): @classmethod def parse_options(cls, options): - cls.max_complexity = options.max_complexity + cls.max_complexity = int(options.max_complexity) def run(self): if self.max_complexity < 0: @@ -249,7 +244,7 @@ def run(self): visitor = PathGraphingAstVisitor() visitor.preorder(self.tree, visitor) for graph in visitor.graphs.values(): - if graph.complexity() >= self.max_complexity: + if graph.complexity() > self.max_complexity: text = self._error_tmpl % (graph.entity, graph.complexity()) yield graph.lineno, 0, text, type(self) @@ -265,13 +260,12 @@ def get_code_complexity(code, threshold=7, filename='stdin'): complx = [] McCabeChecker.max_complexity = threshold for lineno, offset, text, check in McCabeChecker(tree, filename).run(): - complx.append(dict( - type=McCabeChecker._code, - lnum=lineno, - text=text, - )) + complx.append('%s:%d:1: %s' % (filename, lineno, text)) - return complx + if len(complx) == 0: + return 0 + print('\n'.join(complx)) + return len(complx) def get_module_complexity(module_path, threshold=7): @@ -281,13 +275,15 @@ def get_module_complexity(module_path, threshold=7): return get_code_complexity(code, threshold, filename=module_path) -def main(argv): +def main(argv=None): + if argv is 
None: + argv = sys.argv[1:] opar = optparse.OptionParser() opar.add_option("-d", "--dot", dest="dot", help="output a graphviz dot file", action="store_true") opar.add_option("-m", "--min", dest="threshold", help="minimum complexity for output", type="int", - default=2) + default=1) options, args = opar.parse_args(argv) @@ -300,7 +296,8 @@ def main(argv): if options.dot: print('graph {') for graph in visitor.graphs.values(): - if graph.complexity() >= options.threshold: + if (not options.threshold or + graph.complexity() >= options.threshold): graph.to_dot() print('}') else: @@ -311,3 +308,4 @@ def main(argv): if __name__ == '__main__': main(sys.argv[1:]) + diff --git a/pymode/libs/pep257.py b/pymode/libs/pep257.py new file mode 100644 index 00000000..79d9eee1 --- /dev/null +++ b/pymode/libs/pep257.py @@ -0,0 +1,1187 @@ +#! /usr/bin/env python +"""Static analysis tool for checking docstring conventions and style. + +Implemented checks cover PEP257: +http://www.python.org/dev/peps/pep-0257/ + +Other checks can be added, e.g. 
NumPy docstring conventions: +https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt + +The repository is located at: +http://github.com/GreenSteam/pep257 + +""" +from __future__ import with_statement + +import os +import sys +import logging +import tokenize as tk +from itertools import takewhile, dropwhile, chain +from optparse import OptionParser +from re import compile as re +import itertools + +try: # Python 3.x + from ConfigParser import RawConfigParser +except ImportError: # Python 2.x + from configparser import RawConfigParser + +log = logging.getLogger(__name__) + + +try: + from StringIO import StringIO +except ImportError: # Python 3.0 and later + from io import StringIO + + +try: + next +except NameError: # Python 2.5 and earlier + nothing = object() + + def next(obj, default=nothing): + if default == nothing: + return obj.next() + else: + try: + return obj.next() + except StopIteration: + return default + + +# If possible (python >= 3.2) use tokenize.open to open files, so PEP 263 +# encoding markers are interpreted. 
+try: + tokenize_open = tk.open +except AttributeError: + tokenize_open = open + + +__version__ = '0.6.1-alpha' +__all__ = ('check', 'collect') + +PROJECT_CONFIG = ('setup.cfg', 'tox.ini', '.pep257') +NO_VIOLATIONS_RETURN_CODE = 0 +VIOLATIONS_RETURN_CODE = 1 +INVALID_OPTIONS_RETURN_CODE = 2 + + +def humanize(string): + return re(r'(.)([A-Z]+)').sub(r'\1 \2', string).lower() + + +def is_magic(name): + return name.startswith('__') and name.endswith('__') + + +def is_ascii(string): + return all(ord(char) < 128 for char in string) + + +def is_blank(string): + return not string.strip() + + +def leading_space(string): + return re('\s*').match(string).group() + + +class Value(object): + + def __init__(self, *args): + vars(self).update(zip(self._fields, args)) + + def __hash__(self): + return hash(repr(self)) + + def __eq__(self, other): + return other and vars(self) == vars(other) + + def __repr__(self): + kwargs = ', '.join('{}={!r}'.format(field, getattr(self, field)) + for field in self._fields) + return '{}({})'.format(self.__class__.__name__, kwargs) + + +class Definition(Value): + + _fields = ('name', '_source', 'start', 'end', 'decorators', 'docstring', + 'children', 'parent') + + _human = property(lambda self: humanize(type(self).__name__)) + kind = property(lambda self: self._human.split()[-1]) + module = property(lambda self: self.parent.module) + all = property(lambda self: self.module.all) + _slice = property(lambda self: slice(self.start - 1, self.end)) + source = property(lambda self: ''.join(self._source[self._slice])) + + def __iter__(self): + return chain([self], *self.children) + + @property + def _publicity(self): + return {True: 'public', False: 'private'}[self.is_public] + + def __str__(self): + return 'in %s %s `%s`' % (self._publicity, self._human, self.name) + + +class Module(Definition): + + _fields = ('name', '_source', 'start', 'end', 'decorators', 'docstring', + 'children', 'parent', '_all') + is_public = True + _nest = staticmethod(lambda s: 
{'def': Function, 'class': Class}[s]) + module = property(lambda self: self) + all = property(lambda self: self._all) + + def __str__(self): + return 'at module level' + + +class Package(Module): + + """A package is a __init__.py module.""" + + +class Function(Definition): + + _nest = staticmethod(lambda s: {'def': NestedFunction, + 'class': NestedClass}[s]) + + @property + def is_public(self): + if self.all is not None: + return self.name in self.all + else: # TODO: are there any magic functions? not methods + return not self.name.startswith('_') or is_magic(self.name) + + +class NestedFunction(Function): + + is_public = False + + +class Method(Function): + + @property + def is_public(self): + # Check if we are a setter/deleter method, and mark as private if so. + for decorator in self.decorators: + # Given 'foo', match 'foo.bar' but not 'foobar' or 'sfoo' + if re(r"^{0}\.".format(self.name)).match(decorator.name): + return False + name_is_public = not self.name.startswith('_') or is_magic(self.name) + return self.parent.is_public and name_is_public + + +class Class(Definition): + + _nest = staticmethod(lambda s: {'def': Method, 'class': NestedClass}[s]) + is_public = Function.is_public + + +class NestedClass(Class): + + is_public = False + + +class Decorator(Value): + + """A decorator for function, method or class.""" + + _fields = 'name arguments'.split() + + +class TokenKind(int): + def __repr__(self): + return "tk.{}".format(tk.tok_name[self]) + + +class Token(Value): + + _fields = 'kind value start end source'.split() + + def __init__(self, *args): + super(Token, self).__init__(*args) + self.kind = TokenKind(self.kind) + + +class TokenStream(object): + + def __init__(self, filelike): + self._generator = tk.generate_tokens(filelike.readline) + self.current = Token(*next(self._generator, None)) + self.line = self.current.start[0] + + def move(self): + previous = self.current + current = next(self._generator, None) + self.current = None if current is None else 
Token(*current) + self.line = self.current.start[0] if self.current else self.line + return previous + + def __iter__(self): + while True: + if self.current is not None: + yield self.current + else: + return + self.move() + + +class AllError(Exception): + + def __init__(self, message): + Exception.__init__( + self, message + + 'That means pep257 cannot decide which definitions are public. ' + 'Variable __all__ should be present at most once in each file, ' + "in form `__all__ = ('a_public_function', 'APublicClass', ...)`. " + 'More info on __all__: http://stackoverflow.com/q/44834/. ') + + +class Parser(object): + + def __call__(self, filelike, filename): + self.source = filelike.readlines() + src = ''.join(self.source) + self.stream = TokenStream(StringIO(src)) + self.filename = filename + self.all = None + self._accumulated_decorators = [] + return self.parse_module() + + current = property(lambda self: self.stream.current) + line = property(lambda self: self.stream.line) + + def consume(self, kind): + assert self.stream.move().kind == kind + + def leapfrog(self, kind, value=None): + """Skip tokens in the stream until a certain token kind is reached. + + If `value` is specified, tokens whose values are different will also + be skipped. + """ + while self.current is not None: + if (self.current.kind == kind and + (value is None or self.current.value == value)): + self.consume(kind) + return + self.stream.move() + + def parse_docstring(self): + """Parse a single docstring and return its value.""" + log.debug("parsing docstring, token is %r (%s)", + self.current.kind, self.current.value) + while self.current.kind in (tk.COMMENT, tk.NEWLINE, tk.NL): + self.stream.move() + log.debug("parsing docstring, token is %r (%s)", + self.current.kind, self.current.value) + if self.current.kind == tk.STRING: + docstring = self.current.value + self.stream.move() + return docstring + return None + + def parse_decorators(self): + """Called after first @ is found. 
+ + Parse decorators into self._accumulated_decorators. + Continue to do so until encountering the 'def' or 'class' start token. + """ + name = [] + arguments = [] + at_arguments = False + + while self.current is not None: + if (self.current.kind == tk.NAME and + self.current.value in ['def', 'class']): + # Done with decorators - found function or class proper + break + elif self.current.kind == tk.OP and self.current.value == '@': + # New decorator found. Store the decorator accumulated so far: + self._accumulated_decorators.append( + Decorator(''.join(name), ''.join(arguments))) + # Now reset to begin accumulating the new decorator: + name = [] + arguments = [] + at_arguments = False + elif self.current.kind == tk.OP and self.current.value == '(': + at_arguments = True + elif self.current.kind == tk.OP and self.current.value == ')': + # Ignore close parenthesis + pass + elif self.current.kind == tk.NEWLINE or self.current.kind == tk.NL: + # Ignore newlines + pass + else: + # Keep accumulating current decorator's name or argument. 
+ if not at_arguments: + name.append(self.current.value) + else: + arguments.append(self.current.value) + self.stream.move() + + # Add decorator accumulated so far + self._accumulated_decorators.append( + Decorator(''.join(name), ''.join(arguments))) + + def parse_definitions(self, class_, all=False): + """Parse multiple defintions and yield them.""" + while self.current is not None: + log.debug("parsing defintion list, current token is %r (%s)", + self.current.kind, self.current.value) + if all and self.current.value == '__all__': + self.parse_all() + elif self.current.kind == tk.OP and self.current.value == '@': + self.consume(tk.OP) + self.parse_decorators() + elif self.current.value in ['def', 'class']: + yield self.parse_definition(class_._nest(self.current.value)) + elif self.current.kind == tk.INDENT: + self.consume(tk.INDENT) + for definition in self.parse_definitions(class_): + yield definition + elif self.current.kind == tk.DEDENT: + self.consume(tk.DEDENT) + return + else: + self.stream.move() + + def parse_all(self): + """Parse the __all__ definition in a module.""" + assert self.current.value == '__all__' + self.consume(tk.NAME) + if self.current.value != '=': + raise AllError('Could not evaluate contents of __all__. ') + self.consume(tk.OP) + if self.current.value not in '([': + raise AllError('Could not evaluate contents of __all__. ') + if self.current.value == '[': + msg = ("%s WARNING: __all__ is defined as a list, this means " + "pep257 cannot reliably detect contents of the __all__ " + "variable, because it can be mutated. Change __all__ to be " + "an (immutable) tuple, to remove this warning. Note, " + "pep257 uses __all__ to detect which definitions are " + "public, to warn if public definitions are missing " + "docstrings. If __all__ is a (mutable) list, pep257 cannot " + "reliably assume its contents. 
pep257 will proceed " + "assuming __all__ is not mutated.\n" % self.filename) + sys.stderr.write(msg) + self.consume(tk.OP) + + self.all = [] + all_content = "(" + while self.current.kind != tk.OP or self.current.value not in ")]": + if self.current.kind in (tk.NL, tk.COMMENT): + pass + elif (self.current.kind == tk.STRING or + self.current.value == ','): + all_content += self.current.value + else: + kind = token.tok_name[self.current.kind] + raise AllError('Unexpected token kind in __all__: %s' % kind) + self.stream.move() + self.consume(tk.OP) + all_content += ")" + try: + self.all = eval(all_content, {}) + except BaseException as e: + raise AllError('Could not evaluate contents of __all__.' + '\bThe value was %s. The exception was:\n%s' + % (all_content, e)) + + def parse_module(self): + """Parse a module (and its children) and return a Module object.""" + log.debug("parsing module.") + start = self.line + docstring = self.parse_docstring() + children = list(self.parse_definitions(Module, all=True)) + assert self.current is None, self.current + end = self.line + cls = Module + if self.filename.endswith('__init__.py'): + cls = Package + module = cls(self.filename, self.source, start, end, + [], docstring, children, None, self.all) + for child in module.children: + child.parent = module + log.debug("finished parsing module.") + return module + + def parse_definition(self, class_): + """Parse a defintion and return its value in a `class_` object.""" + start = self.line + self.consume(tk.NAME) + name = self.current.value + log.debug("parsing %s '%s'", class_.__name__, name) + self.stream.move() + if self.current.kind == tk.OP and self.current.value == '(': + parenthesis_level = 0 + while True: + if self.current.kind == tk.OP: + if self.current.value == '(': + parenthesis_level += 1 + elif self.current.value == ')': + parenthesis_level -= 1 + if parenthesis_level == 0: + break + self.stream.move() + if self.current.kind != tk.OP or self.current.value != ':': + 
self.leapfrog(tk.OP, value=":") + else: + self.consume(tk.OP) + if self.current.kind in (tk.NEWLINE, tk.COMMENT): + self.leapfrog(tk.INDENT) + assert self.current.kind != tk.INDENT + docstring = self.parse_docstring() + decorators = self._accumulated_decorators + self._accumulated_decorators = [] + log.debug("parsing nested defintions.") + children = list(self.parse_definitions(class_)) + log.debug("finished parsing nested defintions for '%s'", name) + end = self.line - 1 + else: # one-liner definition + docstring = self.parse_docstring() + decorators = [] # TODO + children = [] + end = self.line + self.leapfrog(tk.NEWLINE) + definition = class_(name, self.source, start, end, + decorators, docstring, children, None) + for child in definition.children: + child.parent = definition + log.debug("finished parsing %s '%s'. Next token is %r (%s)", + class_.__name__, name, self.current.kind, + self.current.value) + return definition + + +class Error(object): + + """Error in docstring style.""" + + # should be overridden by inheriting classes + code = None + short_desc = None + context = None + + # Options that define how errors are printed: + explain = False + source = False + + def __init__(self, *parameters): + self.parameters = parameters + self.definition = None + self.explanation = None + + def set_context(self, definition, explanation): + self.definition = definition + self.explanation = explanation + + filename = property(lambda self: self.definition.module.name) + line = property(lambda self: self.definition.start) + + @property + def message(self): + ret = '%s: %s' % (self.code, self.short_desc) + if self.context is not None: + ret += ' (' + self.context % self.parameters + ')' + return ret + + @property + def lines(self): + source = '' + lines = self.definition._source[self.definition._slice] + offset = self.definition.start + lines_stripped = list(reversed(list(dropwhile(is_blank, + reversed(lines))))) + numbers_width = 0 + for n, line in 
enumerate(lines_stripped): + numbers_width = max(numbers_width, n + offset) + numbers_width = len(str(numbers_width)) + numbers_width = 6 + for n, line in enumerate(lines_stripped): + source += '%*d: %s' % (numbers_width, n + offset, line) + if n > 5: + source += ' ...\n' + break + return source + + def __str__(self): + self.explanation = '\n'.join(l for l in self.explanation.split('\n') + if not is_blank(l)) + template = '%(filename)s:%(line)s %(definition)s:\n %(message)s' + if self.source and self.explain: + template += '\n\n%(explanation)s\n\n%(lines)s\n' + elif self.source and not self.explain: + template += '\n\n%(lines)s\n' + elif self.explain and not self.source: + template += '\n\n%(explanation)s\n\n' + return template % dict((name, getattr(self, name)) for name in + ['filename', 'line', 'definition', 'message', + 'explanation', 'lines']) + + __repr__ = __str__ + + def __lt__(self, other): + return (self.filename, self.line) < (other.filename, other.line) + + +class ErrorRegistry(object): + groups = [] + + class ErrorGroup(object): + + def __init__(self, prefix, name): + self.prefix = prefix + self.name = name + self.errors = [] + + def create_error(self, error_code, error_desc, error_context=None): + # TODO: check prefix + + class _Error(Error): + code = error_code + short_desc = error_desc + context = error_context + + self.errors.append(_Error) + return _Error + + @classmethod + def create_group(cls, prefix, name): + group = cls.ErrorGroup(prefix, name) + cls.groups.append(group) + return group + + @classmethod + def get_error_codes(cls): + for group in cls.groups: + for error in group.errors: + yield error.code + + @classmethod + def to_rst(cls): + sep_line = '+' + 6 * '-' + '+' + '-' * 71 + '+\n' + blank_line = '|' + 78 * ' ' + '|\n' + table = '' + for group in cls.groups: + table += sep_line + table += blank_line + table += '|' + ('**%s**' % group.name).center(78) + '|\n' + table += blank_line + for error in group.errors: + table += sep_line + table 
+= ('|' + error.code.center(6) + '| ' + + error.short_desc.ljust(70) + '|\n') + table += sep_line + return table + + +D1xx = ErrorRegistry.create_group('D1', 'Missing Docstrings') +D100 = D1xx.create_error('D100', 'Missing docstring in public module') +D101 = D1xx.create_error('D101', 'Missing docstring in public class') +D102 = D1xx.create_error('D102', 'Missing docstring in public method') +D103 = D1xx.create_error('D103', 'Missing docstring in public function') +D104 = D1xx.create_error('D104', 'Missing docstring in public package') + +D2xx = ErrorRegistry.create_group('D2', 'Whitespace Issues') +D200 = D2xx.create_error('D200', 'One-line docstring should fit on one line ' + 'with quotes', 'found %s') +D201 = D2xx.create_error('D201', 'No blank lines allowed before function ' + 'docstring', 'found %s') +D202 = D2xx.create_error('D202', 'No blank lines allowed after function ' + 'docstring', 'found %s') +D203 = D2xx.create_error('D203', '1 blank line required before class ' + 'docstring', 'found %s') +D204 = D2xx.create_error('D204', '1 blank line required after class ' + 'docstring', 'found %s') +D205 = D2xx.create_error('D205', '1 blank line required between summary line ' + 'and description', 'found %s') +D206 = D2xx.create_error('D206', 'Docstring should be indented with spaces, ' + 'not tabs') +D207 = D2xx.create_error('D207', 'Docstring is under-indented') +D208 = D2xx.create_error('D208', 'Docstring is over-indented') +D209 = D2xx.create_error('D209', 'Multi-line docstring closing quotes should ' + 'be on a separate line') +D210 = D2xx.create_error('D210', 'No whitespaces allowed surrounding ' + 'docstring text') + +D3xx = ErrorRegistry.create_group('D3', 'Quotes Issues') +D300 = D3xx.create_error('D300', 'Use """triple double quotes"""', + 'found %s-quotes') +D301 = D3xx.create_error('D301', 'Use r""" if any backslashes in a docstring') +D302 = D3xx.create_error('D302', 'Use u""" for Unicode docstrings') + +D4xx = ErrorRegistry.create_group('D4', 
'Docstring Content Issues') +D400 = D4xx.create_error('D400', 'First line should end with a period', + 'not %r') +D401 = D4xx.create_error('D401', 'First line should be in imperative mood', + '%r, not %r') +D402 = D4xx.create_error('D402', 'First line should not be the function\'s ' + '"signature"') + + +class Conventions(object): + pep257 = set(ErrorRegistry.get_error_codes()) + + +def get_option_parser(): + parser = OptionParser(version=__version__, + usage='Usage: pep257 [options] [...]') + parser.config_options = ('explain', 'source', 'ignore', 'match', 'select', + 'match-dir', 'debug', 'verbose', 'count', + 'convention') + option = parser.add_option + option('-e', '--explain', action='store_true', + help='show explanation of each error') + option('-s', '--source', action='store_true', + help='show source for each error') + option('--select', metavar='', default='', + help='choose the basic list of checked errors by specifying which ' + 'errors to check for (with a list of comma-separated error ' + 'codes). for example: --select=D101,D202') + option('--ignore', metavar='', default='', + help='choose the basic list of checked errors by specifying which ' + 'errors to ignore (with a list of comma-separated error ' + 'codes). for example: --ignore=D101,D202') + option('--convention', metavar='', default='', + help='choose the basic list of checked errors by specifying an ' + 'existing convention. 
for example: --convention=pep257') + option('--add-select', metavar='', default='', + help='amend the list of errors to check for by specifying more ' + 'error codes to check.') + option('--add-ignore', metavar='', default='', + help='amend the list of errors to check for by specifying more ' + 'error codes to ignore.') + option('--match', metavar='', default='(?!test_).*\.py', + help="check only files that exactly match regular " + "expression; default is --match='(?!test_).*\.py' which " + "matches files that don't start with 'test_' but end with " + "'.py'") + option('--match-dir', metavar='', default='[^\.].*', + help="search only dirs that exactly match regular " + "expression; default is --match-dir='[^\.].*', which matches " + "all dirs that don't start with a dot") + option('-d', '--debug', action='store_true', + help='print debug information') + option('-v', '--verbose', action='store_true', + help='print status information') + option('--count', action='store_true', + help='print total number of errors to stdout') + return parser + + +def collect(names, match=lambda name: True, match_dir=lambda name: True): + """Walk dir trees under `names` and generate filnames that `match`. + + Example + ------- + >>> sorted(collect(['non-dir.txt', './'], + ... match=lambda name: name.endswith('.py'))) + ['non-dir.txt', './pep257.py', './setup.py', './test_pep257.py'] + + """ + for name in names: # map(expanduser, names): + if os.path.isdir(name): + for root, dirs, filenames in os.walk(name): + # Skip any dirs that do not match match_dir + dirs[:] = [dir for dir in dirs if match_dir(dir)] + for filename in filenames: + if match(filename): + yield os.path.join(root, filename) + else: + yield name + + +def check(filenames, select=None, ignore=None): + """Generate PEP 257 errors that exist in `filenames` iterable. + + Only returns errors with error-codes defined in `checked_codes` iterable. 
+ + Example + ------- + >>> check(['pep257.py'], checked_codes=['D100']) + + + """ + if select and ignore: + raise ValueError('Cannot pass both select and ignore. They are ' + 'mutually exclusive.') + elif select or ignore: + checked_codes = (select or + set(ErrorRegistry.get_error_codes()) - set(ignore)) + else: + checked_codes = Conventions.pep257 + + for filename in filenames: + log.info('Checking file %s.', filename) + try: + with tokenize_open(filename) as file: + source = file.read() + for error in PEP257Checker().check_source(source, filename): + code = getattr(error, 'code', None) + if code in checked_codes: + yield error + except (EnvironmentError, AllError): + yield sys.exc_info()[1] + except tk.TokenError: + yield SyntaxError('invalid syntax in file %s' % filename) + + +def get_options(args, opt_parser): + config = RawConfigParser() + parent = tail = os.path.abspath(os.path.commonprefix(args)) + config_found = False + while tail and not config_found: + log.info(tail) + for fn in PROJECT_CONFIG: + full_path = os.path.join(parent, fn) + if config.read(full_path): + log.info('local configuration: in %s.', full_path) + config_found = True + break + parent, tail = os.path.split(parent) + + new_options = None + if config.has_section('pep257'): + option_list = dict([(o.dest, o.type or o.action) + for o in opt_parser.option_list]) + + # First, read the default values + new_options, _ = opt_parser.parse_args([]) + + # Second, parse the configuration + pep257_section = 'pep257' + for opt in config.options(pep257_section): + if opt.replace('_', '-') not in opt_parser.config_options: + log.warning("Unknown option '{}' ignored".format(opt)) + continue + normalized_opt = opt.replace('-', '_') + opt_type = option_list[normalized_opt] + if opt_type in ('int', 'count'): + value = config.getint(pep257_section, opt) + elif opt_type == 'string': + value = config.get(pep257_section, opt) + else: + assert opt_type in ('store_true', 'store_false') + value = 
config.getboolean(pep257_section, opt) + setattr(new_options, normalized_opt, value) + + # Third, overwrite with the command-line options + options, _ = opt_parser.parse_args(values=new_options) + log.debug("options: %s", options) + return options + + +def setup_stream_handlers(options): + """Setup logging stream handlers according to the options.""" + class StdoutFilter(logging.Filter): + def filter(self, record): + return record.levelno in (logging.DEBUG, logging.INFO) + + if log.handlers: + for handler in log.handlers: + log.removeHandler(handler) + + stdout_handler = logging.StreamHandler(sys.stdout) + stdout_handler.setLevel(logging.WARNING) + stdout_handler.addFilter(StdoutFilter()) + if options.debug: + stdout_handler.setLevel(logging.DEBUG) + elif options.verbose: + stdout_handler.setLevel(logging.INFO) + else: + stdout_handler.setLevel(logging.WARNING) + log.addHandler(stdout_handler) + + stderr_handler = logging.StreamHandler(sys.stderr) + stderr_handler.setLevel(logging.WARNING) + log.addHandler(stderr_handler) + + +def get_checked_error_codes(options): + codes = set(ErrorRegistry.get_error_codes()) + if options.ignore: + checked_codes = codes - set(options.ignore.split(',')) + elif options.select: + checked_codes = set(options.select.split(',')) + elif options.convention: + checked_codes = getattr(Conventions, options.convention) + else: + checked_codes = Conventions.pep257 + checked_codes -= set(options.add_ignore.split(',')) + checked_codes |= set(options.add_select.split(',')) + return checked_codes - set('') + + +def validate_options(options): + mutually_exclusive = ('ignore', 'select', 'convention') + for opt1, opt2 in itertools.permutations(mutually_exclusive, 2): + if getattr(options, opt1) and getattr(options, opt2): + log.error('Cannot pass both {0} and {1}. 
They are ' + 'mutually exclusive.'.format(opt1, opt2)) + return False + if options.convention and not hasattr(Conventions, options.convention): + return False + return True + + +def run_pep257(): + log.setLevel(logging.DEBUG) + opt_parser = get_option_parser() + # setup the logger before parsing the config file, so that command line + # arguments for debug / verbose will be printed. + options, arguments = opt_parser.parse_args() + setup_stream_handlers(options) + # We parse the files before opening the config file, since it changes where + # we look for the file. + options = get_options(arguments, opt_parser) + if not validate_options(options): + return INVALID_OPTIONS_RETURN_CODE + # Setup the handler again with values from the config file. + setup_stream_handlers(options) + + collected = collect(arguments or ['.'], + match=re(options.match + '$').match, + match_dir=re(options.match_dir + '$').match) + + log.debug("starting pep257 in debug mode.") + + Error.explain = options.explain + Error.source = options.source + collected = list(collected) + checked_codes = get_checked_error_codes(options) + errors = check(collected, select=checked_codes) + code = NO_VIOLATIONS_RETURN_CODE + count = 0 + for error in errors: + sys.stderr.write('%s\n' % error) + code = VIOLATIONS_RETURN_CODE + count += 1 + if options.count: + print(count) + return code + + +parse = Parser() + + +def check_for(kind, terminal=False): + def decorator(f): + f._check_for = kind + f._terminal = terminal + return f + return decorator + + +class PEP257Checker(object): + + """Checker for PEP 257. 
+ + D10x: Missing docstrings + D20x: Whitespace issues + D30x: Docstring formatting + D40x: Docstring content issues + + """ + + def check_source(self, source, filename): + module = parse(StringIO(source), filename) + for definition in module: + for check in self.checks: + terminate = False + if isinstance(definition, check._check_for): + error = check(None, definition, definition.docstring) + errors = error if hasattr(error, '__iter__') else [error] + for error in errors: + if error is not None: + partition = check.__doc__.partition('.\n') + message, _, explanation = partition + error.set_context(explanation=explanation, + definition=definition) + yield error + if check._terminal: + terminate = True + break + if terminate: + break + + @property + def checks(self): + all = [check for check in vars(type(self)).values() + if hasattr(check, '_check_for')] + return sorted(all, key=lambda check: not check._terminal) + + @check_for(Definition, terminal=True) + def check_docstring_missing(self, definition, docstring): + """D10{0,1,2,3}: Public definitions should have docstrings. + + All modules should normally have docstrings. [...] all functions and + classes exported by a module should also have docstrings. Public + methods (including the __init__ constructor) should also have + docstrings. + + Note: Public (exported) definitions are either those with names listed + in __all__ variable (if present), or those that do not start + with a single underscore. + + """ + if (not docstring and definition.is_public or + docstring and is_blank(eval(docstring))): + codes = {Module: D100, Class: D101, NestedClass: D101, + Method: D102, Function: D103, NestedFunction: D103, + Package: D104} + return codes[type(definition)]() + + @check_for(Definition) + def check_one_liners(self, definition, docstring): + """D200: One-liner docstrings should fit on one line with quotes. + + The closing quotes are on the same line as the opening quotes. + This looks better for one-liners. 
+ + """ + if docstring: + lines = eval(docstring).split('\n') + if len(lines) > 1: + non_empty_lines = sum(1 for l in lines if not is_blank(l)) + if non_empty_lines == 1: + return D200(len(lines)) + + @check_for(Function) + def check_no_blank_before(self, function, docstring): # def + """D20{1,2}: No blank lines allowed around function/method docstring. + + There's no blank line either before or after the docstring. + + """ + # NOTE: This does not take comments into account. + # NOTE: This does not take into account functions with groups of code. + if docstring: + before, _, after = function.source.partition(docstring) + blanks_before = list(map(is_blank, before.split('\n')[:-1])) + blanks_after = list(map(is_blank, after.split('\n')[1:])) + blanks_before_count = sum(takewhile(bool, reversed(blanks_before))) + blanks_after_count = sum(takewhile(bool, blanks_after)) + if blanks_before_count != 0: + yield D201(blanks_before_count) + if not all(blanks_after) and blanks_after_count != 0: + yield D202(blanks_after_count) + + @check_for(Class) + def check_blank_before_after_class(slef, class_, docstring): + """D20{3,4}: Class docstring should have 1 blank line around them. + + Insert a blank line before and after all docstrings (one-line or + multi-line) that document a class -- generally speaking, the class's + methods are separated from each other by a single blank line, and the + docstring needs to be offset from the first method by a blank line; + for symmetry, put a blank line between the class header and the + docstring. 
+ + """ + # NOTE: this gives false-positive in this case + # class Foo: + # + # """Docstring.""" + # + # + # # comment here + # def foo(): pass + if docstring: + before, _, after = class_.source.partition(docstring) + blanks_before = list(map(is_blank, before.split('\n')[:-1])) + blanks_after = list(map(is_blank, after.split('\n')[1:])) + blanks_before_count = sum(takewhile(bool, reversed(blanks_before))) + blanks_after_count = sum(takewhile(bool, blanks_after)) + if blanks_before_count != 1: + yield D203(blanks_before_count) + if not all(blanks_after) and blanks_after_count != 1: + yield D204(blanks_after_count) + + @check_for(Definition) + def check_blank_after_summary(self, definition, docstring): + """D205: Put one blank line between summary line and description. + + Multi-line docstrings consist of a summary line just like a one-line + docstring, followed by a blank line, followed by a more elaborate + description. The summary line may be used by automatic indexing tools; + it is important that it fits on one line and is separated from the + rest of the docstring by a blank line. + + """ + if docstring: + lines = eval(docstring).strip().split('\n') + if len(lines) > 1: + post_summary_blanks = list(map(is_blank, lines[1:])) + blanks_count = sum(takewhile(bool, post_summary_blanks)) + if blanks_count != 1: + return D205(blanks_count) + + @check_for(Definition) + def check_indent(self, definition, docstring): + """D20{6,7,8}: The entire docstring should be indented same as code. + + The entire docstring is indented the same as the quotes at its + first line. + + """ + if docstring: + before_docstring, _, _ = definition.source.partition(docstring) + _, _, indent = before_docstring.rpartition('\n') + lines = docstring.split('\n') + if len(lines) > 1: + lines = lines[1:] # First line does not need indent. 
+ indents = [leading_space(l) for l in lines if not is_blank(l)] + if set(' \t') == set(''.join(indents) + indent): + yield D206() + if (len(indents) > 1 and min(indents[:-1]) > indent or + indents[-1] > indent): + yield D208() + if min(indents) < indent: + yield D207() + + @check_for(Definition) + def check_newline_after_last_paragraph(self, definition, docstring): + """D209: Put multi-line docstring closing quotes on separate line. + + Unless the entire docstring fits on a line, place the closing + quotes on a line by themselves. + + """ + if docstring: + lines = [l for l in eval(docstring).split('\n') if not is_blank(l)] + if len(lines) > 1: + if docstring.split("\n")[-1].strip() not in ['"""', "'''"]: + return D209() + + @check_for(Definition) + def check_surrounding_whitespaces(self, definition, docstring): + """D210: No whitespaces allowed surrounding docstring text.""" + if docstring: + lines = eval(docstring).split('\n') + if lines[0].startswith(' ') or \ + len(lines) == 1 and lines[0].endswith(' '): + return D210() + + @check_for(Definition) + def check_triple_double_quotes(self, definition, docstring): + r'''D300: Use """triple double quotes""". + + For consistency, always use """triple double quotes""" around + docstrings. Use r"""raw triple double quotes""" if you use any + backslashes in your docstrings. For Unicode docstrings, use + u"""Unicode triple-quoted strings""". + + Note: Exception to this is made if the docstring contains + """ quotes in its body. + + ''' + if docstring and '"""' in eval(docstring) and docstring.startswith( + ("'''", "r'''", "u'''", "ur'''")): + # Allow ''' quotes if docstring contains """, because otherwise """ + # quotes could not be expressed inside docstring. Not in PEP 257. 
+ return + if docstring and not docstring.startswith( + ('"""', 'r"""', 'u"""', 'ur"""')): + quotes = "'''" if "'''" in docstring[:4] else "'" + return D300(quotes) + + @check_for(Definition) + def check_backslashes(self, definition, docstring): + r'''D301: Use r""" if any backslashes in a docstring. + + Use r"""raw triple double quotes""" if you use any backslashes + (\) in your docstrings. + + ''' + # Just check that docstring is raw, check_triple_double_quotes + # ensures the correct quotes. + if docstring and '\\' in docstring and not docstring.startswith( + ('r', 'ur')): + return D301() + + @check_for(Definition) + def check_unicode_docstring(self, definition, docstring): + r'''D302: Use u""" for docstrings with Unicode. + + For Unicode docstrings, use u"""Unicode triple-quoted strings""". + + ''' + # Just check that docstring is unicode, check_triple_double_quotes + # ensures the correct quotes. + if docstring and sys.version_info[0] <= 2: + if not is_ascii(docstring) and not docstring.startswith( + ('u', 'ur')): + return D302() + + @check_for(Definition) + def check_ends_with_period(self, definition, docstring): + """D400: First line should end with a period. + + The [first line of a] docstring is a phrase ending in a period. + + """ + if docstring: + summary_line = eval(docstring).strip().split('\n')[0] + if not summary_line.endswith('.'): + return D400(summary_line[-1]) + + @check_for(Function) + def check_imperative_mood(self, function, docstring): # def context + """D401: First line should be in imperative mood: 'Do', not 'Does'. + + [Docstring] prescribes the function or method's effect as a command: + ("Do this", "Return that"), not as a description; e.g. don't write + "Returns the pathname ...". 
+ + """ + if docstring: + stripped = eval(docstring).strip() + if stripped: + first_word = stripped.split()[0] + if first_word.endswith('s') and not first_word.endswith('ss'): + return D401(first_word[:-1], first_word) + + @check_for(Function) + def check_no_signature(self, function, docstring): # def context + """D402: First line should not be function's or method's "signature". + + The one-line docstring should NOT be a "signature" reiterating the + function/method parameters (which can be obtained by introspection). + + """ + if docstring: + first_line = eval(docstring).strip().split('\n')[0] + if function.name + '(' in first_line.replace(' ', ''): + return D402() + + # Somewhat hard to determine if return value is mentioned. + # @check(Function) + def SKIP_check_return_type(self, function, docstring): + """D40x: Return value type should be mentioned. + + [T]he nature of the return value cannot be determined by + introspection, so it should be mentioned. + + """ + if docstring and function.returns_value: + if 'return' not in docstring.lower(): + return Error() + + +def main(): + try: + sys.exit(run_pep257()) + except KeyboardInterrupt: + pass + + +if __name__ == '__main__': + main() diff --git a/pymode/libs/pylama/lint/pylama_pep8/pep8.py b/pymode/libs/pep8.py similarity index 89% rename from pymode/libs/pylama/lint/pylama_pep8/pep8.py rename to pymode/libs/pep8.py index 10a3a155..34ce07ae 100644 --- a/pymode/libs/pylama/lint/pylama_pep8/pep8.py +++ b/pymode/libs/pep8.py @@ -2,6 +2,7 @@ # pep8.py - Check Python source code formatting, according to PEP 8 # Copyright (C) 2006-2009 Johann C. 
Rocholl # Copyright (C) 2009-2014 Florent Xicluna +# Copyright (C) 2014-2015 Ian Lee # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation files @@ -46,8 +47,6 @@ """ from __future__ import with_statement -__version__ = '1.6.0a0' - import os import sys import re @@ -63,13 +62,21 @@ except ImportError: from ConfigParser import RawConfigParser -DEFAULT_EXCLUDE = '.svn,CVS,.bzr,.hg,.git,__pycache__' -DEFAULT_IGNORE = 'E123,E226,E24,E704' -if sys.platform == 'win32': - DEFAULT_CONFIG = os.path.expanduser(r'~\.pep8') -else: - DEFAULT_CONFIG = os.path.join(os.getenv('XDG_CONFIG_HOME') or - os.path.expanduser('~/.config'), 'pep8') +__version__ = '1.6.3a0' + +DEFAULT_EXCLUDE = '.svn,CVS,.bzr,.hg,.git,__pycache__,.tox' +DEFAULT_IGNORE = 'E121,E123,E126,E226,E24,E704' +try: + if sys.platform == 'win32': + USER_CONFIG = os.path.expanduser(r'~\.pep8') + else: + USER_CONFIG = os.path.join( + os.getenv('XDG_CONFIG_HOME') or os.path.expanduser('~/.config'), + 'pep8' + ) +except ImportError: + USER_CONFIG = None + PROJECT_CONFIG = ('setup.cfg', 'tox.ini', '.pep8') TESTSUITE_PATH = os.path.join(os.path.dirname(__file__), 'testsuite') MAX_LINE_LENGTH = 79 @@ -101,8 +108,9 @@ DOCSTRING_REGEX = re.compile(r'u?r?["\']') EXTRANEOUS_WHITESPACE_REGEX = re.compile(r'[[({] | []}),;:]') WHITESPACE_AFTER_COMMA_REGEX = re.compile(r'[,;:]\s*(?: |\t)') -COMPARE_SINGLETON_REGEX = re.compile(r'([=!]=)\s*(None|False|True)') -COMPARE_NEGATIVE_REGEX = re.compile(r'\b(not)\s+[^[({ ]+\s+(in|is)\s') +COMPARE_SINGLETON_REGEX = re.compile(r'\b(None|False|True)?\s*([=!]=)' + r'\s*(?(1)|(None|False|True))\b') +COMPARE_NEGATIVE_REGEX = re.compile(r'\b(not)\s+[^][)(}{ ]+\s+(in|is)\s') COMPARE_TYPE_REGEX = re.compile(r'(?:[=!]=|is(?:\s+not)?)\s*type(?:s.\w+Type' r'|\s*\(\s*([^)]*[^ )])\s*\))') KEYWORD_REGEX = re.compile(r'(\s*)\b(?:%s)\b(\s*)' % r'|'.join(KEYWORDS)) @@ -199,7 +207,6 @@ def maximum_line_length(physical_line, 
max_line_length, multiline): Reports error E501. """ - max_line_length = int(max_line_length) line = physical_line.rstrip() length = len(line) if length > max_line_length and not noqa(line): @@ -428,6 +435,7 @@ def continued_indentation(logical_line, tokens, indent_level, hang_closing, indent_chances = {} last_indent = tokens[0][2] visual_indent = None + last_token_multiline = False # for each depth, memorize the visual indent column indent = [last_indent[1]] if verbose >= 3: @@ -507,8 +515,9 @@ def continued_indentation(logical_line, tokens, indent_level, hang_closing, yield start, "%s continuation line %s" % error # look for visual indenting - if (parens[row] and token_type not in (tokenize.NL, tokenize.COMMENT) - and not indent[depth]): + if (parens[row] and + token_type not in (tokenize.NL, tokenize.COMMENT) and + not indent[depth]): indent[depth] = start[1] indent_chances[start[1]] = True if verbose >= 4: @@ -681,7 +690,7 @@ def missing_whitespace_around_operator(logical_line, tokens): if need_space is True or need_space[1]: # A needed trailing space was not found yield prev_end, "E225 missing whitespace around operator" - else: + elif prev_text != '**': code, optype = 'E226', 'arithmetic' if prev_text == '%': code, optype = 'E228', 'modulo' @@ -749,6 +758,7 @@ def whitespace_around_named_parameter_equals(logical_line, tokens): Okay: boolean(a != b) Okay: boolean(a <= b) Okay: boolean(a >= b) + Okay: def foo(arg: int = 42): E251: def complex(real, imag = 0.0): E251: return magic(r = real, i = imag) @@ -756,6 +766,8 @@ def whitespace_around_named_parameter_equals(logical_line, tokens): parens = 0 no_space = False prev_end = None + annotated_func_arg = False + in_def = logical_line.startswith('def') message = "E251 unexpected spaces around keyword / parameter equals" for token_type, text, start, end, line in tokens: if token_type == tokenize.NL: @@ -764,15 +776,22 @@ def whitespace_around_named_parameter_equals(logical_line, tokens): no_space = False if start != 
prev_end: yield (prev_end, message) - elif token_type == tokenize.OP: + if token_type == tokenize.OP: if text == '(': parens += 1 elif text == ')': parens -= 1 - elif parens and text == '=': + elif in_def and text == ':' and parens == 1: + annotated_func_arg = True + elif parens and text == ',' and parens == 1: + annotated_func_arg = False + elif parens and text == '=' and not annotated_func_arg: no_space = True if start != prev_end: yield (prev_end, message) + if not parens: + annotated_func_arg = False + prev_end = end @@ -836,6 +855,56 @@ def imports_on_separate_lines(logical_line): yield found, "E401 multiple imports on one line" +def module_imports_on_top_of_file( + logical_line, indent_level, checker_state, noqa): + r"""Imports are always put at the top of the file, just after any module + comments and docstrings, and before module globals and constants. + + Okay: import os + Okay: # this is a comment\nimport os + Okay: '''this is a module docstring'''\nimport os + Okay: r'''this is a module docstring'''\nimport os + Okay: try:\n import x\nexcept:\n pass\nelse:\n pass\nimport y + Okay: try:\n import x\nexcept:\n pass\nfinally:\n pass\nimport y + E402: a=1\nimport os + E402: 'One string'\n"Two string"\nimport os + E402: a=1\nfrom sys import x + + Okay: if x:\n import os + """ + def is_string_literal(line): + if line[0] in 'uUbB': + line = line[1:] + if line and line[0] in 'rR': + line = line[1:] + return line and (line[0] == '"' or line[0] == "'") + + allowed_try_keywords = ('try', 'except', 'else', 'finally') + + if indent_level: # Allow imports in conditional statements or functions + return + if not logical_line: # Allow empty lines or comments + return + if noqa: + return + line = logical_line + if line.startswith('import ') or line.startswith('from '): + if checker_state.get('seen_non_imports', False): + yield 0, "E402 module level import not at top of file" + elif any(line.startswith(kw) for kw in allowed_try_keywords): + # Allow try, except, else, 
finally keywords intermixed with imports in + # order to support conditional importing + return + elif is_string_literal(line): + # The first literal is a docstring, allow it. Otherwise, report error. + if checker_state.get('seen_docstring', False): + checker_state['seen_non_imports'] = True + else: + checker_state['seen_docstring'] = True + else: + checker_state['seen_non_imports'] = True + + def compound_statements(logical_line): r"""Compound statements (on the same line) are generally discouraged. @@ -872,8 +941,12 @@ def compound_statements(logical_line): if ((before.count('{') <= before.count('}') and # {'a': 1} (dict) before.count('[') <= before.count(']') and # [1:2] (slice) before.count('(') <= before.count(')'))): # (annotation) - if LAMBDA_REGEX.search(before): - yield 0, "E731 do not assign a lambda expression, use a def" + lambda_kw = LAMBDA_REGEX.search(before) + if lambda_kw: + before = line[:lambda_kw.start()].rstrip() + if before[-1:] == '=' and isidentifier(before[:-1].strip()): + yield 0, ("E731 do not assign a lambda expression, use a " + "def") break if before.startswith('def '): yield 0, "E704 multiple statements on one line (def)" @@ -903,10 +976,15 @@ def explicit_line_join(logical_line, tokens): Okay: aaa = [123,\n 123] Okay: aaa = ("bbb "\n "ccc") Okay: aaa = "bbb " \\n "ccc" + Okay: aaa = 123 # \\ """ prev_start = prev_end = parens = 0 + comment = False + backslash = None for token_type, text, start, end, line in tokens: - if start[0] != prev_start and parens and backslash: + if token_type == tokenize.COMMENT: + comment = True + if start[0] != prev_start and parens and backslash and not comment: yield backslash, "E502 the backslash is redundant between brackets" if end[0] != prev_end: if line.rstrip('\r\n').endswith('\\'): @@ -923,6 +1001,45 @@ def explicit_line_join(logical_line, tokens): parens -= 1 +def break_around_binary_operator(logical_line, tokens): + r""" + Avoid breaks before binary operators. 
+ + The preferred place to break around a binary operator is after the + operator, not before it. + + W503: (width == 0\n + height == 0) + W503: (width == 0\n and height == 0) + + Okay: (width == 0 +\n height == 0) + Okay: foo(\n -x) + Okay: foo(x\n []) + Okay: x = '''\n''' + '' + Okay: foo(x,\n -y) + Okay: foo(x, # comment\n -y) + """ + def is_binary_operator(token_type, text): + # The % character is strictly speaking a binary operator, but the + # common usage seems to be to put it next to the format parameters, + # after a line break. + return ((token_type == tokenize.OP or text in ['and', 'or']) and + text not in "()[]{},:.;@=%") + + line_break = False + unary_context = True + for token_type, text, start, end, line in tokens: + if token_type == tokenize.COMMENT: + continue + if ('\n' in text or '\r' in text) and token_type != tokenize.STRING: + line_break = True + else: + if (is_binary_operator(token_type, text) and line_break and + not unary_context): + yield start, "W503 line break before binary operator" + unary_context = text in '([{,;' + line_break = False + + def comparison_to_singleton(logical_line, noqa): r"""Comparison to singletons should use "is" or "is not". @@ -931,7 +1048,9 @@ def comparison_to_singleton(logical_line, noqa): Okay: if arg is not None: E711: if arg != None: + E711: if None == arg: E712: if arg == True: + E712: if False == arg: Also, beware of writing if x when you really mean if x is not None -- e.g. 
when testing whether a variable or argument that defaults to None was @@ -940,8 +1059,9 @@ def comparison_to_singleton(logical_line, noqa): """ match = not noqa and COMPARE_SINGLETON_REGEX.search(logical_line) if match: - same = (match.group(1) == '==') - singleton = match.group(2) + singleton = match.group(1) or match.group(3) + same = (match.group(2) == '==') + msg = "'if cond is %s:'" % (('' if same else 'not ') + singleton) if singleton in ('None',): code = 'E711' @@ -950,7 +1070,7 @@ def comparison_to_singleton(logical_line, noqa): nonzero = ((singleton == 'True' and same) or (singleton == 'False' and not same)) msg += " or 'if %scond:'" % ('' if nonzero else 'not ') - yield match.start(1), ("%s comparison to %s should be %s" % + yield match.start(2), ("%s comparison to %s should be %s" % (code, singleton, msg)) @@ -975,7 +1095,7 @@ def comparison_negative(logical_line): yield pos, "E714 test for object identity should be 'is not'" -def comparison_type(logical_line): +def comparison_type(logical_line, noqa): r"""Object type comparisons should always use isinstance(). Do not compare types directly. @@ -991,7 +1111,7 @@ def comparison_type(logical_line): Okay: if type(a1) is type(b1): """ match = COMPARE_TYPE_REGEX.search(logical_line) - if match: + if match and not noqa: inst = match.group(1) if inst and isidentifier(inst) and inst not in SINGLETONS: return # Allow comparison for types which are not obvious @@ -1057,7 +1177,7 @@ def readlines(filename): """Read the source code.""" with open(filename, 'rU') as f: return f.readlines() - isidentifier = re.compile(r'[a-zA-Z_]\w*').match + isidentifier = re.compile(r'[a-zA-Z_]\w*$').match stdin_get_value = sys.stdin.read else: # Python 3 @@ -1156,10 +1276,13 @@ def normalize_paths(value, parent=os.curdir): Return a list of absolute paths. 
""" - if not value or isinstance(value, list): + if not value: + return [] + if isinstance(value, list): return value paths = [] for path in value.split(','): + path = path.strip() if '/' in path: path = os.path.abspath(os.path.join(parent, path)) paths.append(path.rstrip('/')) @@ -1176,14 +1299,12 @@ def filename_match(filename, patterns, default=True): return any(fnmatch(filename, pattern) for pattern in patterns) +def _is_eol_token(token): + return token[0] in NEWLINE or token[4][token[3][1]:].lstrip() == '\\\n' if COMMENT_WITH_NL: - def _is_eol_token(token): - return (token[0] in NEWLINE or - (token[0] == tokenize.COMMENT and token[1] == token[4])) -else: - def _is_eol_token(token): - return token[0] in NEWLINE - + def _is_eol_token(token, _eol_token=_is_eol_token): + return _eol_token(token) or (token[0] == tokenize.COMMENT and + token[1] == token[4]) ############################################################################## # Framework to run all checks @@ -1193,6 +1314,13 @@ def _is_eol_token(token): _checks = {'physical_line': {}, 'logical_line': {}, 'tree': {}} +def _get_parameters(function): + if sys.version_info >= (3, 3): + return list(inspect.signature(function).parameters) + else: + return inspect.getargspec(function)[0] + + def register_check(check, codes=None): """Register a new check object.""" def _add_check(check, kind, codes, args): @@ -1201,13 +1329,13 @@ def _add_check(check, kind, codes, args): else: _checks[kind][check] = (codes or [''], args) if inspect.isfunction(check): - args = inspect.getargspec(check)[0] + args = _get_parameters(check) if args and args[0] in ('physical_line', 'logical_line'): if codes is None: codes = ERRORCODE_REGEX.findall(check.__doc__ or '') _add_check(check, args[0], codes, args) elif inspect.isclass(check): - if inspect.getargspec(check.__init__)[0][:2] == ['self', 'tree']: + if _get_parameters(check.__init__)[:2] == ['self', 'tree']: _add_check(check, 'tree', codes, None) @@ -1240,6 +1368,8 @@ def 
__init__(self, filename=None, lines=None, self.hang_closing = options.hang_closing self.verbose = options.verbose self.filename = filename + # Dictionary where a checker can store its custom state. + self._checker_states = {} if filename is None: self.filename = 'stdin' self.lines = lines or [] @@ -1295,10 +1425,16 @@ def run_check(self, check, argument_names): arguments.append(getattr(self, name)) return check(*arguments) + def init_checker_state(self, name, argument_names): + """ Prepares a custom state for the specific checker plugin.""" + if 'checker_state' in argument_names: + self.checker_state = self._checker_states.setdefault(name, {}) + def check_physical(self, line): """Run all physical checks on a raw input line.""" self.physical_line = line for name, check, argument_names in self._physical_checks: + self.init_checker_state(name, argument_names) result = self.run_check(check, argument_names) if result is not None: (offset, text) = result @@ -1326,8 +1462,8 @@ def build_tokens_line(self): (start_row, start_col) = start if prev_row != start_row: # different row prev_text = self.lines[prev_row - 1][prev_col - 1] - if prev_text == ',' or (prev_text not in '{[(' - and text not in '}])'): + if prev_text == ',' or (prev_text not in '{[(' and + text not in '}])'): text = ' ' + text elif prev_col != start_col: # different column text = line[prev_col:start_col] + text @@ -1343,6 +1479,10 @@ def check_logical(self): """Build a line from tokens and run all logical checks on it.""" self.report.increment_logical_line() mapping = self.build_tokens_line() + + if not mapping: + return + (start_row, start_col) = mapping[0][1] start_line = self.lines[start_row - 1] self.indent_level = expand_indent(start_line[:start_col]) @@ -1353,6 +1493,7 @@ def check_logical(self): for name, check, argument_names in self._logical_checks: if self.verbose >= 4: print(' ' + name) + self.init_checker_state(name, argument_names) for offset, text in self.run_check(check, argument_names) or 
(): if not isinstance(offset, tuple): for token_offset, pos in mapping: @@ -1370,7 +1511,7 @@ def check_ast(self): """Build the file's AST and run all AST checks.""" try: tree = compile(''.join(self.lines), '', 'exec', PyCF_ONLY_AST) - except (SyntaxError, TypeError): + except (ValueError, SyntaxError, TypeError): return self.report_invalid_syntax() for name, cls, __ in self._ast_checks: checker = cls(tree, self.filename) @@ -1614,6 +1755,14 @@ def get_file_results(self): print(re.sub(r'\S', ' ', line[:offset]) + '^') if self._show_pep8 and doc: print(' ' + doc.strip()) + + # stdout is block buffered when not stdout.isatty(). + # line can be broken where buffer boundary since other processes + # write to same file. + # flush() after print() to avoid buffer boundary. + # Typical buffer size is 8192. line written safely when + # len(line) < 8192. + sys.stdout.flush() return self.file_errors @@ -1637,7 +1786,7 @@ def __init__(self, *args, **kwargs): # build options from the command line self.checker_class = kwargs.pop('checker_class', Checker) parse_argv = kwargs.pop('parse_argv', False) - config_file = kwargs.pop('config_file', None) + config_file = kwargs.pop('config_file', False) parser = kwargs.pop('parser', None) # build options from dict options_dict = dict(*args, **kwargs) @@ -1790,7 +1939,8 @@ def get_parser(prog='pep8', version=__version__): parser.add_option('--select', metavar='errors', default='', help="select errors and warnings (e.g. E,W6)") parser.add_option('--ignore', metavar='errors', default='', - help="skip errors and warnings (e.g. E4,W)") + help="skip errors and warnings (e.g. 
E4,W) " + "(default: %s)" % DEFAULT_IGNORE) parser.add_option('--show-source', action='store_true', help="show source code for each error") parser.add_option('--show-pep8', action='store_true', @@ -1826,25 +1976,40 @@ def get_parser(prog='pep8', version=__version__): def read_config(options, args, arglist, parser): - """Read both user configuration and local configuration.""" + """Read and parse configurations + + If a config file is specified on the command line with the "--config" + option, then only it is used for configuration. + + Otherwise, the user configuration (~/.config/pep8) and any local + configurations in the current directory or above will be merged together + (in that order) using the read method of ConfigParser. + """ config = RawConfigParser() - user_conf = options.config - if user_conf and os.path.isfile(user_conf): - if options.verbose: - print('user configuration: %s' % user_conf) - config.read(user_conf) + cli_conf = options.config local_dir = os.curdir + + if USER_CONFIG and os.path.isfile(USER_CONFIG): + if options.verbose: + print('user configuration: %s' % USER_CONFIG) + config.read(USER_CONFIG) + parent = tail = args and os.path.abspath(os.path.commonprefix(args)) while tail: - if config.read([os.path.join(parent, fn) for fn in PROJECT_CONFIG]): + if config.read(os.path.join(parent, fn) for fn in PROJECT_CONFIG): local_dir = parent if options.verbose: print('local configuration: in %s' % parent) break (parent, tail) = os.path.split(parent) + if cli_conf and os.path.isfile(cli_conf): + if options.verbose: + print('cli configuration: %s' % cli_conf) + config.read(cli_conf) + pep8_section = parser.prog if config.has_section(pep8_section): option_list = dict([(o.dest, o.type or o.action) @@ -1881,19 +2046,21 @@ def read_config(options, args, arglist, parser): def process_options(arglist=None, parse_argv=False, config_file=None, parser=None): - """Process options passed either via arglist or via command line args.""" + """Process options 
passed either via arglist or via command line args. + + Passing in the ``config_file`` parameter allows other tools, such as flake8 + to specify their own options to be processed in pep8. + """ if not parser: parser = get_parser() if not parser.has_option('--config'): - if config_file is True: - config_file = DEFAULT_CONFIG group = parser.add_option_group("Configuration", description=( "The project options are read from the [%s] section of the " "tox.ini file or the setup.cfg file located in any parent folder " "of the path(s) being processed. Allowed options are: %s." % (parser.prog, ', '.join(parser.config_options)))) group.add_option('--config', metavar='path', default=config_file, - help="user config file location (default: %default)") + help="user config file location") # Don't read the command line if the module is used as a library. if not arglist and not parse_argv: arglist = [] @@ -1938,7 +2105,7 @@ def _main(): except AttributeError: pass # not supported on Windows - pep8style = StyleGuide(parse_argv=True, config_file=True) + pep8style = StyleGuide(parse_argv=True) options = pep8style.options if options.doctest or options.testsuite: from testsuite.support import run_tests diff --git a/pymode/libs/pkg_resources/__init__.py b/pymode/libs/pkg_resources/__init__.py new file mode 100644 index 00000000..42ddcf7c --- /dev/null +++ b/pymode/libs/pkg_resources/__init__.py @@ -0,0 +1,3113 @@ +""" +Package resource API +-------------------- + +A resource is a logical file contained within a package, or a logical +subdirectory thereof. The package resource API expects resource names +to have their path parts separated with ``/``, *not* whatever the local +path separator is. Do not use os.path operations to manipulate resource +names being passed into the API. + +The package resource API is designed to work with normal filesystem packages, +.egg files, and unpacked .egg files. 
It can also work in a limited way with +.zip files and with custom PEP 302 loaders that support the ``get_data()`` +method. +""" + +from __future__ import absolute_import + +import sys +import os +import io +import time +import re +import types +import zipfile +import zipimport +import warnings +import stat +import functools +import pkgutil +import token +import symbol +import operator +import platform +import collections +import plistlib +import email.parser +import tempfile +import textwrap +from pkgutil import get_importer + +try: + import _imp +except ImportError: + # Python 3.2 compatibility + import imp as _imp + +PY3 = sys.version_info > (3,) +PY2 = not PY3 + +if PY3: + from urllib.parse import urlparse, urlunparse + +if PY2: + from urlparse import urlparse, urlunparse + +if PY3: + string_types = str, +else: + string_types = str, eval('unicode') + +iteritems = (lambda i: i.items()) if PY3 else lambda i: i.iteritems() + +# capture these to bypass sandboxing +from os import utime +try: + from os import mkdir, rename, unlink + WRITE_SUPPORT = True +except ImportError: + # no write support, probably under GAE + WRITE_SUPPORT = False + +from os import open as os_open +from os.path import isdir, split + +# Avoid try/except due to potential problems with delayed import mechanisms. +if sys.version_info >= (3, 3) and sys.implementation.name == "cpython": + import importlib.machinery as importlib_machinery +else: + importlib_machinery = None + +try: + import parser +except ImportError: + pass + +try: + import pkg_resources._vendor.packaging.version + import pkg_resources._vendor.packaging.specifiers + packaging = pkg_resources._vendor.packaging +except ImportError: + # fallback to naturally-installed version; allows system packagers to + # omit vendored packages. + import packaging.version + import packaging.specifiers + + +# declare some globals that will be defined later to +# satisfy the linters. 
+require = None +working_set = None + + +class PEP440Warning(RuntimeWarning): + """ + Used when there is an issue with a version or specifier not complying with + PEP 440. + """ + + +class _SetuptoolsVersionMixin(object): + + def __hash__(self): + return super(_SetuptoolsVersionMixin, self).__hash__() + + def __lt__(self, other): + if isinstance(other, tuple): + return tuple(self) < other + else: + return super(_SetuptoolsVersionMixin, self).__lt__(other) + + def __le__(self, other): + if isinstance(other, tuple): + return tuple(self) <= other + else: + return super(_SetuptoolsVersionMixin, self).__le__(other) + + def __eq__(self, other): + if isinstance(other, tuple): + return tuple(self) == other + else: + return super(_SetuptoolsVersionMixin, self).__eq__(other) + + def __ge__(self, other): + if isinstance(other, tuple): + return tuple(self) >= other + else: + return super(_SetuptoolsVersionMixin, self).__ge__(other) + + def __gt__(self, other): + if isinstance(other, tuple): + return tuple(self) > other + else: + return super(_SetuptoolsVersionMixin, self).__gt__(other) + + def __ne__(self, other): + if isinstance(other, tuple): + return tuple(self) != other + else: + return super(_SetuptoolsVersionMixin, self).__ne__(other) + + def __getitem__(self, key): + return tuple(self)[key] + + def __iter__(self): + component_re = re.compile(r'(\d+ | [a-z]+ | \.| -)', re.VERBOSE) + replace = { + 'pre': 'c', + 'preview': 'c', + '-': 'final-', + 'rc': 'c', + 'dev': '@', + }.get + + def _parse_version_parts(s): + for part in component_re.split(s): + part = replace(part, part) + if not part or part == '.': + continue + if part[:1] in '0123456789': + # pad for numeric comparison + yield part.zfill(8) + else: + yield '*'+part + + # ensure that alpha/beta/candidate are before final + yield '*final' + + def old_parse_version(s): + parts = [] + for part in _parse_version_parts(s.lower()): + if part.startswith('*'): + # remove '-' before a prerelease tag + if part < '*final': + 
while parts and parts[-1] == '*final-': + parts.pop() + # remove trailing zeros from each series of numeric parts + while parts and parts[-1] == '00000000': + parts.pop() + parts.append(part) + return tuple(parts) + + # Warn for use of this function + warnings.warn( + "You have iterated over the result of " + "pkg_resources.parse_version. This is a legacy behavior which is " + "inconsistent with the new version class introduced in setuptools " + "8.0. In most cases, conversion to a tuple is unnecessary. For " + "comparison of versions, sort the Version instances directly. If " + "you have another use case requiring the tuple, please file a " + "bug with the setuptools project describing that need.", + RuntimeWarning, + stacklevel=1, + ) + + for part in old_parse_version(str(self)): + yield part + + +class SetuptoolsVersion(_SetuptoolsVersionMixin, packaging.version.Version): + pass + + +class SetuptoolsLegacyVersion(_SetuptoolsVersionMixin, + packaging.version.LegacyVersion): + pass + + +def parse_version(v): + try: + return SetuptoolsVersion(v) + except packaging.version.InvalidVersion: + return SetuptoolsLegacyVersion(v) + + +_state_vars = {} + +def _declare_state(vartype, **kw): + globals().update(kw) + _state_vars.update(dict.fromkeys(kw, vartype)) + +def __getstate__(): + state = {} + g = globals() + for k, v in _state_vars.items(): + state[k] = g['_sget_'+v](g[k]) + return state + +def __setstate__(state): + g = globals() + for k, v in state.items(): + g['_sset_'+_state_vars[k]](k, g[k], v) + return state + +def _sget_dict(val): + return val.copy() + +def _sset_dict(key, ob, state): + ob.clear() + ob.update(state) + +def _sget_object(val): + return val.__getstate__() + +def _sset_object(key, ob, state): + ob.__setstate__(state) + +_sget_none = _sset_none = lambda *args: None + + +def get_supported_platform(): + """Return this platform's maximum compatible version. 
+ + distutils.util.get_platform() normally reports the minimum version + of Mac OS X that would be required to *use* extensions produced by + distutils. But what we want when checking compatibility is to know the + version of Mac OS X that we are *running*. To allow usage of packages that + explicitly require a newer version of Mac OS X, we must also know the + current version of the OS. + + If this condition occurs for any other platform with a version in its + platform strings, this function should be extended accordingly. + """ + plat = get_build_platform() + m = macosVersionString.match(plat) + if m is not None and sys.platform == "darwin": + try: + plat = 'macosx-%s-%s' % ('.'.join(_macosx_vers()[:2]), m.group(3)) + except ValueError: + # not Mac OS X + pass + return plat + +__all__ = [ + # Basic resource access and distribution/entry point discovery + 'require', 'run_script', 'get_provider', 'get_distribution', + 'load_entry_point', 'get_entry_map', 'get_entry_info', + 'iter_entry_points', + 'resource_string', 'resource_stream', 'resource_filename', + 'resource_listdir', 'resource_exists', 'resource_isdir', + + # Environmental control + 'declare_namespace', 'working_set', 'add_activation_listener', + 'find_distributions', 'set_extraction_path', 'cleanup_resources', + 'get_default_cache', + + # Primary implementation classes + 'Environment', 'WorkingSet', 'ResourceManager', + 'Distribution', 'Requirement', 'EntryPoint', + + # Exceptions + 'ResolutionError', 'VersionConflict', 'DistributionNotFound', + 'UnknownExtra', 'ExtractionError', + + # Warnings + 'PEP440Warning', + + # Parsing functions and string utilities + 'parse_requirements', 'parse_version', 'safe_name', 'safe_version', + 'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections', + 'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker', + + # filesystem utilities + 'ensure_directory', 'normalize_path', + + # Distribution "precedence" constants + 'EGG_DIST', 'BINARY_DIST', 
'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST', + + # "Provider" interfaces, implementations, and registration/lookup APIs + 'IMetadataProvider', 'IResourceProvider', 'FileMetadata', + 'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider', + 'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider', + 'register_finder', 'register_namespace_handler', 'register_loader_type', + 'fixup_namespace_packages', 'get_importer', + + # Deprecated/backward compatibility only + 'run_main', 'AvailableDistributions', +] + +class ResolutionError(Exception): + """Abstract base for dependency resolution errors""" + def __repr__(self): + return self.__class__.__name__+repr(self.args) + + +class VersionConflict(ResolutionError): + """ + An already-installed version conflicts with the requested version. + + Should be initialized with the installed Distribution and the requested + Requirement. + """ + + _template = "{self.dist} is installed but {self.req} is required" + + @property + def dist(self): + return self.args[0] + + @property + def req(self): + return self.args[1] + + def report(self): + return self._template.format(**locals()) + + def with_context(self, required_by): + """ + If required_by is non-empty, return a version of self that is a + ContextualVersionConflict. + """ + if not required_by: + return self + args = self.args + (required_by,) + return ContextualVersionConflict(*args) + + +class ContextualVersionConflict(VersionConflict): + """ + A VersionConflict that accepts a third parameter, the set of the + requirements that required the installed Distribution. 
+ """ + + _template = VersionConflict._template + ' by {self.required_by}' + + @property + def required_by(self): + return self.args[2] + + +class DistributionNotFound(ResolutionError): + """A requested distribution was not found""" + + _template = ("The '{self.req}' distribution was not found " + "and is required by {self.requirers_str}") + + @property + def req(self): + return self.args[0] + + @property + def requirers(self): + return self.args[1] + + @property + def requirers_str(self): + if not self.requirers: + return 'the application' + return ', '.join(self.requirers) + + def report(self): + return self._template.format(**locals()) + + def __str__(self): + return self.report() + + +class UnknownExtra(ResolutionError): + """Distribution doesn't have an "extra feature" of the given name""" +_provider_factories = {} + +PY_MAJOR = sys.version[:3] +EGG_DIST = 3 +BINARY_DIST = 2 +SOURCE_DIST = 1 +CHECKOUT_DIST = 0 +DEVELOP_DIST = -1 + +def register_loader_type(loader_type, provider_factory): + """Register `provider_factory` to make providers for `loader_type` + + `loader_type` is the type or class of a PEP 302 ``module.__loader__``, + and `provider_factory` is a function that, passed a *module* object, + returns an ``IResourceProvider`` for that module. 
+ """ + _provider_factories[loader_type] = provider_factory + +def get_provider(moduleOrReq): + """Return an IResourceProvider for the named module or requirement""" + if isinstance(moduleOrReq, Requirement): + return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0] + try: + module = sys.modules[moduleOrReq] + except KeyError: + __import__(moduleOrReq) + module = sys.modules[moduleOrReq] + loader = getattr(module, '__loader__', None) + return _find_adapter(_provider_factories, loader)(module) + +def _macosx_vers(_cache=[]): + if not _cache: + version = platform.mac_ver()[0] + # fallback for MacPorts + if version == '': + plist = '/System/Library/CoreServices/SystemVersion.plist' + if os.path.exists(plist): + if hasattr(plistlib, 'readPlist'): + plist_content = plistlib.readPlist(plist) + if 'ProductVersion' in plist_content: + version = plist_content['ProductVersion'] + + _cache.append(version.split('.')) + return _cache[0] + +def _macosx_arch(machine): + return {'PowerPC': 'ppc', 'Power_Macintosh': 'ppc'}.get(machine, machine) + +def get_build_platform(): + """Return this platform's string for platform-specific distributions + + XXX Currently this is the same as ``distutils.util.get_platform()``, but it + needs some hacks for Linux and Mac OS X. 
+ """ + try: + # Python 2.7 or >=3.2 + from sysconfig import get_platform + except ImportError: + from distutils.util import get_platform + + plat = get_platform() + if sys.platform == "darwin" and not plat.startswith('macosx-'): + try: + version = _macosx_vers() + machine = os.uname()[4].replace(" ", "_") + return "macosx-%d.%d-%s" % (int(version[0]), int(version[1]), + _macosx_arch(machine)) + except ValueError: + # if someone is running a non-Mac darwin system, this will fall + # through to the default implementation + pass + return plat + +macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)") +darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)") +# XXX backward compat +get_platform = get_build_platform + + +def compatible_platforms(provided, required): + """Can code for the `provided` platform run on the `required` platform? + + Returns true if either platform is ``None``, or the platforms are equal. + + XXX Needs compatibility checks for Linux and other unixy OSes. + """ + if provided is None or required is None or provided==required: + # easy case + return True + + # Mac OS X special cases + reqMac = macosVersionString.match(required) + if reqMac: + provMac = macosVersionString.match(provided) + + # is this a Mac package? + if not provMac: + # this is backwards compatibility for packages built before + # setuptools 0.6. All packages built after this point will + # use the new macosx designation. + provDarwin = darwinVersionString.match(provided) + if provDarwin: + dversion = int(provDarwin.group(1)) + macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2)) + if dversion == 7 and macosversion >= "10.3" or \ + dversion == 8 and macosversion >= "10.4": + return True + # egg isn't macosx or legacy darwin + return False + + # are they the same major version and machine type? + if provMac.group(1) != reqMac.group(1) or \ + provMac.group(3) != reqMac.group(3): + return False + + # is the required OS major update >= the provided one? 
+ if int(provMac.group(2)) > int(reqMac.group(2)): + return False + + return True + + # XXX Linux and other platforms' special cases should go here + return False + + +def run_script(dist_spec, script_name): + """Locate distribution `dist_spec` and run its `script_name` script""" + ns = sys._getframe(1).f_globals + name = ns['__name__'] + ns.clear() + ns['__name__'] = name + require(dist_spec)[0].run_script(script_name, ns) + +# backward compatibility +run_main = run_script + +def get_distribution(dist): + """Return a current distribution object for a Requirement or string""" + if isinstance(dist, string_types): + dist = Requirement.parse(dist) + if isinstance(dist, Requirement): + dist = get_provider(dist) + if not isinstance(dist, Distribution): + raise TypeError("Expected string, Requirement, or Distribution", dist) + return dist + +def load_entry_point(dist, group, name): + """Return `name` entry point of `group` for `dist` or raise ImportError""" + return get_distribution(dist).load_entry_point(group, name) + +def get_entry_map(dist, group=None): + """Return the entry point map for `group`, or the full entry map""" + return get_distribution(dist).get_entry_map(group) + +def get_entry_info(dist, group, name): + """Return the EntryPoint object for `group`+`name`, or ``None``""" + return get_distribution(dist).get_entry_info(group, name) + + +class IMetadataProvider: + + def has_metadata(name): + """Does the package's distribution contain the named metadata?""" + + def get_metadata(name): + """The named metadata resource as a string""" + + def get_metadata_lines(name): + """Yield named metadata resource as list of non-blank non-comment lines + + Leading and trailing whitespace is stripped from each line, and lines + with ``#`` as the first non-blank character are omitted.""" + + def metadata_isdir(name): + """Is the named metadata a directory? 
(like ``os.path.isdir()``)""" + + def metadata_listdir(name): + """List of metadata names in the directory (like ``os.listdir()``)""" + + def run_script(script_name, namespace): + """Execute the named script in the supplied namespace dictionary""" + + +class IResourceProvider(IMetadataProvider): + """An object that provides access to package resources""" + + def get_resource_filename(manager, resource_name): + """Return a true filesystem path for `resource_name` + + `manager` must be an ``IResourceManager``""" + + def get_resource_stream(manager, resource_name): + """Return a readable file-like object for `resource_name` + + `manager` must be an ``IResourceManager``""" + + def get_resource_string(manager, resource_name): + """Return a string containing the contents of `resource_name` + + `manager` must be an ``IResourceManager``""" + + def has_resource(resource_name): + """Does the package contain the named resource?""" + + def resource_isdir(resource_name): + """Is the named resource a directory? (like ``os.path.isdir()``)""" + + def resource_listdir(resource_name): + """List of resource names in the directory (like ``os.listdir()``)""" + + +class WorkingSet(object): + """A collection of active distributions on sys.path (or a similar list)""" + + def __init__(self, entries=None): + """Create working set from list of path entries (default=sys.path)""" + self.entries = [] + self.entry_keys = {} + self.by_key = {} + self.callbacks = [] + + if entries is None: + entries = sys.path + + for entry in entries: + self.add_entry(entry) + + @classmethod + def _build_master(cls): + """ + Prepare the master working set. 
+ """ + ws = cls() + try: + from __main__ import __requires__ + except ImportError: + # The main program does not list any requirements + return ws + + # ensure the requirements are met + try: + ws.require(__requires__) + except VersionConflict: + return cls._build_from_requirements(__requires__) + + return ws + + @classmethod + def _build_from_requirements(cls, req_spec): + """ + Build a working set from a requirement spec. Rewrites sys.path. + """ + # try it without defaults already on sys.path + # by starting with an empty path + ws = cls([]) + reqs = parse_requirements(req_spec) + dists = ws.resolve(reqs, Environment()) + for dist in dists: + ws.add(dist) + + # add any missing entries from sys.path + for entry in sys.path: + if entry not in ws.entries: + ws.add_entry(entry) + + # then copy back to sys.path + sys.path[:] = ws.entries + return ws + + def add_entry(self, entry): + """Add a path item to ``.entries``, finding any distributions on it + + ``find_distributions(entry, True)`` is used to find distributions + corresponding to the path entry, and they are added. `entry` is + always appended to ``.entries``, even if it is already present. + (This is because ``sys.path`` can contain the same value more than + once, and the ``.entries`` of the ``sys.path`` WorkingSet should always + equal ``sys.path``.) + """ + self.entry_keys.setdefault(entry, []) + self.entries.append(entry) + for dist in find_distributions(entry, True): + self.add(dist, entry, False) + + def __contains__(self, dist): + """True if `dist` is the active distribution for its project""" + return self.by_key.get(dist.key) == dist + + def find(self, req): + """Find a distribution matching requirement `req` + + If there is an active distribution for the requested project, this + returns it as long as it meets the version requirement specified by + `req`. But, if there is an active distribution for the project and it + does *not* meet the `req` requirement, ``VersionConflict`` is raised. 
+ If there is no active distribution for the requested project, ``None`` + is returned. + """ + dist = self.by_key.get(req.key) + if dist is not None and dist not in req: + # XXX add more info + raise VersionConflict(dist, req) + return dist + + def iter_entry_points(self, group, name=None): + """Yield entry point objects from `group` matching `name` + + If `name` is None, yields all entry points in `group` from all + distributions in the working set, otherwise only ones matching + both `group` and `name` are yielded (in distribution order). + """ + for dist in self: + entries = dist.get_entry_map(group) + if name is None: + for ep in entries.values(): + yield ep + elif name in entries: + yield entries[name] + + def run_script(self, requires, script_name): + """Locate distribution for `requires` and run `script_name` script""" + ns = sys._getframe(1).f_globals + name = ns['__name__'] + ns.clear() + ns['__name__'] = name + self.require(requires)[0].run_script(script_name, ns) + + def __iter__(self): + """Yield distributions for non-duplicate projects in the working set + + The yield order is the order in which the items' path entries were + added to the working set. + """ + seen = {} + for item in self.entries: + if item not in self.entry_keys: + # workaround a cache issue + continue + + for key in self.entry_keys[item]: + if key not in seen: + seen[key]=1 + yield self.by_key[key] + + def add(self, dist, entry=None, insert=True, replace=False): + """Add `dist` to working set, associated with `entry` + + If `entry` is unspecified, it defaults to the ``.location`` of `dist`. + On exit from this routine, `entry` is added to the end of the working + set's ``.entries`` (if it wasn't already present). + + `dist` is only added to the working set if it's for a project that + doesn't already have a distribution in the set, unless `replace=True`. + If it's added, any callbacks registered with the ``subscribe()`` method + will be called. 
+ """ + if insert: + dist.insert_on(self.entries, entry) + + if entry is None: + entry = dist.location + keys = self.entry_keys.setdefault(entry,[]) + keys2 = self.entry_keys.setdefault(dist.location,[]) + if not replace and dist.key in self.by_key: + # ignore hidden distros + return + + self.by_key[dist.key] = dist + if dist.key not in keys: + keys.append(dist.key) + if dist.key not in keys2: + keys2.append(dist.key) + self._added_new(dist) + + def resolve(self, requirements, env=None, installer=None, + replace_conflicting=False): + """List all distributions needed to (recursively) meet `requirements` + + `requirements` must be a sequence of ``Requirement`` objects. `env`, + if supplied, should be an ``Environment`` instance. If + not supplied, it defaults to all distributions available within any + entry or distribution in the working set. `installer`, if supplied, + will be invoked with each requirement that cannot be met by an + already-installed distribution; it should return a ``Distribution`` or + ``None``. + + Unless `replace_conflicting=True`, raises a VersionConflict exception if + any requirements are found on the path that have the correct name but + the wrong version. Otherwise, if an `installer` is supplied it will be + invoked to obtain the correct version of the requirement and activate + it. + """ + + # set up the stack + requirements = list(requirements)[::-1] + # set of processed requirements + processed = {} + # key -> dist + best = {} + to_activate = [] + + # Mapping of requirement to set of distributions that required it; + # useful for reporting info about conflicts. 
+ required_by = collections.defaultdict(set) + + while requirements: + # process dependencies breadth-first + req = requirements.pop(0) + if req in processed: + # Ignore cyclic or redundant dependencies + continue + dist = best.get(req.key) + if dist is None: + # Find the best distribution and add it to the map + dist = self.by_key.get(req.key) + if dist is None or (dist not in req and replace_conflicting): + ws = self + if env is None: + if dist is None: + env = Environment(self.entries) + else: + # Use an empty environment and workingset to avoid + # any further conflicts with the conflicting + # distribution + env = Environment([]) + ws = WorkingSet([]) + dist = best[req.key] = env.best_match(req, ws, installer) + if dist is None: + requirers = required_by.get(req, None) + raise DistributionNotFound(req, requirers) + to_activate.append(dist) + if dist not in req: + # Oops, the "best" so far conflicts with a dependency + dependent_req = required_by[req] + raise VersionConflict(dist, req).with_context(dependent_req) + + # push the new requirements onto the stack + new_requirements = dist.requires(req.extras)[::-1] + requirements.extend(new_requirements) + + # Register the new requirements needed by req + for new_requirement in new_requirements: + required_by[new_requirement].add(req.project_name) + + processed[req] = True + + # return list of distros to activate + return to_activate + + def find_plugins(self, plugin_env, full_env=None, installer=None, + fallback=True): + """Find all activatable distributions in `plugin_env` + + Example usage:: + + distributions, errors = working_set.find_plugins( + Environment(plugin_dirlist) + ) + # add plugins+libs to sys.path + map(working_set.add, distributions) + # display errors + print('Could not load', errors) + + The `plugin_env` should be an ``Environment`` instance that contains + only distributions that are in the project's "plugin directory" or + directories. 
The `full_env`, if supplied, should be an ``Environment`` + contains all currently-available distributions. If `full_env` is not + supplied, one is created automatically from the ``WorkingSet`` this + method is called on, which will typically mean that every directory on + ``sys.path`` will be scanned for distributions. + + `installer` is a standard installer callback as used by the + ``resolve()`` method. The `fallback` flag indicates whether we should + attempt to resolve older versions of a plugin if the newest version + cannot be resolved. + + This method returns a 2-tuple: (`distributions`, `error_info`), where + `distributions` is a list of the distributions found in `plugin_env` + that were loadable, along with any other distributions that are needed + to resolve their dependencies. `error_info` is a dictionary mapping + unloadable plugin distributions to an exception instance describing the + error that occurred. Usually this will be a ``DistributionNotFound`` or + ``VersionConflict`` instance. 
+ """ + + plugin_projects = list(plugin_env) + # scan project names in alphabetic order + plugin_projects.sort() + + error_info = {} + distributions = {} + + if full_env is None: + env = Environment(self.entries) + env += plugin_env + else: + env = full_env + plugin_env + + shadow_set = self.__class__([]) + # put all our entries in shadow_set + list(map(shadow_set.add, self)) + + for project_name in plugin_projects: + + for dist in plugin_env[project_name]: + + req = [dist.as_requirement()] + + try: + resolvees = shadow_set.resolve(req, env, installer) + + except ResolutionError as v: + # save error info + error_info[dist] = v + if fallback: + # try the next older version of project + continue + else: + # give up on this project, keep going + break + + else: + list(map(shadow_set.add, resolvees)) + distributions.update(dict.fromkeys(resolvees)) + + # success, no need to try any more versions of this project + break + + distributions = list(distributions) + distributions.sort() + + return distributions, error_info + + def require(self, *requirements): + """Ensure that distributions matching `requirements` are activated + + `requirements` must be a string or a (possibly-nested) sequence + thereof, specifying the distributions and versions required. The + return value is a sequence of the distributions that needed to be + activated to fulfill the requirements; all relevant distributions are + included, even if they were already activated in this working set. 
+ """ + needed = self.resolve(parse_requirements(requirements)) + + for dist in needed: + self.add(dist) + + return needed + + def subscribe(self, callback): + """Invoke `callback` for all distributions (including existing ones)""" + if callback in self.callbacks: + return + self.callbacks.append(callback) + for dist in self: + callback(dist) + + def _added_new(self, dist): + for callback in self.callbacks: + callback(dist) + + def __getstate__(self): + return ( + self.entries[:], self.entry_keys.copy(), self.by_key.copy(), + self.callbacks[:] + ) + + def __setstate__(self, e_k_b_c): + entries, keys, by_key, callbacks = e_k_b_c + self.entries = entries[:] + self.entry_keys = keys.copy() + self.by_key = by_key.copy() + self.callbacks = callbacks[:] + + +class Environment(object): + """Searchable snapshot of distributions on a search path""" + + def __init__(self, search_path=None, platform=get_supported_platform(), + python=PY_MAJOR): + """Snapshot distributions available on a search path + + Any distributions found on `search_path` are added to the environment. + `search_path` should be a sequence of ``sys.path`` items. If not + supplied, ``sys.path`` is used. + + `platform` is an optional string specifying the name of the platform + that platform-specific distributions must be compatible with. If + unspecified, it defaults to the current platform. `python` is an + optional string naming the desired version of Python (e.g. ``'3.3'``); + it defaults to the current version. + + You may explicitly set `platform` (and/or `python`) to ``None`` if you + wish to map *all* distributions, not just those compatible with the + running platform or Python version. + """ + self._distmap = {} + self.platform = platform + self.python = python + self.scan(search_path) + + def can_add(self, dist): + """Is distribution `dist` acceptable for this environment? 
+ + The distribution must match the platform and python version + requirements specified when this environment was created, or False + is returned. + """ + return (self.python is None or dist.py_version is None + or dist.py_version==self.python) \ + and compatible_platforms(dist.platform, self.platform) + + def remove(self, dist): + """Remove `dist` from the environment""" + self._distmap[dist.key].remove(dist) + + def scan(self, search_path=None): + """Scan `search_path` for distributions usable in this environment + + Any distributions found are added to the environment. + `search_path` should be a sequence of ``sys.path`` items. If not + supplied, ``sys.path`` is used. Only distributions conforming to + the platform/python version defined at initialization are added. + """ + if search_path is None: + search_path = sys.path + + for item in search_path: + for dist in find_distributions(item): + self.add(dist) + + def __getitem__(self, project_name): + """Return a newest-to-oldest list of distributions for `project_name` + + Uses case-insensitive `project_name` comparison, assuming all the + project's distributions use their project's name converted to all + lowercase as their key. + + """ + distribution_key = project_name.lower() + return self._distmap.get(distribution_key, []) + + def add(self, dist): + """Add `dist` if we ``can_add()`` it and it has not already been added + """ + if self.can_add(dist) and dist.has_version(): + dists = self._distmap.setdefault(dist.key, []) + if dist not in dists: + dists.append(dist) + dists.sort(key=operator.attrgetter('hashcmp'), reverse=True) + + def best_match(self, req, working_set, installer=None): + """Find distribution best matching `req` and usable on `working_set` + + This calls the ``find(req)`` method of the `working_set` to see if a + suitable distribution is already active. (This may raise + ``VersionConflict`` if an unsuitable version of the project is already + active in the specified `working_set`.) 
If a suitable distribution + isn't active, this method returns the newest distribution in the + environment that meets the ``Requirement`` in `req`. If no suitable + distribution is found, and `installer` is supplied, then the result of + calling the environment's ``obtain(req, installer)`` method will be + returned. + """ + dist = working_set.find(req) + if dist is not None: + return dist + for dist in self[req.key]: + if dist in req: + return dist + # try to download/install + return self.obtain(req, installer) + + def obtain(self, requirement, installer=None): + """Obtain a distribution matching `requirement` (e.g. via download) + + Obtain a distro that matches requirement (e.g. via download). In the + base ``Environment`` class, this routine just returns + ``installer(requirement)``, unless `installer` is None, in which case + None is returned instead. This method is a hook that allows subclasses + to attempt other ways of obtaining a distribution before falling back + to the `installer` argument.""" + if installer is not None: + return installer(requirement) + + def __iter__(self): + """Yield the unique project names of the available distributions""" + for key in self._distmap.keys(): + if self[key]: + yield key + + def __iadd__(self, other): + """In-place addition of a distribution or environment""" + if isinstance(other, Distribution): + self.add(other) + elif isinstance(other, Environment): + for project in other: + for dist in other[project]: + self.add(dist) + else: + raise TypeError("Can't add %r to environment" % (other,)) + return self + + def __add__(self, other): + """Add an environment or distribution to an environment""" + new = self.__class__([], platform=None, python=None) + for env in self, other: + new += env + return new + + +# XXX backward compatibility +AvailableDistributions = Environment + + +class ExtractionError(RuntimeError): + """An error occurred extracting a resource + + The following attributes are available from instances of this 
exception: + + manager + The resource manager that raised this exception + + cache_path + The base directory for resource extraction + + original_error + The exception instance that caused extraction to fail + """ + + +class ResourceManager: + """Manage resource extraction and packages""" + extraction_path = None + + def __init__(self): + self.cached_files = {} + + def resource_exists(self, package_or_requirement, resource_name): + """Does the named resource exist?""" + return get_provider(package_or_requirement).has_resource(resource_name) + + def resource_isdir(self, package_or_requirement, resource_name): + """Is the named resource an existing directory?""" + return get_provider(package_or_requirement).resource_isdir( + resource_name + ) + + def resource_filename(self, package_or_requirement, resource_name): + """Return a true filesystem path for specified resource""" + return get_provider(package_or_requirement).get_resource_filename( + self, resource_name + ) + + def resource_stream(self, package_or_requirement, resource_name): + """Return a readable file-like object for specified resource""" + return get_provider(package_or_requirement).get_resource_stream( + self, resource_name + ) + + def resource_string(self, package_or_requirement, resource_name): + """Return specified resource as a string""" + return get_provider(package_or_requirement).get_resource_string( + self, resource_name + ) + + def resource_listdir(self, package_or_requirement, resource_name): + """List the contents of the named resource directory""" + return get_provider(package_or_requirement).resource_listdir( + resource_name + ) + + def extraction_error(self): + """Give an error message for problems extracting file(s)""" + + old_exc = sys.exc_info()[1] + cache_path = self.extraction_path or get_default_cache() + + err = ExtractionError("""Can't extract file(s) to egg cache + +The following error occurred while trying to extract file(s) to the Python egg +cache: + + %s + +The Python egg cache 
directory is currently set to: + + %s + +Perhaps your account does not have write access to this directory? You can +change the cache directory by setting the PYTHON_EGG_CACHE environment +variable to point to an accessible directory. +""" % (old_exc, cache_path) + ) + err.manager = self + err.cache_path = cache_path + err.original_error = old_exc + raise err + + def get_cache_path(self, archive_name, names=()): + """Return absolute location in cache for `archive_name` and `names` + + The parent directory of the resulting path will be created if it does + not already exist. `archive_name` should be the base filename of the + enclosing egg (which may not be the name of the enclosing zipfile!), + including its ".egg" extension. `names`, if provided, should be a + sequence of path name parts "under" the egg's extraction location. + + This method should only be called by resource providers that need to + obtain an extraction location, and only for names they intend to + extract, as it tracks the generated names for possible cleanup later. + """ + extract_path = self.extraction_path or get_default_cache() + target_path = os.path.join(extract_path, archive_name+'-tmp', *names) + try: + _bypass_ensure_directory(target_path) + except: + self.extraction_error() + + self._warn_unsafe_extraction_path(extract_path) + + self.cached_files[target_path] = 1 + return target_path + + @staticmethod + def _warn_unsafe_extraction_path(path): + """ + If the default extraction path is overridden and set to an insecure + location, such as /tmp, it opens up an opportunity for an attacker to + replace an extracted file with an unauthorized payload. Warn the user + if a known insecure location is used. + + See Distribute #375 for more details. + """ + if os.name == 'nt' and not path.startswith(os.environ['windir']): + # On Windows, permissions are generally restrictive by default + # and temp directories are not writable by other users, so + # bypass the warning. 
+ return + mode = os.stat(path).st_mode + if mode & stat.S_IWOTH or mode & stat.S_IWGRP: + msg = ("%s is writable by group/others and vulnerable to attack " + "when " + "used with get_resource_filename. Consider a more secure " + "location (set with .set_extraction_path or the " + "PYTHON_EGG_CACHE environment variable)." % path) + warnings.warn(msg, UserWarning) + + def postprocess(self, tempname, filename): + """Perform any platform-specific postprocessing of `tempname` + + This is where Mac header rewrites should be done; other platforms don't + have anything special they should do. + + Resource providers should call this method ONLY after successfully + extracting a compressed resource. They must NOT call it on resources + that are already in the filesystem. + + `tempname` is the current (temporary) name of the file, and `filename` + is the name it will be renamed to by the caller after this routine + returns. + """ + + if os.name == 'posix': + # Make the resource executable + mode = ((os.stat(tempname).st_mode) | 0o555) & 0o7777 + os.chmod(tempname, mode) + + def set_extraction_path(self, path): + """Set the base path where resources will be extracted to, if needed. + + If you do not call this routine before any extractions take place, the + path defaults to the return value of ``get_default_cache()``. (Which + is based on the ``PYTHON_EGG_CACHE`` environment variable, with various + platform-specific fallbacks. See that routine's documentation for more + details.) + + Resources are extracted to subdirectories of this path based upon + information given by the ``IResourceProvider``. You may set this to a + temporary directory, but then you must call ``cleanup_resources()`` to + delete the extracted files when done. There is no guarantee that + ``cleanup_resources()`` will be able to remove all extracted files. 
+ + (Note: you may not change the extraction path for a given resource + manager once resources have been extracted, unless you first call + ``cleanup_resources()``.) + """ + if self.cached_files: + raise ValueError( + "Can't change extraction path, files already extracted" + ) + + self.extraction_path = path + + def cleanup_resources(self, force=False): + """ + Delete all extracted resource files and directories, returning a list + of the file and directory names that could not be successfully removed. + This function does not have any concurrency protection, so it should + generally only be called when the extraction path is a temporary + directory exclusive to a single process. This method is not + automatically called; you must call it explicitly or register it as an + ``atexit`` function if you wish to ensure cleanup of a temporary + directory used for extractions. + """ + # XXX + +def get_default_cache(): + """Determine the default cache location + + This returns the ``PYTHON_EGG_CACHE`` environment variable, if set. + Otherwise, on Windows, it returns a "Python-Eggs" subdirectory of the + "Application Data" directory. On all other systems, it's "~/.python-eggs". + """ + try: + return os.environ['PYTHON_EGG_CACHE'] + except KeyError: + pass + + if os.name!='nt': + return os.path.expanduser('~/.python-eggs') + + # XXX this may be locale-specific! 
+ app_data = 'Application Data' + app_homes = [ + # best option, should be locale-safe + (('APPDATA',), None), + (('USERPROFILE',), app_data), + (('HOMEDRIVE','HOMEPATH'), app_data), + (('HOMEPATH',), app_data), + (('HOME',), None), + # 95/98/ME + (('WINDIR',), app_data), + ] + + for keys, subdir in app_homes: + dirname = '' + for key in keys: + if key in os.environ: + dirname = os.path.join(dirname, os.environ[key]) + else: + break + else: + if subdir: + dirname = os.path.join(dirname, subdir) + return os.path.join(dirname, 'Python-Eggs') + else: + raise RuntimeError( + "Please set the PYTHON_EGG_CACHE environment variable" + ) + +def safe_name(name): + """Convert an arbitrary string to a standard distribution name + + Any runs of non-alphanumeric/. characters are replaced with a single '-'. + """ + return re.sub('[^A-Za-z0-9.]+', '-', name) + + +def safe_version(version): + """ + Convert an arbitrary string to a standard version string + """ + try: + # normalize the version + return str(packaging.version.Version(version)) + except packaging.version.InvalidVersion: + version = version.replace(' ','.') + return re.sub('[^A-Za-z0-9.]+', '-', version) + + +def safe_extra(extra): + """Convert an arbitrary string to a standard 'extra' name + + Any runs of non-alphanumeric characters are replaced with a single '_', + and the result is always lowercased. + """ + return re.sub('[^A-Za-z0-9.]+', '_', extra).lower() + + +def to_filename(name): + """Convert a project or version name to its filename-escaped form + + Any '-' characters are currently replaced with '_'. 
+ """ + return name.replace('-','_') + + +class MarkerEvaluation(object): + values = { + 'os_name': lambda: os.name, + 'sys_platform': lambda: sys.platform, + 'python_full_version': platform.python_version, + 'python_version': lambda: platform.python_version()[:3], + 'platform_version': platform.version, + 'platform_machine': platform.machine, + 'python_implementation': platform.python_implementation, + } + + @classmethod + def is_invalid_marker(cls, text): + """ + Validate text as a PEP 426 environment marker; return an exception + if invalid or False otherwise. + """ + try: + cls.evaluate_marker(text) + except SyntaxError as e: + return cls.normalize_exception(e) + return False + + @staticmethod + def normalize_exception(exc): + """ + Given a SyntaxError from a marker evaluation, normalize the error + message: + - Remove indications of filename and line number. + - Replace platform-specific error messages with standard error + messages. + """ + subs = { + 'unexpected EOF while parsing': 'invalid syntax', + 'parenthesis is never closed': 'invalid syntax', + } + exc.filename = None + exc.lineno = None + exc.msg = subs.get(exc.msg, exc.msg) + return exc + + @classmethod + def and_test(cls, nodelist): + # MUST NOT short-circuit evaluation, or invalid syntax can be skipped! + items = [ + cls.interpret(nodelist[i]) + for i in range(1, len(nodelist), 2) + ] + return functools.reduce(operator.and_, items) + + @classmethod + def test(cls, nodelist): + # MUST NOT short-circuit evaluation, or invalid syntax can be skipped! 
+ items = [ + cls.interpret(nodelist[i]) + for i in range(1, len(nodelist), 2) + ] + return functools.reduce(operator.or_, items) + + @classmethod + def atom(cls, nodelist): + t = nodelist[1][0] + if t == token.LPAR: + if nodelist[2][0] == token.RPAR: + raise SyntaxError("Empty parentheses") + return cls.interpret(nodelist[2]) + msg = "Language feature not supported in environment markers" + raise SyntaxError(msg) + + @classmethod + def comparison(cls, nodelist): + if len(nodelist) > 4: + msg = "Chained comparison not allowed in environment markers" + raise SyntaxError(msg) + comp = nodelist[2][1] + cop = comp[1] + if comp[0] == token.NAME: + if len(nodelist[2]) == 3: + if cop == 'not': + cop = 'not in' + else: + cop = 'is not' + try: + cop = cls.get_op(cop) + except KeyError: + msg = repr(cop) + " operator not allowed in environment markers" + raise SyntaxError(msg) + return cop(cls.evaluate(nodelist[1]), cls.evaluate(nodelist[3])) + + @classmethod + def get_op(cls, op): + ops = { + symbol.test: cls.test, + symbol.and_test: cls.and_test, + symbol.atom: cls.atom, + symbol.comparison: cls.comparison, + 'not in': lambda x, y: x not in y, + 'in': lambda x, y: x in y, + '==': operator.eq, + '!=': operator.ne, + '<': operator.lt, + '>': operator.gt, + '<=': operator.le, + '>=': operator.ge, + } + if hasattr(symbol, 'or_test'): + ops[symbol.or_test] = cls.test + return ops[op] + + @classmethod + def evaluate_marker(cls, text, extra=None): + """ + Evaluate a PEP 426 environment marker on CPython 2.4+. + Return a boolean indicating the marker result in this environment. + Raise SyntaxError if marker is invalid. + + This implementation uses the 'parser' module, which is not implemented + on + Jython and has been superseded by the 'ast' module in Python 2.6 and + later. + """ + return cls.interpret(parser.expr(text).totuple(1)[1]) + + @classmethod + def _markerlib_evaluate(cls, text): + """ + Evaluate a PEP 426 environment marker using markerlib. 
+ Return a boolean indicating the marker result in this environment. + Raise SyntaxError if marker is invalid. + """ + import _markerlib + # markerlib implements Metadata 1.2 (PEP 345) environment markers. + # Translate the variables to Metadata 2.0 (PEP 426). + env = _markerlib.default_environment() + for key in env.keys(): + new_key = key.replace('.', '_') + env[new_key] = env.pop(key) + try: + result = _markerlib.interpret(text, env) + except NameError as e: + raise SyntaxError(e.args[0]) + return result + + if 'parser' not in globals(): + # Fall back to less-complete _markerlib implementation if 'parser' module + # is not available. + evaluate_marker = _markerlib_evaluate + + @classmethod + def interpret(cls, nodelist): + while len(nodelist)==2: nodelist = nodelist[1] + try: + op = cls.get_op(nodelist[0]) + except KeyError: + raise SyntaxError("Comparison or logical expression expected") + return op(nodelist) + + @classmethod + def evaluate(cls, nodelist): + while len(nodelist)==2: nodelist = nodelist[1] + kind = nodelist[0] + name = nodelist[1] + if kind==token.NAME: + try: + op = cls.values[name] + except KeyError: + raise SyntaxError("Unknown name %r" % name) + return op() + if kind==token.STRING: + s = nodelist[1] + if not cls._safe_string(s): + raise SyntaxError( + "Only plain strings allowed in environment markers") + return s[1:-1] + msg = "Language feature not supported in environment markers" + raise SyntaxError(msg) + + @staticmethod + def _safe_string(cand): + return ( + cand[:1] in "'\"" and + not cand.startswith('"""') and + not cand.startswith("'''") and + '\\' not in cand + ) + +invalid_marker = MarkerEvaluation.is_invalid_marker +evaluate_marker = MarkerEvaluation.evaluate_marker + +class NullProvider: + """Try to implement resources and metadata for arbitrary PEP 302 loaders""" + + egg_name = None + egg_info = None + loader = None + + def __init__(self, module): + self.loader = getattr(module, '__loader__', None) + self.module_path = 
os.path.dirname(getattr(module, '__file__', '')) + + def get_resource_filename(self, manager, resource_name): + return self._fn(self.module_path, resource_name) + + def get_resource_stream(self, manager, resource_name): + return io.BytesIO(self.get_resource_string(manager, resource_name)) + + def get_resource_string(self, manager, resource_name): + return self._get(self._fn(self.module_path, resource_name)) + + def has_resource(self, resource_name): + return self._has(self._fn(self.module_path, resource_name)) + + def has_metadata(self, name): + return self.egg_info and self._has(self._fn(self.egg_info, name)) + + if sys.version_info <= (3,): + def get_metadata(self, name): + if not self.egg_info: + return "" + return self._get(self._fn(self.egg_info, name)) + else: + def get_metadata(self, name): + if not self.egg_info: + return "" + return self._get(self._fn(self.egg_info, name)).decode("utf-8") + + def get_metadata_lines(self, name): + return yield_lines(self.get_metadata(name)) + + def resource_isdir(self, resource_name): + return self._isdir(self._fn(self.module_path, resource_name)) + + def metadata_isdir(self, name): + return self.egg_info and self._isdir(self._fn(self.egg_info, name)) + + def resource_listdir(self, resource_name): + return self._listdir(self._fn(self.module_path, resource_name)) + + def metadata_listdir(self, name): + if self.egg_info: + return self._listdir(self._fn(self.egg_info, name)) + return [] + + def run_script(self, script_name, namespace): + script = 'scripts/'+script_name + if not self.has_metadata(script): + raise ResolutionError("No script named %r" % script_name) + script_text = self.get_metadata(script).replace('\r\n', '\n') + script_text = script_text.replace('\r', '\n') + script_filename = self._fn(self.egg_info, script) + namespace['__file__'] = script_filename + if os.path.exists(script_filename): + source = open(script_filename).read() + code = compile(source, script_filename, 'exec') + exec(code, namespace, namespace) + 
else: + from linecache import cache + cache[script_filename] = ( + len(script_text), 0, script_text.split('\n'), script_filename + ) + script_code = compile(script_text, script_filename,'exec') + exec(script_code, namespace, namespace) + + def _has(self, path): + raise NotImplementedError( + "Can't perform this operation for unregistered loader type" + ) + + def _isdir(self, path): + raise NotImplementedError( + "Can't perform this operation for unregistered loader type" + ) + + def _listdir(self, path): + raise NotImplementedError( + "Can't perform this operation for unregistered loader type" + ) + + def _fn(self, base, resource_name): + if resource_name: + return os.path.join(base, *resource_name.split('/')) + return base + + def _get(self, path): + if hasattr(self.loader, 'get_data'): + return self.loader.get_data(path) + raise NotImplementedError( + "Can't perform this operation for loaders without 'get_data()'" + ) + +register_loader_type(object, NullProvider) + + +class EggProvider(NullProvider): + """Provider based on a virtual filesystem""" + + def __init__(self, module): + NullProvider.__init__(self, module) + self._setup_prefix() + + def _setup_prefix(self): + # we assume here that our metadata may be nested inside a "basket" + # of multiple eggs; that's why we use module_path instead of .archive + path = self.module_path + old = None + while path!=old: + if path.lower().endswith('.egg'): + self.egg_name = os.path.basename(path) + self.egg_info = os.path.join(path, 'EGG-INFO') + self.egg_root = path + break + old = path + path, base = os.path.split(path) + +class DefaultProvider(EggProvider): + """Provides access to package resources in the filesystem""" + + def _has(self, path): + return os.path.exists(path) + + def _isdir(self, path): + return os.path.isdir(path) + + def _listdir(self, path): + return os.listdir(path) + + def get_resource_stream(self, manager, resource_name): + return open(self._fn(self.module_path, resource_name), 'rb') + + def 
_get(self, path): + with open(path, 'rb') as stream: + return stream.read() + +register_loader_type(type(None), DefaultProvider) + +if importlib_machinery is not None: + register_loader_type(importlib_machinery.SourceFileLoader, DefaultProvider) + + +class EmptyProvider(NullProvider): + """Provider that returns nothing for all requests""" + + _isdir = _has = lambda self, path: False + _get = lambda self, path: '' + _listdir = lambda self, path: [] + module_path = None + + def __init__(self): + pass + +empty_provider = EmptyProvider() + + +class ZipManifests(dict): + """ + zip manifest builder + """ + + @classmethod + def build(cls, path): + """ + Build a dictionary similar to the zipimport directory + caches, except instead of tuples, store ZipInfo objects. + + Use a platform-specific path separator (os.sep) for the path keys + for compatibility with pypy on Windows. + """ + with ContextualZipFile(path) as zfile: + items = ( + ( + name.replace('/', os.sep), + zfile.getinfo(name), + ) + for name in zfile.namelist() + ) + return dict(items) + + load = build + + +class MemoizedZipManifests(ZipManifests): + """ + Memoized zipfile manifests. + """ + manifest_mod = collections.namedtuple('manifest_mod', 'manifest mtime') + + def load(self, path): + """ + Load a manifest at path or return a suitable manifest already loaded. 
+ """ + path = os.path.normpath(path) + mtime = os.stat(path).st_mtime + + if path not in self or self[path].mtime != mtime: + manifest = self.build(path) + self[path] = self.manifest_mod(manifest, mtime) + + return self[path].manifest + + +class ContextualZipFile(zipfile.ZipFile): + """ + Supplement ZipFile class to support context manager for Python 2.6 + """ + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + self.close() + + def __new__(cls, *args, **kwargs): + """ + Construct a ZipFile or ContextualZipFile as appropriate + """ + if hasattr(zipfile.ZipFile, '__exit__'): + return zipfile.ZipFile(*args, **kwargs) + return super(ContextualZipFile, cls).__new__(cls) + + +class ZipProvider(EggProvider): + """Resource support for zips and eggs""" + + eagers = None + _zip_manifests = MemoizedZipManifests() + + def __init__(self, module): + EggProvider.__init__(self, module) + self.zip_pre = self.loader.archive+os.sep + + def _zipinfo_name(self, fspath): + # Convert a virtual filename (full path to file) into a zipfile subpath + # usable with the zipimport directory cache for our target archive + if fspath.startswith(self.zip_pre): + return fspath[len(self.zip_pre):] + raise AssertionError( + "%s is not a subpath of %s" % (fspath, self.zip_pre) + ) + + def _parts(self, zip_path): + # Convert a zipfile subpath into an egg-relative path part list. 
+ # pseudo-fs path + fspath = self.zip_pre+zip_path + if fspath.startswith(self.egg_root+os.sep): + return fspath[len(self.egg_root)+1:].split(os.sep) + raise AssertionError( + "%s is not a subpath of %s" % (fspath, self.egg_root) + ) + + @property + def zipinfo(self): + return self._zip_manifests.load(self.loader.archive) + + def get_resource_filename(self, manager, resource_name): + if not self.egg_name: + raise NotImplementedError( + "resource_filename() only supported for .egg, not .zip" + ) + # no need to lock for extraction, since we use temp names + zip_path = self._resource_to_zip(resource_name) + eagers = self._get_eager_resources() + if '/'.join(self._parts(zip_path)) in eagers: + for name in eagers: + self._extract_resource(manager, self._eager_to_zip(name)) + return self._extract_resource(manager, zip_path) + + @staticmethod + def _get_date_and_size(zip_stat): + size = zip_stat.file_size + # ymdhms+wday, yday, dst + date_time = zip_stat.date_time + (0, 0, -1) + # 1980 offset already done + timestamp = time.mktime(date_time) + return timestamp, size + + def _extract_resource(self, manager, zip_path): + + if zip_path in self._index(): + for name in self._index()[zip_path]: + last = self._extract_resource( + manager, os.path.join(zip_path, name) + ) + # return the extracted directory name + return os.path.dirname(last) + + timestamp, size = self._get_date_and_size(self.zipinfo[zip_path]) + + if not WRITE_SUPPORT: + raise IOError('"os.rename" and "os.unlink" are not supported ' + 'on this platform') + try: + + real_path = manager.get_cache_path( + self.egg_name, self._parts(zip_path) + ) + + if self._is_current(real_path, zip_path): + return real_path + + outf, tmpnam = _mkstemp(".$extract", dir=os.path.dirname(real_path)) + os.write(outf, self.loader.get_data(zip_path)) + os.close(outf) + utime(tmpnam, (timestamp, timestamp)) + manager.postprocess(tmpnam, real_path) + + try: + rename(tmpnam, real_path) + + except os.error: + if os.path.isfile(real_path): + 
if self._is_current(real_path, zip_path): + # the file became current since it was checked above, + # so proceed. + return real_path + # Windows, del old file and retry + elif os.name=='nt': + unlink(real_path) + rename(tmpnam, real_path) + return real_path + raise + + except os.error: + # report a user-friendly error + manager.extraction_error() + + return real_path + + def _is_current(self, file_path, zip_path): + """ + Return True if the file_path is current for this zip_path + """ + timestamp, size = self._get_date_and_size(self.zipinfo[zip_path]) + if not os.path.isfile(file_path): + return False + stat = os.stat(file_path) + if stat.st_size!=size or stat.st_mtime!=timestamp: + return False + # check that the contents match + zip_contents = self.loader.get_data(zip_path) + with open(file_path, 'rb') as f: + file_contents = f.read() + return zip_contents == file_contents + + def _get_eager_resources(self): + if self.eagers is None: + eagers = [] + for name in ('native_libs.txt', 'eager_resources.txt'): + if self.has_metadata(name): + eagers.extend(self.get_metadata_lines(name)) + self.eagers = eagers + return self.eagers + + def _index(self): + try: + return self._dirindex + except AttributeError: + ind = {} + for path in self.zipinfo: + parts = path.split(os.sep) + while parts: + parent = os.sep.join(parts[:-1]) + if parent in ind: + ind[parent].append(parts[-1]) + break + else: + ind[parent] = [parts.pop()] + self._dirindex = ind + return ind + + def _has(self, fspath): + zip_path = self._zipinfo_name(fspath) + return zip_path in self.zipinfo or zip_path in self._index() + + def _isdir(self, fspath): + return self._zipinfo_name(fspath) in self._index() + + def _listdir(self, fspath): + return list(self._index().get(self._zipinfo_name(fspath), ())) + + def _eager_to_zip(self, resource_name): + return self._zipinfo_name(self._fn(self.egg_root, resource_name)) + + def _resource_to_zip(self, resource_name): + return self._zipinfo_name(self._fn(self.module_path, 
resource_name)) + +register_loader_type(zipimport.zipimporter, ZipProvider) + + +class FileMetadata(EmptyProvider): + """Metadata handler for standalone PKG-INFO files + + Usage:: + + metadata = FileMetadata("/path/to/PKG-INFO") + + This provider rejects all data and metadata requests except for PKG-INFO, + which is treated as existing, and will be the contents of the file at + the provided location. + """ + + def __init__(self, path): + self.path = path + + def has_metadata(self, name): + return name=='PKG-INFO' + + def get_metadata(self, name): + if name=='PKG-INFO': + with open(self.path,'rU') as f: + metadata = f.read() + return metadata + raise KeyError("No metadata except PKG-INFO is available") + + def get_metadata_lines(self, name): + return yield_lines(self.get_metadata(name)) + + +class PathMetadata(DefaultProvider): + """Metadata provider for egg directories + + Usage:: + + # Development eggs: + + egg_info = "/path/to/PackageName.egg-info" + base_dir = os.path.dirname(egg_info) + metadata = PathMetadata(base_dir, egg_info) + dist_name = os.path.splitext(os.path.basename(egg_info))[0] + dist = Distribution(basedir, project_name=dist_name, metadata=metadata) + + # Unpacked egg directories: + + egg_path = "/path/to/PackageName-ver-pyver-etc.egg" + metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO')) + dist = Distribution.from_filename(egg_path, metadata=metadata) + """ + + def __init__(self, path, egg_info): + self.module_path = path + self.egg_info = egg_info + + +class EggMetadata(ZipProvider): + """Metadata provider for .egg files""" + + def __init__(self, importer): + """Create a metadata provider from a zipimporter""" + + self.zip_pre = importer.archive+os.sep + self.loader = importer + if importer.prefix: + self.module_path = os.path.join(importer.archive, importer.prefix) + else: + self.module_path = importer.archive + self._setup_prefix() + +_declare_state('dict', _distribution_finders = {}) + +def register_finder(importer_type, 
distribution_finder): + """Register `distribution_finder` to find distributions in sys.path items + + `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item + handler), and `distribution_finder` is a callable that, passed a path + item and the importer instance, yields ``Distribution`` instances found on + that path item. See ``pkg_resources.find_on_path`` for an example.""" + _distribution_finders[importer_type] = distribution_finder + + +def find_distributions(path_item, only=False): + """Yield distributions accessible via `path_item`""" + importer = get_importer(path_item) + finder = _find_adapter(_distribution_finders, importer) + return finder(importer, path_item, only) + +def find_eggs_in_zip(importer, path_item, only=False): + """ + Find eggs in zip files; possibly multiple nested eggs. + """ + if importer.archive.endswith('.whl'): + # wheels are not supported with this finder + # they don't have PKG-INFO metadata, and won't ever contain eggs + return + metadata = EggMetadata(importer) + if metadata.has_metadata('PKG-INFO'): + yield Distribution.from_filename(path_item, metadata=metadata) + if only: + # don't yield nested distros + return + for subitem in metadata.resource_listdir('/'): + if subitem.endswith('.egg'): + subpath = os.path.join(path_item, subitem) + for dist in find_eggs_in_zip(zipimport.zipimporter(subpath), subpath): + yield dist + +register_finder(zipimport.zipimporter, find_eggs_in_zip) + +def find_nothing(importer, path_item, only=False): + return () +register_finder(object, find_nothing) + +def find_on_path(importer, path_item, only=False): + """Yield distributions accessible on a sys.path directory""" + path_item = _normalize_cached(path_item) + + if os.path.isdir(path_item) and os.access(path_item, os.R_OK): + if path_item.lower().endswith('.egg'): + # unpacked egg + yield Distribution.from_filename( + path_item, metadata=PathMetadata( + path_item, os.path.join(path_item,'EGG-INFO') + ) + ) + else: + # scan for .egg 
and .egg-info in directory + for entry in os.listdir(path_item): + lower = entry.lower() + if lower.endswith('.egg-info') or lower.endswith('.dist-info'): + fullpath = os.path.join(path_item, entry) + if os.path.isdir(fullpath): + # egg-info directory, allow getting metadata + metadata = PathMetadata(path_item, fullpath) + else: + metadata = FileMetadata(fullpath) + yield Distribution.from_location( + path_item, entry, metadata, precedence=DEVELOP_DIST + ) + elif not only and lower.endswith('.egg'): + dists = find_distributions(os.path.join(path_item, entry)) + for dist in dists: + yield dist + elif not only and lower.endswith('.egg-link'): + with open(os.path.join(path_item, entry)) as entry_file: + entry_lines = entry_file.readlines() + for line in entry_lines: + if not line.strip(): + continue + path = os.path.join(path_item, line.rstrip()) + dists = find_distributions(path) + for item in dists: + yield item + break +register_finder(pkgutil.ImpImporter, find_on_path) + +if importlib_machinery is not None: + register_finder(importlib_machinery.FileFinder, find_on_path) + +_declare_state('dict', _namespace_handlers={}) +_declare_state('dict', _namespace_packages={}) + + +def register_namespace_handler(importer_type, namespace_handler): + """Register `namespace_handler` to declare namespace packages + + `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item + handler), and `namespace_handler` is a callable like this:: + + def namespace_handler(importer, path_entry, moduleName, module): + # return a path_entry to use for child packages + + Namespace handlers are only called if the importer object has already + agreed that it can handle the relevant path item, and they should only + return a subpath if the module __path__ does not already contain an + equivalent subpath. For an example namespace handler, see + ``pkg_resources.file_ns_handler``. 
+ """ + _namespace_handlers[importer_type] = namespace_handler + +def _handle_ns(packageName, path_item): + """Ensure that named package includes a subpath of path_item (if needed)""" + + importer = get_importer(path_item) + if importer is None: + return None + loader = importer.find_module(packageName) + if loader is None: + return None + module = sys.modules.get(packageName) + if module is None: + module = sys.modules[packageName] = types.ModuleType(packageName) + module.__path__ = [] + _set_parent_ns(packageName) + elif not hasattr(module,'__path__'): + raise TypeError("Not a package:", packageName) + handler = _find_adapter(_namespace_handlers, importer) + subpath = handler(importer, path_item, packageName, module) + if subpath is not None: + path = module.__path__ + path.append(subpath) + loader.load_module(packageName) + for path_item in path: + if path_item not in module.__path__: + module.__path__.append(path_item) + return subpath + +def declare_namespace(packageName): + """Declare that package 'packageName' is a namespace package""" + + _imp.acquire_lock() + try: + if packageName in _namespace_packages: + return + + path, parent = sys.path, None + if '.' 
in packageName: + parent = '.'.join(packageName.split('.')[:-1]) + declare_namespace(parent) + if parent not in _namespace_packages: + __import__(parent) + try: + path = sys.modules[parent].__path__ + except AttributeError: + raise TypeError("Not a package:", parent) + + # Track what packages are namespaces, so when new path items are added, + # they can be updated + _namespace_packages.setdefault(parent,[]).append(packageName) + _namespace_packages.setdefault(packageName,[]) + + for path_item in path: + # Ensure all the parent's path items are reflected in the child, + # if they apply + _handle_ns(packageName, path_item) + + finally: + _imp.release_lock() + +def fixup_namespace_packages(path_item, parent=None): + """Ensure that previously-declared namespace packages include path_item""" + _imp.acquire_lock() + try: + for package in _namespace_packages.get(parent,()): + subpath = _handle_ns(package, path_item) + if subpath: + fixup_namespace_packages(subpath, package) + finally: + _imp.release_lock() + +def file_ns_handler(importer, path_item, packageName, module): + """Compute an ns-package subpath for a filesystem or zipfile importer""" + + subpath = os.path.join(path_item, packageName.split('.')[-1]) + normalized = _normalize_cached(subpath) + for item in module.__path__: + if _normalize_cached(item)==normalized: + break + else: + # Only return the path if it's not already there + return subpath + +register_namespace_handler(pkgutil.ImpImporter, file_ns_handler) +register_namespace_handler(zipimport.zipimporter, file_ns_handler) + +if importlib_machinery is not None: + register_namespace_handler(importlib_machinery.FileFinder, file_ns_handler) + + +def null_ns_handler(importer, path_item, packageName, module): + return None + +register_namespace_handler(object, null_ns_handler) + + +def normalize_path(filename): + """Normalize a file/dir name for comparison purposes""" + return os.path.normcase(os.path.realpath(filename)) + +def _normalize_cached(filename, 
_cache={}): + try: + return _cache[filename] + except KeyError: + _cache[filename] = result = normalize_path(filename) + return result + +def _set_parent_ns(packageName): + parts = packageName.split('.') + name = parts.pop() + if parts: + parent = '.'.join(parts) + setattr(sys.modules[parent], name, sys.modules[packageName]) + + +def yield_lines(strs): + """Yield non-empty/non-comment lines of a string or sequence""" + if isinstance(strs, string_types): + for s in strs.splitlines(): + s = s.strip() + # skip blank lines/comments + if s and not s.startswith('#'): + yield s + else: + for ss in strs: + for s in yield_lines(ss): + yield s + +# whitespace and comment +LINE_END = re.compile(r"\s*(#.*)?$").match +# line continuation +CONTINUE = re.compile(r"\s*\\\s*(#.*)?$").match +# Distribution or extra +DISTRO = re.compile(r"\s*((\w|[-.])+)").match +# ver. info +VERSION = re.compile(r"\s*(<=?|>=?|===?|!=|~=)\s*((\w|[-.*_!+])+)").match +# comma between items +COMMA = re.compile(r"\s*,").match +OBRACKET = re.compile(r"\s*\[").match +CBRACKET = re.compile(r"\s*\]").match +MODULE = re.compile(r"\w+(\.\w+)*$").match +EGG_NAME = re.compile( + r""" + (?P[^-]+) ( + -(?P[^-]+) ( + -py(?P[^-]+) ( + -(?P.+) + )? + )? + )? 
+ """, + re.VERBOSE | re.IGNORECASE, +).match + + +class EntryPoint(object): + """Object representing an advertised importable object""" + + def __init__(self, name, module_name, attrs=(), extras=(), dist=None): + if not MODULE(module_name): + raise ValueError("Invalid module name", module_name) + self.name = name + self.module_name = module_name + self.attrs = tuple(attrs) + self.extras = Requirement.parse(("x[%s]" % ','.join(extras))).extras + self.dist = dist + + def __str__(self): + s = "%s = %s" % (self.name, self.module_name) + if self.attrs: + s += ':' + '.'.join(self.attrs) + if self.extras: + s += ' [%s]' % ','.join(self.extras) + return s + + def __repr__(self): + return "EntryPoint.parse(%r)" % str(self) + + def load(self, require=True, *args, **kwargs): + """ + Require packages for this EntryPoint, then resolve it. + """ + if not require or args or kwargs: + warnings.warn( + "Parameters to load are deprecated. Call .resolve and " + ".require separately.", + DeprecationWarning, + stacklevel=2, + ) + if require: + self.require(*args, **kwargs) + return self.resolve() + + def resolve(self): + """ + Resolve the entry point from its module and attrs. 
+ """ + module = __import__(self.module_name, fromlist=['__name__'], level=0) + try: + return functools.reduce(getattr, self.attrs, module) + except AttributeError as exc: + raise ImportError(str(exc)) + + def require(self, env=None, installer=None): + if self.extras and not self.dist: + raise UnknownExtra("Can't require() without a distribution", self) + reqs = self.dist.requires(self.extras) + items = working_set.resolve(reqs, env, installer) + list(map(working_set.add, items)) + + pattern = re.compile( + r'\s*' + r'(?P.+?)\s*' + r'=\s*' + r'(?P[\w.]+)\s*' + r'(:\s*(?P[\w.]+))?\s*' + r'(?P\[.*\])?\s*$' + ) + + @classmethod + def parse(cls, src, dist=None): + """Parse a single entry point from string `src` + + Entry point syntax follows the form:: + + name = some.module:some.attr [extra1, extra2] + + The entry name and module name are required, but the ``:attrs`` and + ``[extras]`` parts are optional + """ + m = cls.pattern.match(src) + if not m: + msg = "EntryPoint must be in 'name=module:attrs [extras]' format" + raise ValueError(msg, src) + res = m.groupdict() + extras = cls._parse_extras(res['extras']) + attrs = res['attr'].split('.') if res['attr'] else () + return cls(res['name'], res['module'], attrs, extras, dist) + + @classmethod + def _parse_extras(cls, extras_spec): + if not extras_spec: + return () + req = Requirement.parse('x' + extras_spec) + if req.specs: + raise ValueError() + return req.extras + + @classmethod + def parse_group(cls, group, lines, dist=None): + """Parse an entry point group""" + if not MODULE(group): + raise ValueError("Invalid group name", group) + this = {} + for line in yield_lines(lines): + ep = cls.parse(line, dist) + if ep.name in this: + raise ValueError("Duplicate entry point", group, ep.name) + this[ep.name]=ep + return this + + @classmethod + def parse_map(cls, data, dist=None): + """Parse a map of entry point groups""" + if isinstance(data, dict): + data = data.items() + else: + data = split_sections(data) + maps = {} + 
for group, lines in data: + if group is None: + if not lines: + continue + raise ValueError("Entry points must be listed in groups") + group = group.strip() + if group in maps: + raise ValueError("Duplicate group name", group) + maps[group] = cls.parse_group(group, lines, dist) + return maps + + +def _remove_md5_fragment(location): + if not location: + return '' + parsed = urlparse(location) + if parsed[-1].startswith('md5='): + return urlunparse(parsed[:-1] + ('',)) + return location + + +class Distribution(object): + """Wrap an actual or potential sys.path entry w/metadata""" + PKG_INFO = 'PKG-INFO' + + def __init__(self, location=None, metadata=None, project_name=None, + version=None, py_version=PY_MAJOR, platform=None, + precedence=EGG_DIST): + self.project_name = safe_name(project_name or 'Unknown') + if version is not None: + self._version = safe_version(version) + self.py_version = py_version + self.platform = platform + self.location = location + self.precedence = precedence + self._provider = metadata or empty_provider + + @classmethod + def from_location(cls, location, basename, metadata=None,**kw): + project_name, version, py_version, platform = [None]*4 + basename, ext = os.path.splitext(basename) + if ext.lower() in _distributionImpl: + # .dist-info gets much metadata differently + match = EGG_NAME(basename) + if match: + project_name, version, py_version, platform = match.group( + 'name','ver','pyver','plat' + ) + cls = _distributionImpl[ext.lower()] + return cls( + location, metadata, project_name=project_name, version=version, + py_version=py_version, platform=platform, **kw + ) + + @property + def hashcmp(self): + return ( + self.parsed_version, + self.precedence, + self.key, + _remove_md5_fragment(self.location), + self.py_version or '', + self.platform or '', + ) + + def __hash__(self): + return hash(self.hashcmp) + + def __lt__(self, other): + return self.hashcmp < other.hashcmp + + def __le__(self, other): + return self.hashcmp <= other.hashcmp 
+ + def __gt__(self, other): + return self.hashcmp > other.hashcmp + + def __ge__(self, other): + return self.hashcmp >= other.hashcmp + + def __eq__(self, other): + if not isinstance(other, self.__class__): + # It's not a Distribution, so they are not equal + return False + return self.hashcmp == other.hashcmp + + def __ne__(self, other): + return not self == other + + # These properties have to be lazy so that we don't have to load any + # metadata until/unless it's actually needed. (i.e., some distributions + # may not know their name or version without loading PKG-INFO) + + @property + def key(self): + try: + return self._key + except AttributeError: + self._key = key = self.project_name.lower() + return key + + @property + def parsed_version(self): + if not hasattr(self, "_parsed_version"): + self._parsed_version = parse_version(self.version) + + return self._parsed_version + + def _warn_legacy_version(self): + LV = packaging.version.LegacyVersion + is_legacy = isinstance(self._parsed_version, LV) + if not is_legacy: + return + + # While an empty version is technically a legacy version and + # is not a valid PEP 440 version, it's also unlikely to + # actually come from someone and instead it is more likely that + # it comes from setuptools attempting to parse a filename and + # including it in the list. So for that we'll gate this warning + # on if the version is anything at all or not. + if not self.version: + return + + tmpl = textwrap.dedent(""" + '{project_name} ({version})' is being parsed as a legacy, + non PEP 440, + version. You may find odd behavior and sort order. + In particular it will be sorted as less than 0.0. It + is recommended to migrate to PEP 440 compatible + versions. 
+ """).strip().replace('\n', ' ') + + warnings.warn(tmpl.format(**vars(self)), PEP440Warning) + + @property + def version(self): + try: + return self._version + except AttributeError: + for line in self._get_metadata(self.PKG_INFO): + if line.lower().startswith('version:'): + self._version = safe_version(line.split(':',1)[1].strip()) + return self._version + else: + tmpl = "Missing 'Version:' header and/or %s file" + raise ValueError(tmpl % self.PKG_INFO, self) + + @property + def _dep_map(self): + try: + return self.__dep_map + except AttributeError: + dm = self.__dep_map = {None: []} + for name in 'requires.txt', 'depends.txt': + for extra, reqs in split_sections(self._get_metadata(name)): + if extra: + if ':' in extra: + extra, marker = extra.split(':', 1) + if invalid_marker(marker): + # XXX warn + reqs=[] + elif not evaluate_marker(marker): + reqs=[] + extra = safe_extra(extra) or None + dm.setdefault(extra,[]).extend(parse_requirements(reqs)) + return dm + + def requires(self, extras=()): + """List of Requirements needed for this distro if `extras` are used""" + dm = self._dep_map + deps = [] + deps.extend(dm.get(None, ())) + for ext in extras: + try: + deps.extend(dm[safe_extra(ext)]) + except KeyError: + raise UnknownExtra( + "%s has no such extra feature %r" % (self, ext) + ) + return deps + + def _get_metadata(self, name): + if self.has_metadata(name): + for line in self.get_metadata_lines(name): + yield line + + def activate(self, path=None): + """Ensure distribution is importable on `path` (default=sys.path)""" + if path is None: + path = sys.path + self.insert_on(path) + if path is sys.path: + fixup_namespace_packages(self.location) + for pkg in self._get_metadata('namespace_packages.txt'): + if pkg in sys.modules: + declare_namespace(pkg) + + def egg_name(self): + """Return what this distribution's standard .egg filename should be""" + filename = "%s-%s-py%s" % ( + to_filename(self.project_name), to_filename(self.version), + self.py_version or 
PY_MAJOR + ) + + if self.platform: + filename += '-' + self.platform + return filename + + def __repr__(self): + if self.location: + return "%s (%s)" % (self, self.location) + else: + return str(self) + + def __str__(self): + try: + version = getattr(self, 'version', None) + except ValueError: + version = None + version = version or "[unknown version]" + return "%s %s" % (self.project_name, version) + + def __getattr__(self, attr): + """Delegate all unrecognized public attributes to .metadata provider""" + if attr.startswith('_'): + raise AttributeError(attr) + return getattr(self._provider, attr) + + @classmethod + def from_filename(cls, filename, metadata=None, **kw): + return cls.from_location( + _normalize_cached(filename), os.path.basename(filename), metadata, + **kw + ) + + def as_requirement(self): + """Return a ``Requirement`` that matches this distribution exactly""" + if isinstance(self.parsed_version, packaging.version.Version): + spec = "%s==%s" % (self.project_name, self.parsed_version) + else: + spec = "%s===%s" % (self.project_name, self.parsed_version) + + return Requirement.parse(spec) + + def load_entry_point(self, group, name): + """Return the `name` entry point of `group` or raise ImportError""" + ep = self.get_entry_info(group, name) + if ep is None: + raise ImportError("Entry point %r not found" % ((group, name),)) + return ep.load() + + def get_entry_map(self, group=None): + """Return the entry point map for `group`, or the full entry map""" + try: + ep_map = self._ep_map + except AttributeError: + ep_map = self._ep_map = EntryPoint.parse_map( + self._get_metadata('entry_points.txt'), self + ) + if group is not None: + return ep_map.get(group,{}) + return ep_map + + def get_entry_info(self, group, name): + """Return the EntryPoint object for `group`+`name`, or ``None``""" + return self.get_entry_map(group).get(name) + + def insert_on(self, path, loc = None): + """Insert self.location in path before its nearest parent directory""" + + loc = 
loc or self.location + if not loc: + return + + nloc = _normalize_cached(loc) + bdir = os.path.dirname(nloc) + npath= [(p and _normalize_cached(p) or p) for p in path] + + for p, item in enumerate(npath): + if item == nloc: + break + elif item == bdir and self.precedence == EGG_DIST: + # if it's an .egg, give it precedence over its directory + if path is sys.path: + self.check_version_conflict() + path.insert(p, loc) + npath.insert(p, nloc) + break + else: + if path is sys.path: + self.check_version_conflict() + path.append(loc) + return + + # p is the spot where we found or inserted loc; now remove duplicates + while True: + try: + np = npath.index(nloc, p+1) + except ValueError: + break + else: + del npath[np], path[np] + # ha! + p = np + + return + + def check_version_conflict(self): + if self.key == 'setuptools': + # ignore the inevitable setuptools self-conflicts :( + return + + nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt')) + loc = normalize_path(self.location) + for modname in self._get_metadata('top_level.txt'): + if (modname not in sys.modules or modname in nsp + or modname in _namespace_packages): + continue + if modname in ('pkg_resources', 'setuptools', 'site'): + continue + fn = getattr(sys.modules[modname], '__file__', None) + if fn and (normalize_path(fn).startswith(loc) or + fn.startswith(self.location)): + continue + issue_warning( + "Module %s was already imported from %s, but %s is being added" + " to sys.path" % (modname, fn, self.location), + ) + + def has_version(self): + try: + self.version + except ValueError: + issue_warning("Unbuilt egg for " + repr(self)) + return False + return True + + def clone(self,**kw): + """Copy this distribution, substituting in any changed keyword args""" + names = 'project_name version py_version platform location precedence' + for attr in names.split(): + kw.setdefault(attr, getattr(self, attr, None)) + kw.setdefault('metadata', self._provider) + return self.__class__(**kw) + + @property + 
def extras(self): + return [dep for dep in self._dep_map if dep] + + +class DistInfoDistribution(Distribution): + """Wrap an actual or potential sys.path entry w/metadata, .dist-info style""" + PKG_INFO = 'METADATA' + EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])") + + @property + def _parsed_pkg_info(self): + """Parse and cache metadata""" + try: + return self._pkg_info + except AttributeError: + metadata = self.get_metadata(self.PKG_INFO) + self._pkg_info = email.parser.Parser().parsestr(metadata) + return self._pkg_info + + @property + def _dep_map(self): + try: + return self.__dep_map + except AttributeError: + self.__dep_map = self._compute_dependencies() + return self.__dep_map + + def _preparse_requirement(self, requires_dist): + """Convert 'Foobar (1); baz' to ('Foobar ==1', 'baz') + Split environment marker, add == prefix to version specifiers as + necessary, and remove parenthesis. + """ + parts = requires_dist.split(';', 1) + [''] + distvers = parts[0].strip() + mark = parts[1].strip() + distvers = re.sub(self.EQEQ, r"\1==\2\3", distvers) + distvers = distvers.replace('(', '').replace(')', '') + return (distvers, mark) + + def _compute_dependencies(self): + """Recompute this distribution's dependencies.""" + from _markerlib import compile as compile_marker + dm = self.__dep_map = {None: []} + + reqs = [] + # Including any condition expressions + for req in self._parsed_pkg_info.get_all('Requires-Dist') or []: + distvers, mark = self._preparse_requirement(req) + parsed = next(parse_requirements(distvers)) + parsed.marker_fn = compile_marker(mark) + reqs.append(parsed) + + def reqs_for_extra(extra): + for req in reqs: + if req.marker_fn(override={'extra':extra}): + yield req + + common = frozenset(reqs_for_extra(None)) + dm[None].extend(common) + + for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []: + extra = safe_extra(extra.strip()) + dm[extra] = list(frozenset(reqs_for_extra(extra)) - common) + + return dm + + +_distributionImpl = { + 
'.egg': Distribution, + '.egg-info': Distribution, + '.dist-info': DistInfoDistribution, + } + + +def issue_warning(*args,**kw): + level = 1 + g = globals() + try: + # find the first stack frame that is *not* code in + # the pkg_resources module, to use for the warning + while sys._getframe(level).f_globals is g: + level += 1 + except ValueError: + pass + warnings.warn(stacklevel=level + 1, *args, **kw) + + +class RequirementParseError(ValueError): + def __str__(self): + return ' '.join(self.args) + + +def parse_requirements(strs): + """Yield ``Requirement`` objects for each specification in `strs` + + `strs` must be a string, or a (possibly-nested) iterable thereof. + """ + # create a steppable iterator, so we can handle \-continuations + lines = iter(yield_lines(strs)) + + def scan_list(ITEM, TERMINATOR, line, p, groups, item_name): + + items = [] + + while not TERMINATOR(line, p): + if CONTINUE(line, p): + try: + line = next(lines) + p = 0 + except StopIteration: + msg = "\\ must not appear on the last nonblank line" + raise RequirementParseError(msg) + + match = ITEM(line, p) + if not match: + msg = "Expected " + item_name + " in" + raise RequirementParseError(msg, line, "at", line[p:]) + + items.append(match.group(*groups)) + p = match.end() + + match = COMMA(line, p) + if match: + # skip the comma + p = match.end() + elif not TERMINATOR(line, p): + msg = "Expected ',' or end-of-list in" + raise RequirementParseError(msg, line, "at", line[p:]) + + match = TERMINATOR(line, p) + # skip the terminator, if any + if match: + p = match.end() + return line, p, items + + for line in lines: + match = DISTRO(line) + if not match: + raise RequirementParseError("Missing distribution spec", line) + project_name = match.group(1) + p = match.end() + extras = [] + + match = OBRACKET(line, p) + if match: + p = match.end() + line, p, extras = scan_list( + DISTRO, CBRACKET, line, p, (1,), "'extra' name" + ) + + line, p, specs = scan_list(VERSION, LINE_END, line, p, (1, 2), + 
"version spec") + specs = [(op, val) for op, val in specs] + yield Requirement(project_name, specs, extras) + + +class Requirement: + def __init__(self, project_name, specs, extras): + """DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!""" + self.unsafe_name, project_name = project_name, safe_name(project_name) + self.project_name, self.key = project_name, project_name.lower() + self.specifier = packaging.specifiers.SpecifierSet( + ",".join(["".join([x, y]) for x, y in specs]) + ) + self.specs = specs + self.extras = tuple(map(safe_extra, extras)) + self.hashCmp = ( + self.key, + self.specifier, + frozenset(self.extras), + ) + self.__hash = hash(self.hashCmp) + + def __str__(self): + extras = ','.join(self.extras) + if extras: + extras = '[%s]' % extras + return '%s%s%s' % (self.project_name, extras, self.specifier) + + def __eq__(self, other): + return ( + isinstance(other, Requirement) and + self.hashCmp == other.hashCmp + ) + + def __ne__(self, other): + return not self == other + + def __contains__(self, item): + if isinstance(item, Distribution): + if item.key != self.key: + return False + + item = item.version + + # Allow prereleases always in order to match the previous behavior of + # this method. In the future this should be smarter and follow PEP 440 + # more accurately. 
+ return self.specifier.contains(item, prereleases=True) + + def __hash__(self): + return self.__hash + + def __repr__(self): return "Requirement.parse(%r)" % str(self) + + @staticmethod + def parse(s): + reqs = list(parse_requirements(s)) + if reqs: + if len(reqs) == 1: + return reqs[0] + raise ValueError("Expected only one requirement", s) + raise ValueError("No requirements found", s) + + +def _get_mro(cls): + """Get an mro for a type or classic class""" + if not isinstance(cls, type): + class cls(cls, object): pass + return cls.__mro__[1:] + return cls.__mro__ + +def _find_adapter(registry, ob): + """Return an adapter factory for `ob` from `registry`""" + for t in _get_mro(getattr(ob, '__class__', type(ob))): + if t in registry: + return registry[t] + + +def ensure_directory(path): + """Ensure that the parent directory of `path` exists""" + dirname = os.path.dirname(path) + if not os.path.isdir(dirname): + os.makedirs(dirname) + + +def _bypass_ensure_directory(path): + """Sandbox-bypassing version of ensure_directory()""" + if not WRITE_SUPPORT: + raise IOError('"os.mkdir" not supported on this platform.') + dirname, filename = split(path) + if dirname and filename and not isdir(dirname): + _bypass_ensure_directory(dirname) + mkdir(dirname, 0o755) + + +def split_sections(s): + """Split a string or iterable thereof into (section, content) pairs + + Each ``section`` is a stripped version of the section header ("[section]") + and each ``content`` is a list of stripped lines excluding blank lines and + comment-only lines. If there are any such lines before the first section + header, they're returned in a first ``section`` of ``None``. 
+ """ + section = None + content = [] + for line in yield_lines(s): + if line.startswith("["): + if line.endswith("]"): + if section or content: + yield section, content + section = line[1:-1].strip() + content = [] + else: + raise ValueError("Invalid section heading", line) + else: + content.append(line) + + # wrap up last segment + yield section, content + +def _mkstemp(*args,**kw): + old_open = os.open + try: + # temporarily bypass sandboxing + os.open = os_open + return tempfile.mkstemp(*args,**kw) + finally: + # and then put it back + os.open = old_open + + +# Silence the PEP440Warning by default, so that end users don't get hit by it +# randomly just because they use pkg_resources. We want to append the rule +# because we want earlier uses of filterwarnings to take precedence over this +# one. +warnings.filterwarnings("ignore", category=PEP440Warning, append=True) + + +# from jaraco.functools 1.3 +def _call_aside(f, *args, **kwargs): + f(*args, **kwargs) + return f + + +@_call_aside +def _initialize(g=globals()): + "Set up global resource manager (deliberately not state-saved)" + manager = ResourceManager() + g['_manager'] = manager + for name in dir(manager): + if not name.startswith('_'): + g[name] = getattr(manager, name) + + +@_call_aside +def _initialize_master_working_set(): + """ + Prepare the master working set and make the ``require()`` + API available. + + This function has explicit effects on the global state + of pkg_resources. It is intended to be invoked once at + the initialization of this module. + + Invocation by other packages is unsupported and done + at their own risk. 
+ """ + working_set = WorkingSet._build_master() + _declare_state('object', working_set=working_set) + + require = working_set.require + iter_entry_points = working_set.iter_entry_points + add_activation_listener = working_set.subscribe + run_script = working_set.run_script + # backward compatibility + run_main = run_script + # Activate all distributions already on sys.path, and ensure that + # all distributions added to the working set in the future (e.g. by + # calling ``require()``) will get activated as well. + add_activation_listener(lambda dist: dist.activate()) + working_set.entries=[] + # match order + list(map(working_set.add_entry, sys.path)) + globals().update(locals()) diff --git a/pymode/libs/pylama/lint/pylama_pylint/logilab/__init__.py b/pymode/libs/pkg_resources/_vendor/__init__.py similarity index 100% rename from pymode/libs/pylama/lint/pylama_pylint/logilab/__init__.py rename to pymode/libs/pkg_resources/_vendor/__init__.py diff --git a/pymode/libs/pkg_resources/_vendor/packaging/__about__.py b/pymode/libs/pkg_resources/_vendor/packaging/__about__.py new file mode 100644 index 00000000..eadb794e --- /dev/null +++ b/pymode/libs/pkg_resources/_vendor/packaging/__about__.py @@ -0,0 +1,31 @@ +# Copyright 2014 Donald Stufft +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from __future__ import absolute_import, division, print_function + +__all__ = [ + "__title__", "__summary__", "__uri__", "__version__", "__author__", + "__email__", "__license__", "__copyright__", +] + +__title__ = "packaging" +__summary__ = "Core utilities for Python packages" +__uri__ = "https://github.com/pypa/packaging" + +__version__ = "15.3" + +__author__ = "Donald Stufft" +__email__ = "donald@stufft.io" + +__license__ = "Apache License, Version 2.0" +__copyright__ = "Copyright 2014 %s" % __author__ diff --git a/pymode/libs/pkg_resources/_vendor/packaging/__init__.py b/pymode/libs/pkg_resources/_vendor/packaging/__init__.py new file mode 100644 index 00000000..c39a8eab --- /dev/null +++ b/pymode/libs/pkg_resources/_vendor/packaging/__init__.py @@ -0,0 +1,24 @@ +# Copyright 2014 Donald Stufft +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from __future__ import absolute_import, division, print_function + +from .__about__ import ( + __author__, __copyright__, __email__, __license__, __summary__, __title__, + __uri__, __version__ +) + +__all__ = [ + "__title__", "__summary__", "__uri__", "__version__", "__author__", + "__email__", "__license__", "__copyright__", +] diff --git a/pymode/libs/pkg_resources/_vendor/packaging/_compat.py b/pymode/libs/pkg_resources/_vendor/packaging/_compat.py new file mode 100644 index 00000000..5c396cea --- /dev/null +++ b/pymode/libs/pkg_resources/_vendor/packaging/_compat.py @@ -0,0 +1,40 @@ +# Copyright 2014 Donald Stufft +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import absolute_import, division, print_function + +import sys + + +PY2 = sys.version_info[0] == 2 +PY3 = sys.version_info[0] == 3 + +# flake8: noqa + +if PY3: + string_types = str, +else: + string_types = basestring, + + +def with_metaclass(meta, *bases): + """ + Create a base class with a metaclass. + """ + # This requires a bit of explanation: the basic idea is to make a dummy + # metaclass for one level of class instantiation that replaces itself with + # the actual metaclass. 
+ class metaclass(meta): + def __new__(cls, name, this_bases, d): + return meta(name, bases, d) + return type.__new__(metaclass, 'temporary_class', (), {}) diff --git a/pymode/libs/pkg_resources/_vendor/packaging/_structures.py b/pymode/libs/pkg_resources/_vendor/packaging/_structures.py new file mode 100644 index 00000000..0ae9bb52 --- /dev/null +++ b/pymode/libs/pkg_resources/_vendor/packaging/_structures.py @@ -0,0 +1,78 @@ +# Copyright 2014 Donald Stufft +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from __future__ import absolute_import, division, print_function + + +class Infinity(object): + + def __repr__(self): + return "Infinity" + + def __hash__(self): + return hash(repr(self)) + + def __lt__(self, other): + return False + + def __le__(self, other): + return False + + def __eq__(self, other): + return isinstance(other, self.__class__) + + def __ne__(self, other): + return not isinstance(other, self.__class__) + + def __gt__(self, other): + return True + + def __ge__(self, other): + return True + + def __neg__(self): + return NegativeInfinity + +Infinity = Infinity() + + +class NegativeInfinity(object): + + def __repr__(self): + return "-Infinity" + + def __hash__(self): + return hash(repr(self)) + + def __lt__(self, other): + return True + + def __le__(self, other): + return True + + def __eq__(self, other): + return isinstance(other, self.__class__) + + def __ne__(self, other): + return not isinstance(other, self.__class__) + + def __gt__(self, other): + return False + + def __ge__(self, other): + return False + + def __neg__(self): + return Infinity + +NegativeInfinity = NegativeInfinity() diff --git a/pymode/libs/pkg_resources/_vendor/packaging/specifiers.py b/pymode/libs/pkg_resources/_vendor/packaging/specifiers.py new file mode 100644 index 00000000..891664f0 --- /dev/null +++ b/pymode/libs/pkg_resources/_vendor/packaging/specifiers.py @@ -0,0 +1,784 @@ +# Copyright 2014 Donald Stufft +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from __future__ import absolute_import, division, print_function + +import abc +import functools +import itertools +import re + +from ._compat import string_types, with_metaclass +from .version import Version, LegacyVersion, parse + + +class InvalidSpecifier(ValueError): + """ + An invalid specifier was found, users should refer to PEP 440. + """ + + +class BaseSpecifier(with_metaclass(abc.ABCMeta, object)): + + @abc.abstractmethod + def __str__(self): + """ + Returns the str representation of this Specifier like object. This + should be representative of the Specifier itself. + """ + + @abc.abstractmethod + def __hash__(self): + """ + Returns a hash value for this Specifier like object. + """ + + @abc.abstractmethod + def __eq__(self, other): + """ + Returns a boolean representing whether or not the two Specifier like + objects are equal. + """ + + @abc.abstractmethod + def __ne__(self, other): + """ + Returns a boolean representing whether or not the two Specifier like + objects are not equal. + """ + + @abc.abstractproperty + def prereleases(self): + """ + Returns whether or not pre-releases as a whole are allowed by this + specifier. + """ + + @prereleases.setter + def prereleases(self, value): + """ + Sets whether or not pre-releases as a whole are allowed by this + specifier. + """ + + @abc.abstractmethod + def contains(self, item, prereleases=None): + """ + Determines if the given item is contained within this specifier. + """ + + @abc.abstractmethod + def filter(self, iterable, prereleases=None): + """ + Takes an iterable of items and filters them so that only items which + are contained within this specifier are allowed in it. 
+ """ + + +class _IndividualSpecifier(BaseSpecifier): + + _operators = {} + + def __init__(self, spec="", prereleases=None): + match = self._regex.search(spec) + if not match: + raise InvalidSpecifier("Invalid specifier: '{0}'".format(spec)) + + self._spec = ( + match.group("operator").strip(), + match.group("version").strip(), + ) + + # Store whether or not this Specifier should accept prereleases + self._prereleases = prereleases + + def __repr__(self): + pre = ( + ", prereleases={0!r}".format(self.prereleases) + if self._prereleases is not None + else "" + ) + + return "<{0}({1!r}{2})>".format( + self.__class__.__name__, + str(self), + pre, + ) + + def __str__(self): + return "{0}{1}".format(*self._spec) + + def __hash__(self): + return hash(self._spec) + + def __eq__(self, other): + if isinstance(other, string_types): + try: + other = self.__class__(other) + except InvalidSpecifier: + return NotImplemented + elif not isinstance(other, self.__class__): + return NotImplemented + + return self._spec == other._spec + + def __ne__(self, other): + if isinstance(other, string_types): + try: + other = self.__class__(other) + except InvalidSpecifier: + return NotImplemented + elif not isinstance(other, self.__class__): + return NotImplemented + + return self._spec != other._spec + + def _get_operator(self, op): + return getattr(self, "_compare_{0}".format(self._operators[op])) + + def _coerce_version(self, version): + if not isinstance(version, (LegacyVersion, Version)): + version = parse(version) + return version + + @property + def operator(self): + return self._spec[0] + + @property + def version(self): + return self._spec[1] + + @property + def prereleases(self): + return self._prereleases + + @prereleases.setter + def prereleases(self, value): + self._prereleases = value + + def __contains__(self, item): + return self.contains(item) + + def contains(self, item, prereleases=None): + # Determine if prereleases are to be allowed or not. 
+ if prereleases is None: + prereleases = self.prereleases + + # Normalize item to a Version or LegacyVersion, this allows us to have + # a shortcut for ``"2.0" in Specifier(">=2") + item = self._coerce_version(item) + + # Determine if we should be supporting prereleases in this specifier + # or not, if we do not support prereleases than we can short circuit + # logic if this version is a prereleases. + if item.is_prerelease and not prereleases: + return False + + # Actually do the comparison to determine if this item is contained + # within this Specifier or not. + return self._get_operator(self.operator)(item, self.version) + + def filter(self, iterable, prereleases=None): + yielded = False + found_prereleases = [] + + kw = {"prereleases": prereleases if prereleases is not None else True} + + # Attempt to iterate over all the values in the iterable and if any of + # them match, yield them. + for version in iterable: + parsed_version = self._coerce_version(version) + + if self.contains(parsed_version, **kw): + # If our version is a prerelease, and we were not set to allow + # prereleases, then we'll store it for later incase nothing + # else matches this specifier. + if (parsed_version.is_prerelease + and not (prereleases or self.prereleases)): + found_prereleases.append(version) + # Either this is not a prerelease, or we should have been + # accepting prereleases from the begining. + else: + yielded = True + yield version + + # Now that we've iterated over everything, determine if we've yielded + # any values, and if we have not and we have any prereleases stored up + # then we will go ahead and yield the prereleases. 
+ if not yielded and found_prereleases: + for version in found_prereleases: + yield version + + +class LegacySpecifier(_IndividualSpecifier): + + _regex = re.compile( + r""" + ^ + \s* + (?P(==|!=|<=|>=|<|>)) + \s* + (?P + [^\s]* # We just match everything, except for whitespace since this + # is a "legacy" specifier and the version string can be just + # about anything. + ) + \s* + $ + """, + re.VERBOSE | re.IGNORECASE, + ) + + _operators = { + "==": "equal", + "!=": "not_equal", + "<=": "less_than_equal", + ">=": "greater_than_equal", + "<": "less_than", + ">": "greater_than", + } + + def _coerce_version(self, version): + if not isinstance(version, LegacyVersion): + version = LegacyVersion(str(version)) + return version + + def _compare_equal(self, prospective, spec): + return prospective == self._coerce_version(spec) + + def _compare_not_equal(self, prospective, spec): + return prospective != self._coerce_version(spec) + + def _compare_less_than_equal(self, prospective, spec): + return prospective <= self._coerce_version(spec) + + def _compare_greater_than_equal(self, prospective, spec): + return prospective >= self._coerce_version(spec) + + def _compare_less_than(self, prospective, spec): + return prospective < self._coerce_version(spec) + + def _compare_greater_than(self, prospective, spec): + return prospective > self._coerce_version(spec) + + +def _require_version_compare(fn): + @functools.wraps(fn) + def wrapped(self, prospective, spec): + if not isinstance(prospective, Version): + return False + return fn(self, prospective, spec) + return wrapped + + +class Specifier(_IndividualSpecifier): + + _regex = re.compile( + r""" + ^ + \s* + (?P(~=|==|!=|<=|>=|<|>|===)) + (?P + (?: + # The identity operators allow for an escape hatch that will + # do an exact string match of the version you wish to install. + # This will not be parsed by PEP 440 and we cannot determine + # any semantic meaning from it. 
This operator is discouraged + # but included entirely as an escape hatch. + (?<====) # Only match for the identity operator + \s* + [^\s]* # We just match everything, except for whitespace + # since we are only testing for strict identity. + ) + | + (?: + # The (non)equality operators allow for wild card and local + # versions to be specified so we have to define these two + # operators separately to enable that. + (?<===|!=) # Only match for equals and not equals + + \s* + v? + (?:[0-9]+!)? # epoch + [0-9]+(?:\.[0-9]+)* # release + (?: # pre release + [-_\.]? + (a|b|c|rc|alpha|beta|pre|preview) + [-_\.]? + [0-9]* + )? + (?: # post release + (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) + )? + + # You cannot use a wild card and a dev or local version + # together so group them with a | and make them optional. + (?: + (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release + (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local + | + \.\* # Wild card syntax of .* + )? + ) + | + (?: + # The compatible operator requires at least two digits in the + # release segment. + (?<=~=) # Only match for the compatible operator + + \s* + v? + (?:[0-9]+!)? # epoch + [0-9]+(?:\.[0-9]+)+ # release (We have a + instead of a *) + (?: # pre release + [-_\.]? + (a|b|c|rc|alpha|beta|pre|preview) + [-_\.]? + [0-9]* + )? + (?: # post release + (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) + )? + (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release + ) + | + (?: + # All other operators only allow a sub set of what the + # (non)equality operators do. Specifically they do not allow + # local versions to be specified nor do they allow the prefix + # matching wild cards. + (?=": "greater_than_equal", + "<": "less_than", + ">": "greater_than", + "===": "arbitrary", + } + + @_require_version_compare + def _compare_compatible(self, prospective, spec): + # Compatible releases have an equivalent combination of >= and ==. That + # is that ~=2.2 is equivalent to >=2.2,==2.*. 
This allows us to + # implement this in terms of the other specifiers instead of + # implementing it ourselves. The only thing we need to do is construct + # the other specifiers. + + # We want everything but the last item in the version, but we want to + # ignore post and dev releases and we want to treat the pre-release as + # it's own separate segment. + prefix = ".".join( + list( + itertools.takewhile( + lambda x: (not x.startswith("post") + and not x.startswith("dev")), + _version_split(spec), + ) + )[:-1] + ) + + # Add the prefix notation to the end of our string + prefix += ".*" + + return (self._get_operator(">=")(prospective, spec) + and self._get_operator("==")(prospective, prefix)) + + @_require_version_compare + def _compare_equal(self, prospective, spec): + # We need special logic to handle prefix matching + if spec.endswith(".*"): + # Split the spec out by dots, and pretend that there is an implicit + # dot in between a release segment and a pre-release segment. + spec = _version_split(spec[:-2]) # Remove the trailing .* + + # Split the prospective version out by dots, and pretend that there + # is an implicit dot in between a release segment and a pre-release + # segment. + prospective = _version_split(str(prospective)) + + # Shorten the prospective version to be the same length as the spec + # so that we can determine if the specifier is a prefix of the + # prospective version or not. + prospective = prospective[:len(spec)] + + # Pad out our two sides with zeros so that they both equal the same + # length. + spec, prospective = _pad_version(spec, prospective) + else: + # Convert our spec string into a Version + spec = Version(spec) + + # If the specifier does not have a local segment, then we want to + # act as if the prospective version also does not have a local + # segment. 
+ if not spec.local: + prospective = Version(prospective.public) + + return prospective == spec + + @_require_version_compare + def _compare_not_equal(self, prospective, spec): + return not self._compare_equal(prospective, spec) + + @_require_version_compare + def _compare_less_than_equal(self, prospective, spec): + return prospective <= Version(spec) + + @_require_version_compare + def _compare_greater_than_equal(self, prospective, spec): + return prospective >= Version(spec) + + @_require_version_compare + def _compare_less_than(self, prospective, spec): + # Convert our spec to a Version instance, since we'll want to work with + # it as a version. + spec = Version(spec) + + # Check to see if the prospective version is less than the spec + # version. If it's not we can short circuit and just return False now + # instead of doing extra unneeded work. + if not prospective < spec: + return False + + # This special case is here so that, unless the specifier itself + # includes is a pre-release version, that we do not accept pre-release + # versions for the version mentioned in the specifier (e.g. <3.1 should + # not match 3.1.dev0, but should match 3.0.dev0). + if not spec.is_prerelease and prospective.is_prerelease: + if Version(prospective.base_version) == Version(spec.base_version): + return False + + # If we've gotten to here, it means that prospective version is both + # less than the spec version *and* it's not a pre-release of the same + # version in the spec. + return True + + @_require_version_compare + def _compare_greater_than(self, prospective, spec): + # Convert our spec to a Version instance, since we'll want to work with + # it as a version. + spec = Version(spec) + + # Check to see if the prospective version is greater than the spec + # version. If it's not we can short circuit and just return False now + # instead of doing extra unneeded work. 
+ if not prospective > spec: + return False + + # This special case is here so that, unless the specifier itself + # includes is a post-release version, that we do not accept + # post-release versions for the version mentioned in the specifier + # (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0). + if not spec.is_postrelease and prospective.is_postrelease: + if Version(prospective.base_version) == Version(spec.base_version): + return False + + # Ensure that we do not allow a local version of the version mentioned + # in the specifier, which is techincally greater than, to match. + if prospective.local is not None: + if Version(prospective.base_version) == Version(spec.base_version): + return False + + # If we've gotten to here, it means that prospective version is both + # greater than the spec version *and* it's not a pre-release of the + # same version in the spec. + return True + + def _compare_arbitrary(self, prospective, spec): + return str(prospective).lower() == str(spec).lower() + + @property + def prereleases(self): + # If there is an explicit prereleases set for this, then we'll just + # blindly use that. + if self._prereleases is not None: + return self._prereleases + + # Look at all of our specifiers and determine if they are inclusive + # operators, and if they are if they are including an explicit + # prerelease. + operator, version = self._spec + if operator in ["==", ">=", "<=", "~=", "==="]: + # The == specifier can include a trailing .*, if it does we + # want to remove before parsing. + if operator == "==" and version.endswith(".*"): + version = version[:-2] + + # Parse the version, and if it is a pre-release than this + # specifier allows pre-releases. 
+ if parse(version).is_prerelease: + return True + + return False + + @prereleases.setter + def prereleases(self, value): + self._prereleases = value + + +_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$") + + +def _version_split(version): + result = [] + for item in version.split("."): + match = _prefix_regex.search(item) + if match: + result.extend(match.groups()) + else: + result.append(item) + return result + + +def _pad_version(left, right): + left_split, right_split = [], [] + + # Get the release segment of our versions + left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left))) + right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right))) + + # Get the rest of our versions + left_split.append(left[len(left_split):]) + right_split.append(left[len(right_split):]) + + # Insert our padding + left_split.insert( + 1, + ["0"] * max(0, len(right_split[0]) - len(left_split[0])), + ) + right_split.insert( + 1, + ["0"] * max(0, len(left_split[0]) - len(right_split[0])), + ) + + return ( + list(itertools.chain(*left_split)), + list(itertools.chain(*right_split)), + ) + + +class SpecifierSet(BaseSpecifier): + + def __init__(self, specifiers="", prereleases=None): + # Split on , to break each indidivual specifier into it's own item, and + # strip each item to remove leading/trailing whitespace. + specifiers = [s.strip() for s in specifiers.split(",") if s.strip()] + + # Parsed each individual specifier, attempting first to make it a + # Specifier and falling back to a LegacySpecifier. + parsed = set() + for specifier in specifiers: + try: + parsed.add(Specifier(specifier)) + except InvalidSpecifier: + parsed.add(LegacySpecifier(specifier)) + + # Turn our parsed specifiers into a frozen set and save them for later. + self._specs = frozenset(parsed) + + # Store our prereleases value so we can use it later to determine if + # we accept prereleases or not. 
+ self._prereleases = prereleases + + def __repr__(self): + pre = ( + ", prereleases={0!r}".format(self.prereleases) + if self._prereleases is not None + else "" + ) + + return "".format(str(self), pre) + + def __str__(self): + return ",".join(sorted(str(s) for s in self._specs)) + + def __hash__(self): + return hash(self._specs) + + def __and__(self, other): + if isinstance(other, string_types): + other = SpecifierSet(other) + elif not isinstance(other, SpecifierSet): + return NotImplemented + + specifier = SpecifierSet() + specifier._specs = frozenset(self._specs | other._specs) + + if self._prereleases is None and other._prereleases is not None: + specifier._prereleases = other._prereleases + elif self._prereleases is not None and other._prereleases is None: + specifier._prereleases = self._prereleases + elif self._prereleases == other._prereleases: + specifier._prereleases = self._prereleases + else: + raise ValueError( + "Cannot combine SpecifierSets with True and False prerelease " + "overrides." + ) + + return specifier + + def __eq__(self, other): + if isinstance(other, string_types): + other = SpecifierSet(other) + elif isinstance(other, _IndividualSpecifier): + other = SpecifierSet(str(other)) + elif not isinstance(other, SpecifierSet): + return NotImplemented + + return self._specs == other._specs + + def __ne__(self, other): + if isinstance(other, string_types): + other = SpecifierSet(other) + elif isinstance(other, _IndividualSpecifier): + other = SpecifierSet(str(other)) + elif not isinstance(other, SpecifierSet): + return NotImplemented + + return self._specs != other._specs + + def __len__(self): + return len(self._specs) + + def __iter__(self): + return iter(self._specs) + + @property + def prereleases(self): + # If we have been given an explicit prerelease modifier, then we'll + # pass that through here. 
+ if self._prereleases is not None: + return self._prereleases + + # If we don't have any specifiers, and we don't have a forced value, + # then we'll just return None since we don't know if this should have + # pre-releases or not. + if not self._specs: + return None + + # Otherwise we'll see if any of the given specifiers accept + # prereleases, if any of them do we'll return True, otherwise False. + return any(s.prereleases for s in self._specs) + + @prereleases.setter + def prereleases(self, value): + self._prereleases = value + + def __contains__(self, item): + return self.contains(item) + + def contains(self, item, prereleases=None): + # Ensure that our item is a Version or LegacyVersion instance. + if not isinstance(item, (LegacyVersion, Version)): + item = parse(item) + + # Determine if we're forcing a prerelease or not, if we're not forcing + # one for this particular filter call, then we'll use whatever the + # SpecifierSet thinks for whether or not we should support prereleases. + if prereleases is None: + prereleases = self.prereleases + + # We can determine if we're going to allow pre-releases by looking to + # see if any of the underlying items supports them. If none of them do + # and this item is a pre-release then we do not allow it and we can + # short circuit that here. + # Note: This means that 1.0.dev1 would not be contained in something + # like >=1.0.devabc however it would be in >=1.0.debabc,>0.0.dev0 + if not prereleases and item.is_prerelease: + return False + + # We simply dispatch to the underlying specs here to make sure that the + # given version is contained within all of them. + # Note: This use of all() here means that an empty set of specifiers + # will always return True, this is an explicit design decision. 
+ return all( + s.contains(item, prereleases=prereleases) + for s in self._specs + ) + + def filter(self, iterable, prereleases=None): + # Determine if we're forcing a prerelease or not, if we're not forcing + # one for this particular filter call, then we'll use whatever the + # SpecifierSet thinks for whether or not we should support prereleases. + if prereleases is None: + prereleases = self.prereleases + + # If we have any specifiers, then we want to wrap our iterable in the + # filter method for each one, this will act as a logical AND amongst + # each specifier. + if self._specs: + for spec in self._specs: + iterable = spec.filter(iterable, prereleases=bool(prereleases)) + return iterable + # If we do not have any specifiers, then we need to have a rough filter + # which will filter out any pre-releases, unless there are no final + # releases, and which will filter out LegacyVersion in general. + else: + filtered = [] + found_prereleases = [] + + for item in iterable: + # Ensure that we some kind of Version class for this item. 
+ if not isinstance(item, (LegacyVersion, Version)): + parsed_version = parse(item) + else: + parsed_version = item + + # Filter out any item which is parsed as a LegacyVersion + if isinstance(parsed_version, LegacyVersion): + continue + + # Store any item which is a pre-release for later unless we've + # already found a final version or we are accepting prereleases + if parsed_version.is_prerelease and not prereleases: + if not filtered: + found_prereleases.append(item) + else: + filtered.append(item) + + # If we've found no items except for pre-releases, then we'll go + # ahead and use the pre-releases + if not filtered and found_prereleases and prereleases is None: + return found_prereleases + + return filtered diff --git a/pymode/libs/pkg_resources/_vendor/packaging/version.py b/pymode/libs/pkg_resources/_vendor/packaging/version.py new file mode 100644 index 00000000..4ba574b9 --- /dev/null +++ b/pymode/libs/pkg_resources/_vendor/packaging/version.py @@ -0,0 +1,403 @@ +# Copyright 2014 Donald Stufft +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from __future__ import absolute_import, division, print_function + +import collections +import itertools +import re + +from ._structures import Infinity + + +__all__ = [ + "parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN" +] + + +_Version = collections.namedtuple( + "_Version", + ["epoch", "release", "dev", "pre", "post", "local"], +) + + +def parse(version): + """ + Parse the given version string and return either a :class:`Version` object + or a :class:`LegacyVersion` object depending on if the given version is + a valid PEP 440 version or a legacy version. + """ + try: + return Version(version) + except InvalidVersion: + return LegacyVersion(version) + + +class InvalidVersion(ValueError): + """ + An invalid version was found, users should refer to PEP 440. + """ + + +class _BaseVersion(object): + + def __hash__(self): + return hash(self._key) + + def __lt__(self, other): + return self._compare(other, lambda s, o: s < o) + + def __le__(self, other): + return self._compare(other, lambda s, o: s <= o) + + def __eq__(self, other): + return self._compare(other, lambda s, o: s == o) + + def __ge__(self, other): + return self._compare(other, lambda s, o: s >= o) + + def __gt__(self, other): + return self._compare(other, lambda s, o: s > o) + + def __ne__(self, other): + return self._compare(other, lambda s, o: s != o) + + def _compare(self, other, method): + if not isinstance(other, _BaseVersion): + return NotImplemented + + return method(self._key, other._key) + + +class LegacyVersion(_BaseVersion): + + def __init__(self, version): + self._version = str(version) + self._key = _legacy_cmpkey(self._version) + + def __str__(self): + return self._version + + def __repr__(self): + return "".format(repr(str(self))) + + @property + def public(self): + return self._version + + @property + def base_version(self): + return self._version + + @property + def local(self): + return None + + @property + def is_prerelease(self): + return False + + @property + 
def is_postrelease(self): + return False + + +_legacy_version_component_re = re.compile( + r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE, +) + +_legacy_version_replacement_map = { + "pre": "c", "preview": "c", "-": "final-", "rc": "c", "dev": "@", +} + + +def _parse_version_parts(s): + for part in _legacy_version_component_re.split(s): + part = _legacy_version_replacement_map.get(part, part) + + if not part or part == ".": + continue + + if part[:1] in "0123456789": + # pad for numeric comparison + yield part.zfill(8) + else: + yield "*" + part + + # ensure that alpha/beta/candidate are before final + yield "*final" + + +def _legacy_cmpkey(version): + # We hardcode an epoch of -1 here. A PEP 440 version can only have a epoch + # greater than or equal to 0. This will effectively put the LegacyVersion, + # which uses the defacto standard originally implemented by setuptools, + # as before all PEP 440 versions. + epoch = -1 + + # This scheme is taken from pkg_resources.parse_version setuptools prior to + # it's adoption of the packaging library. + parts = [] + for part in _parse_version_parts(version.lower()): + if part.startswith("*"): + # remove "-" before a prerelease tag + if part < "*final": + while parts and parts[-1] == "*final-": + parts.pop() + + # remove trailing zeros from each series of numeric parts + while parts and parts[-1] == "00000000": + parts.pop() + + parts.append(part) + parts = tuple(parts) + + return epoch, parts + +# Deliberately not anchored to the start and end of the string, to make it +# easier for 3rd party code to reuse +VERSION_PATTERN = r""" + v? + (?: + (?:(?P[0-9]+)!)? # epoch + (?P[0-9]+(?:\.[0-9]+)*) # release segment + (?P
                                              # pre-release
    +            [-_\.]?
    +            (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
    +            [-_\.]?
    +            (?P<pre_n>[0-9]+)?
    +        )?
    +        (?P<post>                                         # post release
    +            (?:-(?P<post_n1>[0-9]+))
    +            |
    +            (?:
    +                [-_\.]?
    +                (?P<post_l>post|rev|r)
    +                [-_\.]?
    +                (?P<post_n2>[0-9]+)?
    +            )
    +        )?
    +        (?P<dev>                                          # dev release
    +            [-_\.]?
    +            (?P<dev_l>dev)
    +            [-_\.]?
    +            (?P<dev_n>[0-9]+)?
    +        )?
    +    )
    +    (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
    +"""
    +
    +
    +class Version(_BaseVersion):
    +
    +    _regex = re.compile(
    +        r"^\s*" + VERSION_PATTERN + r"\s*$",
    +        re.VERBOSE | re.IGNORECASE,
    +    )
    +
    +    def __init__(self, version):
    +        # Validate the version and parse it into pieces
    +        match = self._regex.search(version)
    +        if not match:
    +            raise InvalidVersion("Invalid version: '{0}'".format(version))
    +
    +        # Store the parsed out pieces of the version
    +        self._version = _Version(
    +            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
    +            release=tuple(int(i) for i in match.group("release").split(".")),
    +            pre=_parse_letter_version(
    +                match.group("pre_l"),
    +                match.group("pre_n"),
    +            ),
    +            post=_parse_letter_version(
    +                match.group("post_l"),
    +                match.group("post_n1") or match.group("post_n2"),
    +            ),
    +            dev=_parse_letter_version(
    +                match.group("dev_l"),
    +                match.group("dev_n"),
    +            ),
    +            local=_parse_local_version(match.group("local")),
    +        )
    +
    +        # Generate a key which will be used for sorting
    +        self._key = _cmpkey(
    +            self._version.epoch,
    +            self._version.release,
    +            self._version.pre,
    +            self._version.post,
    +            self._version.dev,
    +            self._version.local,
    +        )
    +
    +    def __repr__(self):
    +        return "".format(repr(str(self)))
    +
    +    def __str__(self):
    +        parts = []
    +
    +        # Epoch
    +        if self._version.epoch != 0:
    +            parts.append("{0}!".format(self._version.epoch))
    +
    +        # Release segment
    +        parts.append(".".join(str(x) for x in self._version.release))
    +
    +        # Pre-release
    +        if self._version.pre is not None:
    +            parts.append("".join(str(x) for x in self._version.pre))
    +
    +        # Post-release
    +        if self._version.post is not None:
    +            parts.append(".post{0}".format(self._version.post[1]))
    +
    +        # Development release
    +        if self._version.dev is not None:
    +            parts.append(".dev{0}".format(self._version.dev[1]))
    +
    +        # Local version segment
    +        if self._version.local is not None:
    +            parts.append(
    +                "+{0}".format(".".join(str(x) for x in self._version.local))
    +            )
    +
    +        return "".join(parts)
    +
    +    @property
    +    def public(self):
    +        return str(self).split("+", 1)[0]
    +
    +    @property
    +    def base_version(self):
    +        parts = []
    +
    +        # Epoch
    +        if self._version.epoch != 0:
    +            parts.append("{0}!".format(self._version.epoch))
    +
    +        # Release segment
    +        parts.append(".".join(str(x) for x in self._version.release))
    +
    +        return "".join(parts)
    +
    +    @property
    +    def local(self):
    +        version_string = str(self)
    +        if "+" in version_string:
    +            return version_string.split("+", 1)[1]
    +
    +    @property
    +    def is_prerelease(self):
    +        return bool(self._version.dev or self._version.pre)
    +
    +    @property
    +    def is_postrelease(self):
    +        return bool(self._version.post)
    +
    +
    +def _parse_letter_version(letter, number):
    +    if letter:
    +        # We consider there to be an implicit 0 in a pre-release if there is
    +        # not a numeral associated with it.
    +        if number is None:
    +            number = 0
    +
    +        # We normalize any letters to their lower case form
    +        letter = letter.lower()
    +
    +        # We consider some words to be alternate spellings of other words and
    +        # in those cases we want to normalize the spellings to our preferred
    +        # spelling.
    +        if letter == "alpha":
    +            letter = "a"
    +        elif letter == "beta":
    +            letter = "b"
    +        elif letter in ["c", "pre", "preview"]:
    +            letter = "rc"
    +        elif letter in ["rev", "r"]:
    +            letter = "post"
    +
    +        return letter, int(number)
    +    if not letter and number:
    +        # We assume if we are given a number, but we are not given a letter
    +        # then this is using the implicit post release syntax (e.g. 1.0-1)
    +        letter = "post"
    +
    +        return letter, int(number)
    +
    +
    +_local_version_seperators = re.compile(r"[\._-]")
    +
    +
    +def _parse_local_version(local):
    +    """
    +    Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
    +    """
    +    if local is not None:
    +        return tuple(
    +            part.lower() if not part.isdigit() else int(part)
    +            for part in _local_version_seperators.split(local)
    +        )
    +
    +
    +def _cmpkey(epoch, release, pre, post, dev, local):
    +    # When we compare a release version, we want to compare it with all of the
    +    # trailing zeros removed. So we'll use a reverse the list, drop all the now
    +    # leading zeros until we come to something non zero, then take the rest
    +    # re-reverse it back into the correct order and make it a tuple and use
    +    # that for our sorting key.
    +    release = tuple(
    +        reversed(list(
    +            itertools.dropwhile(
    +                lambda x: x == 0,
    +                reversed(release),
    +            )
    +        ))
    +    )
    +
    +    # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
    +    # We'll do this by abusing the pre segment, but we _only_ want to do this
    +    # if there is not a pre or a post segment. If we have one of those then
    +    # the normal sorting rules will handle this case correctly.
    +    if pre is None and post is None and dev is not None:
    +        pre = -Infinity
    +    # Versions without a pre-release (except as noted above) should sort after
    +    # those with one.
    +    elif pre is None:
    +        pre = Infinity
    +
    +    # Versions without a post segment should sort before those with one.
    +    if post is None:
    +        post = -Infinity
    +
    +    # Versions without a development segment should sort after those with one.
    +    if dev is None:
    +        dev = Infinity
    +
    +    if local is None:
    +        # Versions without a local segment should sort before those with one.
    +        local = -Infinity
    +    else:
    +        # Versions with a local segment need that segment parsed to implement
    +        # the sorting rules in PEP440.
    +        # - Alpha numeric segments sort before numeric segments
    +        # - Alpha numeric segments sort lexicographically
    +        # - Numeric segments sort numerically
    +        # - Shorter versions sort before longer versions when the prefixes
    +        #   match exactly
    +        local = tuple(
    +            (i, "") if isinstance(i, int) else (-Infinity, i)
    +            for i in local
    +        )
    +
    +    return epoch, release, pre, post, dev, local
    diff --git a/pymode/libs/pyflakes/__init__.py b/pymode/libs/pyflakes/__init__.py
    new file mode 100644
    index 00000000..1f047803
    --- /dev/null
    +++ b/pymode/libs/pyflakes/__init__.py
    @@ -0,0 +1 @@
    +__version__ = '0.9.2'
    diff --git a/pymode/libs/pyflakes/__main__.py b/pymode/libs/pyflakes/__main__.py
    new file mode 100644
    index 00000000..a69e6891
    --- /dev/null
    +++ b/pymode/libs/pyflakes/__main__.py
    @@ -0,0 +1,5 @@
    +from pyflakes.api import main
    +
    +# python -m pyflakes (with Python >= 2.7)
    +if __name__ == '__main__':
    +    main(prog='pyflakes')
    diff --git a/pymode/libs/pyflakes/api.py b/pymode/libs/pyflakes/api.py
    new file mode 100644
    index 00000000..3bc23306
    --- /dev/null
    +++ b/pymode/libs/pyflakes/api.py
    @@ -0,0 +1,175 @@
    +"""
    +API for the command-line I{pyflakes} tool.
    +"""
    +from __future__ import with_statement
    +
    +import sys
    +import os
    +import _ast
    +
    +from pyflakes import checker, __version__
    +from pyflakes import reporter as modReporter
    +
    +__all__ = ['check', 'checkPath', 'checkRecursive', 'iterSourceCode', 'main']
    +
    +
    +def check(codeString, filename, reporter=None):
    +    """
    +    Check the Python source given by C{codeString} for flakes.
    +
    +    @param codeString: The Python source to check.
    +    @type codeString: C{str}
    +
    +    @param filename: The name of the file the source came from, used to report
    +        errors.
    +    @type filename: C{str}
    +
    +    @param reporter: A L{Reporter} instance, where errors and warnings will be
    +        reported.
    +
    +    @return: The number of warnings emitted.
    +    @rtype: C{int}
    +    """
    +    if reporter is None:
    +        reporter = modReporter._makeDefaultReporter()
    +    # First, compile into an AST and handle syntax errors.
    +    try:
    +        tree = compile(codeString, filename, "exec", _ast.PyCF_ONLY_AST)
    +    except SyntaxError:
    +        value = sys.exc_info()[1]
    +        msg = value.args[0]
    +
    +        (lineno, offset, text) = value.lineno, value.offset, value.text
    +
    +        # If there's an encoding problem with the file, the text is None.
    +        if text is None:
    +            # Avoid using msg, since for the only known case, it contains a
    +            # bogus message that claims the encoding the file declared was
    +            # unknown.
    +            reporter.unexpectedError(filename, 'problem decoding source')
    +        else:
    +            reporter.syntaxError(filename, msg, lineno, offset, text)
    +        return 1
    +    except Exception:
    +        reporter.unexpectedError(filename, 'problem decoding source')
    +        return 1
    +    # Okay, it's syntactically valid.  Now check it.
    +    w = checker.Checker(tree, filename)
    +    w.messages.sort(key=lambda m: m.lineno)
    +    for warning in w.messages:
    +        reporter.flake(warning)
    +    return len(w.messages)
    +
    +
    +def checkPath(filename, reporter=None):
    +    """
    +    Check the given path, printing out any warnings detected.
    +
    +    @param reporter: A L{Reporter} instance, where errors and warnings will be
    +        reported.
    +
    +    @return: the number of warnings printed
    +    """
    +    if reporter is None:
    +        reporter = modReporter._makeDefaultReporter()
    +    try:
    +        # in Python 2.6, compile() will choke on \r\n line endings. In later
    +        # versions of python it's smarter, and we want binary mode to give
    +        # compile() the best opportunity to do the right thing WRT text
    +        # encodings.
    +        if sys.version_info < (2, 7):
    +            mode = 'rU'
    +        else:
    +            mode = 'rb'
    +
    +        with open(filename, mode) as f:
    +            codestr = f.read()
    +        if sys.version_info < (2, 7):
    +            codestr += '\n'     # Work around for Python <= 2.6
    +    except UnicodeError:
    +        reporter.unexpectedError(filename, 'problem decoding source')
    +        return 1
    +    except IOError:
    +        msg = sys.exc_info()[1]
    +        reporter.unexpectedError(filename, msg.args[1])
    +        return 1
    +    return check(codestr, filename, reporter)
    +
    +
    +def iterSourceCode(paths):
    +    """
    +    Iterate over all Python source files in C{paths}.
    +
    +    @param paths: A list of paths.  Directories will be recursed into and
    +        any .py files found will be yielded.  Any non-directories will be
    +        yielded as-is.
    +    """
    +    for path in paths:
    +        if os.path.isdir(path):
    +            for dirpath, dirnames, filenames in os.walk(path):
    +                for filename in filenames:
    +                    if filename.endswith('.py'):
    +                        yield os.path.join(dirpath, filename)
    +        else:
    +            yield path
    +
    +
    +def checkRecursive(paths, reporter):
    +    """
    +    Recursively check all source files in C{paths}.
    +
    +    @param paths: A list of paths to Python source files and directories
    +        containing Python source files.
    +    @param reporter: A L{Reporter} where all of the warnings and errors
    +        will be reported to.
    +    @return: The number of warnings found.
    +    """
    +    warnings = 0
    +    for sourcePath in iterSourceCode(paths):
    +        warnings += checkPath(sourcePath, reporter)
    +    return warnings
    +
    +
    +def _exitOnSignal(sigName, message):
    +    """Handles a signal with sys.exit.
    +
    +    Some of these signals (SIGPIPE, for example) don't exist or are invalid on
    +    Windows. So, ignore errors that might arise.
    +    """
    +    import signal
    +
    +    try:
    +        sigNumber = getattr(signal, sigName)
    +    except AttributeError:
    +        # the signal constants defined in the signal module are defined by
    +        # whether the C library supports them or not. So, SIGPIPE might not
    +        # even be defined.
    +        return
    +
    +    def handler(sig, f):
    +        sys.exit(message)
    +
    +    try:
    +        signal.signal(sigNumber, handler)
    +    except ValueError:
    +        # It's also possible the signal is defined, but then it's invalid. In
    +        # this case, signal.signal raises ValueError.
    +        pass
    +
    +
    +def main(prog=None):
    +    """Entry point for the script "pyflakes"."""
    +    import optparse
    +
    +    # Handle "Keyboard Interrupt" and "Broken pipe" gracefully
    +    _exitOnSignal('SIGINT', '... stopped')
    +    _exitOnSignal('SIGPIPE', 1)
    +
    +    parser = optparse.OptionParser(prog=prog, version=__version__)
    +    (__, args) = parser.parse_args()
    +    reporter = modReporter._makeDefaultReporter()
    +    if args:
    +        warnings = checkRecursive(args, reporter)
    +    else:
    +        warnings = check(sys.stdin.read(), '<stdin>', reporter)
    +    raise SystemExit(warnings > 0)
    diff --git a/pymode/libs/pylama/lint/pylama_pyflakes/pyflakes/checker.py b/pymode/libs/pyflakes/checker.py
    similarity index 92%
    rename from pymode/libs/pylama/lint/pylama_pyflakes/pyflakes/checker.py
    rename to pymode/libs/pyflakes/checker.py
    index 70558324..e6e19427 100644
    --- a/pymode/libs/pylama/lint/pylama_pyflakes/pyflakes/checker.py
    +++ b/pymode/libs/pyflakes/checker.py
    @@ -448,6 +448,10 @@ def addBinding(self, node, value):
                 elif isinstance(existing, Importation) and value.redefines(existing):
                     existing.redefined.append(node)
     
    +        if value.name in self.scope:
    +            # then assume the rebound name is used as a global or within a loop
    +            value.used = self.scope[value.name].used
    +
             self.scope[value.name] = value
     
         def getNodeHandler(self, node_class):
    @@ -471,7 +475,7 @@ def handleNodeLoad(self, node):
                 return
     
             scopes = [scope for scope in self.scopeStack[:-1]
    -                  if isinstance(scope, (FunctionScope, ModuleScope))]
    +                  if isinstance(scope, (FunctionScope, ModuleScope, GeneratorScope))]
             if isinstance(self.scope, GeneratorScope) and scopes[-1] != self.scopeStack[-2]:
                 scopes.append(self.scopeStack[-2])
     
    @@ -526,14 +530,30 @@ def handleNodeStore(self, node):
                 binding = ExportBinding(name, node.parent, self.scope)
             else:
                 binding = Assignment(name, node)
    -        if name in self.scope:
    -            binding.used = self.scope[name].used
             self.addBinding(node, binding)
     
         def handleNodeDelete(self, node):
    +
    +        def on_conditional_branch():
    +            """
    +            Return `True` if node is part of a conditional body.
    +            """
    +            current = getattr(node, 'parent', None)
    +            while current:
    +                if isinstance(current, (ast.If, ast.While, ast.IfExp)):
    +                    return True
    +                current = getattr(current, 'parent', None)
    +            return False
    +
             name = getNodeName(node)
             if not name:
                 return
    +
    +        if on_conditional_branch():
    +            # We can not predict if this conditional branch is going to
    +            # be executed.
    +            return
    +
             if isinstance(self.scope, FunctionScope) and name in self.scope.globals:
                 self.scope.globals.remove(name)
             else:
    @@ -630,8 +650,9 @@ def ignore(self, node):
             pass
     
         # "stmt" type nodes
    -    DELETE = PRINT = FOR = WHILE = IF = WITH = WITHITEM = RAISE = \
    -        TRYFINALLY = ASSERT = EXEC = EXPR = ASSIGN = handleChildren
    +    DELETE = PRINT = FOR = ASYNCFOR = WHILE = IF = WITH = WITHITEM = \
    +        ASYNCWITH = ASYNCWITHITEM = RAISE = TRYFINALLY = ASSERT = EXEC = \
    +        EXPR = ASSIGN = handleChildren
     
         CONTINUE = BREAK = PASS = ignore
     
    @@ -654,14 +675,36 @@ def ignore(self, node):
             EQ = NOTEQ = LT = LTE = GT = GTE = IS = ISNOT = IN = NOTIN = ignore
     
         # additional node types
    -    LISTCOMP = COMPREHENSION = KEYWORD = handleChildren
    +    COMPREHENSION = KEYWORD = handleChildren
     
         def GLOBAL(self, node):
             """
             Keep track of globals declarations.
             """
    -        if isinstance(self.scope, FunctionScope):
    -            self.scope.globals.update(node.names)
    +        # In doctests, the global scope is an anonymous function at index 1.
    +        global_scope_index = 1 if self.withDoctest else 0
    +        global_scope = self.scopeStack[global_scope_index]
    +
    +        # Ignore 'global' statement in global scope.
    +        if self.scope is not global_scope:
    +
    +            # One 'global' statement can bind multiple (comma-delimited) names.
    +            for node_name in node.names:
    +                node_value = Assignment(node_name, node)
    +
    +                # Remove UndefinedName messages already reported for this name.
    +                self.messages = [
    +                    m for m in self.messages if not
    +                    isinstance(m, messages.UndefinedName) and not
    +                    m.message_args[0] == node_name]
    +
    +                # Bind name to global scope if it doesn't exist already.
    +                global_scope.setdefault(node_name, node_value)
    +
    +                # Bind name to non-global scopes, but as already "used".
    +                node_value.used = (global_scope, node)
    +                for scope in self.scopeStack[global_scope_index + 1:]:
    +                    scope[node_name] = node_value
     
         NONLOCAL = GLOBAL
     
    @@ -670,6 +713,8 @@ def GENERATOREXP(self, node):
             self.handleChildren(node)
             self.popScope()
     
    +    LISTCOMP = handleChildren if PY2 else GENERATOREXP
    +
         DICTCOMP = SETCOMP = GENERATOREXP
     
         def NAME(self, node):
    @@ -693,6 +738,10 @@ def NAME(self, node):
                 raise RuntimeError("Got impossible expression context: %r" % (node.ctx,))
     
         def RETURN(self, node):
    +        if isinstance(self.scope, ClassScope):
    +            self.report(messages.ReturnOutsideFunction, node)
    +            return
    +
             if (
                 node.value and
                 hasattr(self.scope, 'returnValue') and
    @@ -705,7 +754,7 @@ def YIELD(self, node):
             self.scope.isGenerator = True
             self.handleNode(node.value, node)
     
    -    YIELDFROM = YIELD
    +    AWAIT = YIELDFROM = YIELD
     
         def FUNCTIONDEF(self, node):
             for deco in node.decorator_list:
    @@ -715,6 +764,8 @@ def FUNCTIONDEF(self, node):
             if self.withDoctest:
                 self.deferFunction(lambda: self.handleDoctests(node))
     
    +    ASYNCFUNCTIONDEF = FUNCTIONDEF
    +
         def LAMBDA(self, node):
             args = []
             annotations = []
    diff --git a/pymode/libs/pylama/lint/pylama_pyflakes/pyflakes/messages.py b/pymode/libs/pyflakes/messages.py
    similarity index 94%
    rename from pymode/libs/pylama/lint/pylama_pyflakes/pyflakes/messages.py
    rename to pymode/libs/pyflakes/messages.py
    index 1f799ec5..8899b7b0 100644
    --- a/pymode/libs/pylama/lint/pylama_pyflakes/pyflakes/messages.py
    +++ b/pymode/libs/pyflakes/messages.py
    @@ -100,14 +100,6 @@ def __init__(self, filename, loc, name):
             self.message_args = (name,)
     
     
    -class Redefined(Message):
    -    message = 'redefinition of %r from line %r'
    -
    -    def __init__(self, filename, loc, name, orig_loc):
    -        Message.__init__(self, filename, loc)
    -        self.message_args = (name, orig_loc.lineno)
    -
    -
     class LateFutureImport(Message):
         message = 'future import(s) %r after other statements'
     
    @@ -133,3 +125,10 @@ class ReturnWithArgsInsideGenerator(Message):
         Indicates a return statement with arguments inside a generator.
         """
         message = '\'return\' with argument inside generator'
    +
    +
    +class ReturnOutsideFunction(Message):
    +    """
    +    Indicates a return statement outside of a function/method.
    +    """
    +    message = '\'return\' outside function'
    diff --git a/pymode/libs/pyflakes/reporter.py b/pymode/libs/pyflakes/reporter.py
    new file mode 100644
    index 00000000..ae645bdf
    --- /dev/null
    +++ b/pymode/libs/pyflakes/reporter.py
    @@ -0,0 +1,81 @@
    +"""
    +Provide the Reporter class.
    +"""
    +
    +import re
    +import sys
    +
    +
    +class Reporter(object):
    +    """
    +    Formats the results of pyflakes checks to users.
    +    """
    +
    +    def __init__(self, warningStream, errorStream):
    +        """
    +        Construct a L{Reporter}.
    +
    +        @param warningStream: A file-like object where warnings will be
    +            written to.  The stream's C{write} method must accept unicode.
    +            C{sys.stdout} is a good value.
    +        @param errorStream: A file-like object where error output will be
    +            written to.  The stream's C{write} method must accept unicode.
    +            C{sys.stderr} is a good value.
    +        """
    +        self._stdout = warningStream
    +        self._stderr = errorStream
    +
    +    def unexpectedError(self, filename, msg):
    +        """
    +        An unexpected error occurred trying to process C{filename}.
    +
    +        @param filename: The path to a file that we could not process.
    +        @ptype filename: C{unicode}
    +        @param msg: A message explaining the problem.
    +        @ptype msg: C{unicode}
    +        """
    +        self._stderr.write("%s: %s\n" % (filename, msg))
    +
    +    def syntaxError(self, filename, msg, lineno, offset, text):
    +        """
    +        There was a syntax error in C{filename}.
    +
    +        @param filename: The path to the file with the syntax error.
    +        @ptype filename: C{unicode}
    +        @param msg: An explanation of the syntax error.
    +        @ptype msg: C{unicode}
    +        @param lineno: The line number where the syntax error occurred.
    +        @ptype lineno: C{int}
    +        @param offset: The column on which the syntax error occurred, or None.
    +        @ptype offset: C{int}
    +        @param text: The source code containing the syntax error.
    +        @ptype text: C{unicode}
    +        """
    +        line = text.splitlines()[-1]
    +        if offset is not None:
    +            offset = offset - (len(text) - len(line))
    +            self._stderr.write('%s:%d:%d: %s\n' %
    +                               (filename, lineno, offset + 1, msg))
    +        else:
    +            self._stderr.write('%s:%d: %s\n' % (filename, lineno, msg))
    +        self._stderr.write(line)
    +        self._stderr.write('\n')
    +        if offset is not None:
    +            self._stderr.write(re.sub(r'\S', ' ', line[:offset]) +
    +                               "^\n")
    +
    +    def flake(self, message):
    +        """
    +        pyflakes found something wrong with the code.
    +
    +        @param: A L{pyflakes.messages.Message}.
    +        """
    +        self._stdout.write(str(message))
    +        self._stdout.write('\n')
    +
    +
    +def _makeDefaultReporter():
    +    """
    +    Make a reporter that can be used when no reporter is specified.
    +    """
    +    return Reporter(sys.stdout, sys.stderr)
    diff --git a/pymode/libs/pylama/__init__.py b/pymode/libs/pylama/__init__.py
    index 1576bfd2..6662db91 100644
    --- a/pymode/libs/pylama/__init__.py
    +++ b/pymode/libs/pylama/__init__.py
    @@ -1,11 +1,11 @@
    -""" Code audit tool for python.
    +"""
    +Code audit tool for python.
     
     :copyright: 2013 by Kirill Klenov.
     :license: BSD, see LICENSE for more details.
    -
     """
     
    -__version__ = "5.0.5"
    +__version__ = "7.0.3"
     __project__ = "pylama"
     __author__ = "Kirill Klenov "
     __license__ = "GNU LGPL"
    diff --git a/pymode/libs/pylama/__main__.py b/pymode/libs/pylama/__main__.py
    new file mode 100644
    index 00000000..64994e75
    --- /dev/null
    +++ b/pymode/libs/pylama/__main__.py
    @@ -0,0 +1,6 @@
    +"""Support the module execution."""
    +
    +from .main import shell
    +
    +if __name__ == '__main__':
    +    shell()
    diff --git a/pymode/libs/pylama/tasks.py b/pymode/libs/pylama/async.py
    similarity index 55%
    rename from pymode/libs/pylama/tasks.py
    rename to pymode/libs/pylama/async.py
    index 69881e1e..12f929fa 100644
    --- a/pymode/libs/pylama/tasks.py
    +++ b/pymode/libs/pylama/async.py
    @@ -1,15 +1,16 @@
    -""" Support for asyncronious code checking. """
    +""" Support for asynchronous checking. """
     
     import logging
     import threading
    -from os import path as op
    +
    +from .core import run
    +
    +
     try:
         import Queue
     except ImportError:
         import queue as Queue
     
    -from .core import run
    -
     
     try:
         import multiprocessing
    @@ -36,41 +37,33 @@ def run(self):
             """ Run tasks from queue. """
             while True:
                 path, params = self.path_queue.get()
    -            errors = check_path(path, **params)
    +            errors = run(path, **params)
                 self.result_queue.put(errors)
                 self.path_queue.task_done()
     
     
    -def async_check_files(paths, options, rootpath=None):
    -    """ Check paths.
    +def check_async(paths, options, rootdir=None):
    +    """ Check given paths asynchronously.
     
         :return list: list of errors
     
         """
    -    errors = []
    -
    -    # Disable async if pylint enabled
    -    async = options.async and 'pylint' not in options.linters
    -
    -    if not async:
    -        for path in paths:
    -            errors += check_path(path, options=options, rootpath=rootpath)
    -        return errors
    -
         LOGGER.info('Async code checking is enabled.')
         path_queue = Queue.Queue()
         result_queue = Queue.Queue()
     
    -    for _ in range(CPU_COUNT):
    +    for num in range(CPU_COUNT):
             worker = Worker(path_queue, result_queue)
             worker.setDaemon(True)
    +        LOGGER.info('Start worker #%s', (num + 1))
             worker.start()
     
         for path in paths:
    -        path_queue.put((path, dict(options=options, rootpath=rootpath)))
    +        path_queue.put((path, dict(options=options, rootdir=rootdir)))
     
         path_queue.join()
     
    +    errors = []
         while True:
             try:
                 errors += result_queue.get(False)
    @@ -80,23 +73,4 @@ def async_check_files(paths, options, rootpath=None):
         return errors
     
     
    -def check_path(path, options=None, rootpath=None, code=None):
    -    """ Check path.
    -
    -    :return list: list of errors
    -
    -    """
    -    LOGGER.info("Parse file: %s", path)
    -
    -    rootpath = rootpath or '.'
    -    errors = []
    -    for error in run(path, code, options):
    -        try:
    -            error._info['rel'] = op.relpath(error.filename, rootpath)
    -            errors.append(error)
    -        except KeyError:
    -            continue
    -
    -    return errors
    -
    -# pylama:ignore=W0212
    +# pylama:ignore=W0212,D210,F0001
    diff --git a/pymode/libs/pylama/config.py b/pymode/libs/pylama/config.py
    index 881e930a..3df38829 100644
    --- a/pymode/libs/pylama/config.py
    +++ b/pymode/libs/pylama/config.py
    @@ -1,8 +1,8 @@
     """ Parse arguments from command line and configuration files. """
     import fnmatch
    -import sys
     import os
    -from re import compile as re
    +import sys
    +import re
     
     import logging
     from argparse import ArgumentParser
    @@ -11,6 +11,17 @@
     from .libs.inirama import Namespace
     from .lint.extensions import LINTERS
     
    +#: Default checkers
    +DEFAULT_LINTERS = 'pep8', 'pyflakes', 'mccabe'
    +
    +CURDIR = os.getcwd()
    +CONFIG_FILES = 'pylama.ini', 'setup.cfg', 'tox.ini', 'pytest.ini'
    +
    +#: The skip pattern
    +SKIP_PATTERN = re.compile(r'# *noqa\b', re.I).search
    +
    +# Parse modelines
    +MODELINE_RE = re.compile(r'^\s*#\s+(?:pylama:)\s*((?:[\w_]*=[^:\n\s]+:?)+)', re.I | re.M)
     
     # Setup a logger
     LOGGER = logging.getLogger('pylama')
    @@ -18,15 +29,6 @@
     STREAM = logging.StreamHandler(sys.stdout)
     LOGGER.addHandler(STREAM)
     
    -#: A default checkers
    -DEFAULT_LINTERS = 'pep8', 'pyflakes', 'mccabe'
    -
    -CURDIR = os.getcwd()
    -CONFIG_FILES = [
    -    os.path.join(CURDIR, basename) for basename in
    -    ('pylama.ini', 'setup.cfg', 'tox.ini', 'pytest.ini')
    -]
    -
     
     class _Default(object):
     
    @@ -69,8 +71,8 @@ def parse_linters(linters):
     
     PARSER = ArgumentParser(description="Code audit tool for python.")
     PARSER.add_argument(
    -    "path", nargs='?', default=_Default(CURDIR),
    -    help="Path on file or directory for code check.")
    +    "paths", nargs='*', default=_Default([CURDIR]),
    +    help="Paths to files or directories for code check.")
     
     PARSER.add_argument(
         "--verbose", "-v", action='store_true', help="Verbose mode.")
    @@ -86,6 +88,9 @@ def parse_linters(linters):
         "--select", "-s", default=_Default(''), type=split_csp_str,
         help="Select errors and warnings. (comma-separated list)")
     
    +PARSER.add_argument(
    +    "--sort", default=_Default(''), type=split_csp_str,
    +    help="Sort result by error types. Ex. E,W,D")
     
     PARSER.add_argument(
         "--linters", "-l", default=_Default(','.join(DEFAULT_LINTERS)),
    @@ -100,7 +105,7 @@ def parse_linters(linters):
     
     PARSER.add_argument(
         "--skip", default=_Default(''),
    -    type=lambda s: [re(fnmatch.translate(p)) for p in s.split(',') if p],
    +    type=lambda s: [re.compile(fnmatch.translate(p)) for p in s.split(',') if p],
         help="Skip files by masks (comma-separated, Ex. */messages.py)")
     
     PARSER.add_argument("--report", "-r", help="Send report to file [REPORT]")
    @@ -120,11 +125,15 @@ def parse_linters(linters):
         "--force", "-F", action='store_true', default=_Default(False),
         help="Force code checking (if linter doesnt allow)")
     
    +PARSER.add_argument(
    +    "--abspath", "-a", action='store_true', default=_Default(False),
    +    help="Use absolute paths in output.")
    +
     
     ACTIONS = dict((a.dest, a) for a in PARSER._actions)
     
     
    -def parse_options(args=None, config=True, **overrides): # noqa
    +def parse_options(args=None, config=True, rootdir=CURDIR, **overrides): # noqa
         """ Parse options from command line and configuration files.
     
         :return argparse.Namespace:
    @@ -146,11 +155,13 @@ def parse_options(args=None, config=True, **overrides): # noqa
     
         # Compile options from ini
         if config:
    -        cfg = get_config(str(options.options))
    +        cfg = get_config(str(options.options), rootdir=rootdir)
             for k, v in cfg.default.items():
                 LOGGER.info('Find option %s (%s)', k, v)
                 passed_value = getattr(options, k, _Default())
                 if isinstance(passed_value, _Default):
    +                if k == 'paths':
    +                    v = v.split()
                     setattr(options, k, _Default(v))
     
             # Parse file related options
    @@ -168,7 +179,7 @@ def parse_options(args=None, config=True, **overrides): # noqa
                     options.linters_params[name] = dict(opts)
                     continue
     
    -            mask = re(fnmatch.translate(name))
    +            mask = re.compile(fnmatch.translate(name))
                 options.file_params[mask] = dict(opts)
     
         # Postprocess options
    @@ -177,6 +188,10 @@ def parse_options(args=None, config=True, **overrides): # noqa
             if isinstance(value, _Default):
                 setattr(options, name, process_value(name, value.value))
     
    +    if options.async and 'pylint' in options.linters:
    +        LOGGER.warn('Cant parse code asynchronously while pylint is enabled.')
    +        options.async = False
    +
         return options
     
     
    @@ -195,7 +210,7 @@ def process_value(name, value):
         return value
     
     
    -def get_config(ini_path=None):
    +def get_config(ini_path=None, rootdir=CURDIR):
         """ Load configuration from INI.
     
         :return Namespace:
    @@ -206,6 +221,7 @@ def get_config(ini_path=None):
     
         if not ini_path:
             for path in CONFIG_FILES:
    +            path = os.path.join(rootdir, path)
                 if os.path.isfile(path) and os.access(path, os.R_OK):
                     config.read(path)
         else:
    @@ -222,4 +238,4 @@ def setup_logger(options):
             LOGGER.addHandler(logging.FileHandler(options.report, mode='w'))
         LOGGER.info('Try to read configuration from: ' + options.options)
     
    -# pylama:ignore=W0212
    +# pylama:ignore=W0212,D210,F0001
    diff --git a/pymode/libs/pylama/core.py b/pymode/libs/pylama/core.py
    index 1283a662..c0522bf4 100644
    --- a/pymode/libs/pylama/core.py
    +++ b/pymode/libs/pylama/core.py
    @@ -3,33 +3,25 @@
     Prepare params, check a modeline and run the checkers.
     
     """
    -import re
    -
     import logging
    -from collections import defaultdict
     
    -from .config import process_value, LOGGER
    +import os.path as op
    +from .config import process_value, LOGGER, MODELINE_RE, SKIP_PATTERN, CURDIR
    +from .errors import Error, remove_duplicates
     from .lint.extensions import LINTERS
    -from .errors import DUPLICATES, Error
    -
    -
    -#: The skip pattern
    -SKIP_PATTERN = re.compile(r'# *noqa\b', re.I).search
    -
    -# Parse a modelines
    -MODELINE_RE = re.compile(
    -    r'^\s*#\s+(?:pylama:)\s*((?:[\w_]*=[^:\n\s]+:?)+)',
    -    re.I | re.M)
     
     
    -def run(path='', code=None, options=None):
    -    """ Run a code checkers with given params.
    +def run(path='', code=None, rootdir=CURDIR, options=None):
    +    """ Run code checkers with given params.
     
    +    :param path: (str) A file's path.
    +    :param code: (str) A code source
         :return errors: list of dictionaries with error's information
     
         """
         errors = []
         fileconfig = dict()
    +    lname = 'undefined'
         params = dict()
         linters = LINTERS
         linters_params = dict()
    @@ -43,6 +35,7 @@ def run(path='', code=None, options=None):
     
         try:
             with CodeContext(code, path) as ctx:
    +            path = op.relpath(path, rootdir)
                 code = ctx.code
                 params = prepare_params(parse_modeline(code), fileconfig, options)
                 LOGGER.debug('Checking params: %s', params)
    @@ -50,7 +43,7 @@ def run(path='', code=None, options=None):
                 if params.get('skip'):
                     return errors
     
    -            for item in linters:
    +            for item in params.get('linters') or linters:
     
                     if not isinstance(item, tuple):
                         item = (item, LINTERS.get(item))
    @@ -89,7 +82,11 @@ def run(path='', code=None, options=None):
         if code and errors:
             errors = filter_skiplines(code, errors)
     
    -    return sorted(errors, key=lambda e: e.lnum)
    +    key = lambda e: e.lnum
    +    if options and options.sort:
    +        sort = dict((v, n) for n, v in enumerate(options.sort, 1))
    +        key = lambda e: (sort.get(e.type, 999), e.lnum)
    +    return sorted(errors, key=key)
     
     
     def parse_modeline(code):
    @@ -111,13 +108,13 @@ def prepare_params(modeline, fileconfig, options):
         :return dict:
     
         """
    -    params = dict(skip=False, ignore=[], select=[])
    +    params = dict(skip=False, ignore=[], select=[], linters=[])
         if options:
    -        params['ignore'] = options.ignore
    -        params['select'] = options.select
    +        params['ignore'] = list(options.ignore)
    +        params['select'] = list(options.select)
     
         for config in filter(None, [modeline, fileconfig]):
    -        for key in ('ignore', 'select'):
    +        for key in ('ignore', 'select', 'linters'):
                 params[key] += process_value(key, config.get(key, []))
             params['skip'] = bool(int(config.get('skip', False)))
     
    @@ -170,18 +167,6 @@ def filter_skiplines(code, errors):
         return errors
     
     
    -def remove_duplicates(errors):
    -    """ Remove same errors from others linters. """
    -    passed = defaultdict(list)
    -    for error in errors:
    -        key = error.linter, error.number
    -        if key in DUPLICATES:
    -            if key in passed[error.lnum]:
    -                continue
    -            passed[error.lnum] = DUPLICATES[key]
    -        yield error
    -
    -
     class CodeContext(object):
     
         """ Read file if code is None. """
    @@ -193,16 +178,19 @@ def __init__(self, code, path):
             self._file = None
     
         def __enter__(self):
    -        """ Open file and read a code. """
    +        """ Open a file and read it. """
             if self.code is None:
    +            LOGGER.info("File is reading: %s", self.path)
                 self._file = open(self.path, 'rU')
                 self.code = self._file.read()
             return self
     
         def __exit__(self, t, value, traceback):
    -        """ Close opened file. """
    +        """ Close the file which was opened. """
             if self._file is not None:
                 self._file.close()
     
             if t and LOGGER.level == logging.DEBUG:
                 LOGGER.debug(traceback)
    +
    +# pylama:ignore=R0912,D210,F0001
    diff --git a/pymode/libs/pylama/errors.py b/pymode/libs/pylama/errors.py
    index 9e80d2a6..7f6c0a11 100644
    --- a/pymode/libs/pylama/errors.py
    +++ b/pymode/libs/pylama/errors.py
    @@ -1,13 +1,13 @@
    -""" Dont duplicate errors same type. """
    +""" Don't duplicate same errors from different linters. """
    +
    +from collections import defaultdict
    +
     
     DUPLICATES = (
     
         # multiple statements on one line
         [('pep8', 'E701'), ('pylint', 'C0321')],
     
    -    # missing whitespace around operator
    -    [('pep8', 'E225'), ('pylint', 'C0326')],
    -
         # unused variable
         [('pylint', 'W0612'), ('pyflakes', 'W0612')],
     
    @@ -17,15 +17,24 @@
         # unused import
         [('pylint', 'W0611'), ('pyflakes', 'W0611')],
     
    +    # whitespace before ')'
    +    [('pylint', 'C0326'), ('pep8', 'E202')],
    +
    +    # whitespace before '('
    +    [('pylint', 'C0326'), ('pep8', 'E211')],
    +
    +    # multiple spaces after operator
    +    [('pylint', 'C0326'), ('pep8', 'E222')],
    +
    +    # missing whitespace around operator
    +    [('pylint', 'C0326'), ('pep8', 'E225')],
    +
         # unexpected spaces
         [('pylint', 'C0326'), ('pep8', 'E251')],
     
         # long lines
         [('pylint', 'C0301'), ('pep8', 'E501')],
     
    -    # whitespace before '('
    -    [('pylint', 'C0326'), ('pep8', 'E211')],
    -
         # statement ends with a semicolon
         [('pylint', 'W0301'), ('pep8', 'E703')],
     
    @@ -35,14 +44,32 @@
         # bad indentation
         [('pylint', 'W0311'), ('pep8', 'E111')],
     
    +    # wildcard import
    +    [('pylint', 'W00401'), ('pyflakes', 'W0401')],
    +
    +    # module docstring
    +    [('pep257', 'D100'), ('pylint', 'C0111')],
    +
     )
     
     DUPLICATES = dict((key, values) for values in DUPLICATES for key in values)
     
     
    +def remove_duplicates(errors):
    +    """ Filter duplicates from given error's list. """
    +    passed = defaultdict(list)
    +    for error in errors:
    +        key = error.linter, error.number
    +        if key in DUPLICATES:
    +            if key in passed[error.lnum]:
    +                continue
    +            passed[error.lnum] = DUPLICATES[key]
    +        yield error
    +
    +
     class Error(object):
     
    -    """ Store error information. """
    +    """ Store an error's information. """
     
         def __init__(self, linter="", col=1, lnum=1, type="E",
                      text="unknown error", filename="", **kwargs):
    @@ -51,7 +78,7 @@ def __init__(self, linter="", col=1, lnum=1, type="E",
             if linter:
                 text = "%s [%s]" % (text, linter)
             number = text.split(' ', 1)[0]
    -        self._info = dict(linter=linter, col=col, lnum=lnum, type=type,
    +        self._info = dict(linter=linter, col=col, lnum=lnum, type=type[:1],
                               text=text, filename=filename, number=number)
     
         def __getattr__(self, name):
    diff --git a/pymode/libs/pylama/hook.py b/pymode/libs/pylama/hook.py
    index 0dc34069..f65ef46f 100644
    --- a/pymode/libs/pylama/hook.py
    +++ b/pymode/libs/pylama/hook.py
    @@ -6,7 +6,7 @@
     from os import path as op, chmod
     from subprocess import Popen, PIPE
     
    -from .main import LOGGER
    +from .main import LOGGER, process_paths
     from .config import parse_options, setup_logger
     
     
    @@ -30,18 +30,17 @@ def run(command):
     
     def git_hook():
         """ Run pylama after git commit. """
    -    from .main import check_files
    -
         _, files_modified, _ = run("git diff-index --cached --name-only HEAD")
     
         options = parse_options()
         setup_logger(options)
    -    check_files([f for f in map(str, files_modified)], options)
    +    candidates = list(map(str, files_modified))
    +    if candidates:
    +        process_paths(options, candidates=candidates)
     
     
     def hg_hook(ui, repo, node=None, **kwargs):
         """ Run pylama after mercurial commit. """
    -    from .main import check_files
         seen = set()
         paths = []
         if len(repo):
    @@ -55,7 +54,8 @@ def hg_hook(ui, repo, node=None, **kwargs):
     
         options = parse_options()
         setup_logger(options)
    -    check_files(paths, options)
    +    if paths:
    +        process_paths(options, candidates=paths)
     
     
     def install_git(path):
    @@ -79,7 +79,7 @@ def install_hg(path):
             open(hook, 'w+').close()
     
         c = ConfigParser()
    -    c.readfp(open(path, 'r'))
    +    c.readfp(open(hook, 'r'))
         if not c.has_section('hooks'):
             c.add_section('hooks')
     
    @@ -89,7 +89,7 @@ def install_hg(path):
         if not c.has_option('hooks', 'qrefresh'):
             c.set('hooks', 'qrefresh', 'python:pylama.hooks.hg_hook')
     
    -    c.write(open(path, 'w+'))
    +    c.write(open(hook, 'w+'))
     
     
     def install_hook(path):
    @@ -101,11 +101,11 @@ def install_hook(path):
             LOGGER.warn('Git hook has been installed.')
     
         elif op.exists(hg):
    -        install_hg(git)
    +        install_hg(hg)
             LOGGER.warn('Mercurial hook has been installed.')
     
         else:
             LOGGER.error('VCS has not found. Check your path.')
             sys.exit(1)
     
    -# lint_ignore=F0401,E1103
    +# pylama:ignore=F0401,E1103,D210,F0001
    diff --git a/pymode/libs/pylama/lint/__init__.py b/pymode/libs/pylama/lint/__init__.py
    index d5d75901..bd8e8da7 100644
    --- a/pymode/libs/pylama/lint/__init__.py
    +++ b/pymode/libs/pylama/lint/__init__.py
    @@ -1,22 +1,19 @@
    -""" Custom module loader. """
    +"""Custom module loader."""
     
     
    -class Linter(object): # noqa
    +class Linter(object):
     
    -    """ Abstract class for linter plugin. """
    +    """Abstract class for linter plugin."""
     
         @staticmethod
         def allow(path):
    -        """ Check path is relevant for linter.
    +        """Check path is relevant for linter.
     
             :return bool:
    -
             """
    -
             return path.endswith('.py')
     
         @staticmethod
         def run(path, **meta):
    -        """ Method 'run' should be defined. """
    -
    +        """Method 'run' should be defined."""
             raise NotImplementedError(__doc__)
    diff --git a/pymode/libs/pylama/lint/extensions.py b/pymode/libs/pylama/lint/extensions.py
    index 6e0bc3d2..7092fba3 100644
    --- a/pymode/libs/pylama/lint/extensions.py
    +++ b/pymode/libs/pylama/lint/extensions.py
    @@ -1,25 +1,30 @@
    -""" Load extensions. """
    +"""Load extensions."""
     
    -from os import listdir, path as op
    +LINTERS = {}
     
    +try:
    +    from pylama.lint.pylama_mccabe import Linter
    +    LINTERS['mccabe'] = Linter()
    +except ImportError:
    +    pass
     
    -CURDIR = op.dirname(__file__)
    -LINTERS = dict()
    -PREFIX = 'pylama_'
    +try:
    +    from pylama.lint.pylama_pep257 import Linter
    +    LINTERS['pep257'] = Linter()
    +except ImportError:
    +    pass
     
     try:
    -    from importlib import import_module
    +    from pylama.lint.pylama_pep8 import Linter
    +    LINTERS['pep8'] = Linter()
     except ImportError:
    -    from ..libs.importlib import import_module
    +    pass
     
    -for p in listdir(CURDIR):
    -    if p.startswith(PREFIX) and op.isdir(op.join(CURDIR, p)):
    -        name = p[len(PREFIX):]
    -        try:
    -            module = import_module('.lint.%s%s' % (PREFIX, name), 'pylama')
    -            LINTERS[name] = getattr(module, 'Linter')()
    -        except ImportError:
    -            continue
    +try:
    +    from pylama.lint.pylama_pyflakes import Linter
    +    LINTERS['pyflakes'] = Linter()
    +except ImportError:
    +    pass
     
     try:
         from pkg_resources import iter_entry_points
    @@ -29,3 +34,5 @@
                 LINTERS[entry.name] = entry.load()()
     except ImportError:
         pass
    +
    +#  pylama:ignore=E0611
    diff --git a/pymode/libs/pylama/lint/pylama_mccabe.py b/pymode/libs/pylama/lint/pylama_mccabe.py
    new file mode 100644
    index 00000000..fc191004
    --- /dev/null
    +++ b/pymode/libs/pylama/lint/pylama_mccabe.py
    @@ -0,0 +1,29 @@
    +"""Code complexity checking."""
    +from mccabe import McCabeChecker
    +
    +from pylama.lint import Linter as Abstract
    +import ast
    +
    +
    +class Linter(Abstract):
    +
    +    """Run complexity checking."""
    +
    +    @staticmethod
    +    def run(path, code=None, params=None, **meta):
    +        """MCCabe code checking.
    +
    +        :return list: List of errors.
    +        """
    +        try:
    +            tree = compile(code, path, "exec", ast.PyCF_ONLY_AST)
    +        except SyntaxError as exc:
    +            return [{'lnum': exc.lineno, 'text': 'Invalid syntax: %s' % exc.text.strip()}]
    +
    +        McCabeChecker.max_complexity = int(params.get('complexity', 10))
    +        return [
    +            {'lnum': lineno, 'offset': offset, 'text': text, 'type': McCabeChecker._code}
    +            for lineno, offset, text, _ in McCabeChecker(tree, path).run()
    +        ]
    +
    +#  pylama:ignore=W0212
    diff --git a/pymode/libs/pylama/lint/pylama_mccabe/__init__.py b/pymode/libs/pylama/lint/pylama_mccabe/__init__.py
    deleted file mode 100644
    index da8b5f2a..00000000
    --- a/pymode/libs/pylama/lint/pylama_mccabe/__init__.py
    +++ /dev/null
    @@ -1,20 +0,0 @@
    -""" Check complexity. """
    -
    -from .. import Linter as BaseLinter
    -
    -
    -class Linter(BaseLinter):
    -
    -    """ Mccabe code complexity. """
    -
    -    @staticmethod
    -    def run(path, code=None, params=None, **meta):
    -        """ MCCabe code checking.
    -
    -        :return list: List of errors.
    -
    -        """
    -        from .mccabe import get_code_complexity
    -
    -        complexity = int(params.get('complexity', 10))
    -        return get_code_complexity(code, complexity, filename=path) or []
    diff --git a/pymode/libs/pylama/lint/pylama_pep257.py b/pymode/libs/pylama/lint/pylama_pep257.py
    new file mode 100644
    index 00000000..5e1f785c
    --- /dev/null
    +++ b/pymode/libs/pylama/lint/pylama_pep257.py
    @@ -0,0 +1,21 @@
    +"""PEP257 support."""
    +
    +from pep257 import PEP257Checker
    +
    +from pylama.lint import Linter as Abstract
    +
    +
    +class Linter(Abstract):
    +
    +    """Check PEP257 errors."""
    +
    +    @staticmethod
    +    def run(path, code=None, **meta):
    +        """PEP257 code checking.
    +
    +        :return list: List of errors.
    +        """
    +        return [
    +            {'lnum': e.line, 'text': e.message, 'type': 'D'}
    +            for e in PEP257Checker().check_source(code, path)
    +        ]
    diff --git a/pymode/libs/pylama/lint/pylama_pep257/__init__.py b/pymode/libs/pylama/lint/pylama_pep257/__init__.py
    deleted file mode 100644
    index 99474666..00000000
    --- a/pymode/libs/pylama/lint/pylama_pep257/__init__.py
    +++ /dev/null
    @@ -1,26 +0,0 @@
    -""" Check PEP257. """
    -
    -from .. import Linter as BaseLinter
    -
    -
    -class Linter(BaseLinter):
    -
    -    """ Mccabe code complexity. """
    -
    -    @staticmethod
    -    def run(path, code=None, **meta):
    -        """ PEP257 code checking.
    -
    -        :return list: List of errors.
    -
    -        """
    -        from .pep257 import PEP257Checker
    -
    -        errors = []
    -        for er in PEP257Checker().check_source(code, path):
    -            errors.append(dict(
    -                lnum=er.line,
    -                text=er.message,
    -                type='D',
    -            ))
    -        return errors
    diff --git a/pymode/libs/pylama/lint/pylama_pep257/pep257.py b/pymode/libs/pylama/lint/pylama_pep257/pep257.py
    deleted file mode 100644
    index c5df0f72..00000000
    --- a/pymode/libs/pylama/lint/pylama_pep257/pep257.py
    +++ /dev/null
    @@ -1,728 +0,0 @@
    -#! /usr/bin/env python
    -"""Static analysis tool for checking docstring conventions and style.
    -
    -Implemented checks cover PEP257:
    -http://www.python.org/dev/peps/pep-0257/
    -
    -Other checks can be added, e.g. NumPy docstring conventions:
    -https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt
    -
    -The repository is located at:
    -http://github.com/GreenSteam/pep257
    -
    -"""
    -from __future__ import with_statement
    -
    -import os
    -import sys
    -import tokenize as tk
    -from itertools import takewhile, dropwhile, chain
    -from optparse import OptionParser
    -from re import compile as re
    -
    -
    -try:
    -    from StringIO import StringIO
    -except ImportError:  # Python 3.0 and later
    -    from io import StringIO
    -
    -
    -try:
    -    next
    -except NameError:  # Python 2.5 and earlier
    -    nothing = object()
    -
    -    def next(obj, default=nothing):
    -        if default == nothing:
    -            return obj.next()
    -        else:
    -            try:
    -                return obj.next()
    -            except StopIteration:
    -                return default
    -
    -
    -__version__ = '0.3.3-alpha'
    -__all__ = ('check', 'collect')
    -
    -
    -humanize = lambda string: re(r'(.)([A-Z]+)').sub(r'\1 \2', string).lower()
    -is_magic = lambda name: name.startswith('__') and name.endswith('__')
    -is_ascii = lambda string: all(ord(char) < 128 for char in string)
    -is_blank = lambda string: not string.strip()
    -leading_space = lambda string: re('\s*').match(string).group()
    -
    -
    -class Value(object):
    -
    -    __init__ = lambda self, *args: vars(self).update(zip(self._fields, args))
    -    __hash__ = lambda self: hash(repr(self))
    -    __eq__ = lambda self, other: other and vars(self) == vars(other)
    -
    -    def __repr__(self):
    -        args = [vars(self)[field] for field in self._fields]
    -        return '%s(%s)' % (self.__class__.__name__, ', '.join(map(repr, args)))
    -
    -
    -class Definition(Value):
    -
    -    _fields = 'name _source start end docstring children parent'.split()
    -
    -    _human = property(lambda self: humanize(type(self).__name__))
    -    kind = property(lambda self: self._human.split()[-1])
    -    module = property(lambda self: self.parent.module)
    -    all = property(lambda self: self.module.all)
    -    _slice = property(lambda self: slice(self.start - 1, self.end))
    -    source = property(lambda self: ''.join(self._source[self._slice]))
    -    __iter__ = lambda self: chain([self], *self.children)
    -
    -    @property
    -    def _publicity(self):
    -        return {True: 'public', False: 'private'}[self.is_public]
    -
    -    def __str__(self):
    -        return 'in %s %s `%s`' % (self._publicity, self._human, self.name)
    -
    -
    -class Module(Definition):
    -
    -    _fields = 'name _source start end docstring children parent _all'.split()
    -    is_public = True
    -    _nest = staticmethod(lambda s: {'def': Function, 'class': Class}[s])
    -    module = property(lambda self: self)
    -    all = property(lambda self: self._all)
    -    __str__ = lambda self: 'at module level'
    -
    -
    -class Function(Definition):
    -
    -    _nest = staticmethod(lambda s: {'def': NestedFunction,
    -                                    'class': NestedClass}[s])
    -
    -    @property
    -    def is_public(self):
    -        if self.all is not None:
    -            return self.name in self.all
    -        else:  # TODO: are there any magic functions? not methods
    -            return not self.name.startswith('_') or is_magic(self.name)
    -
    -
    -class NestedFunction(Function):
    -
    -    is_public = False
    -
    -
    -class Method(Function):
    -
    -    @property
    -    def is_public(self):
    -        name_is_public = not self.name.startswith('_') or is_magic(self.name)
    -        return self.parent.is_public and name_is_public
    -
    -
    -class Class(Definition):
    -
    -    _nest = staticmethod(lambda s: {'def': Method, 'class': NestedClass}[s])
    -    is_public = Function.is_public
    -
    -
    -class NestedClass(Class):
    -
    -    is_public = False
    -
    -
    -class Token(Value):
    -
    -    _fields = 'kind value start end source'.split()
    -
    -
    -class TokenStream(object):
    -
    -    def __init__(self, filelike):
    -        self._generator = tk.generate_tokens(filelike.readline)
    -        self.current = Token(*next(self._generator, None))
    -        self.line = self.current.start[0]
    -
    -    def move(self):
    -        previous = self.current
    -        current = next(self._generator, None)
    -        self.current = None if current is None else Token(*current)
    -        self.line = self.current.start[0] if self.current else self.line
    -        return previous
    -
    -    def __iter__(self):
    -        while True:
    -            if self.current is not None:
    -                yield self.current
    -            else:
    -                return
    -            self.move()
    -
    -
    -class AllError(Exception):
    -
    -    def __init__(self, message):
    -        Exception.__init__(
    -            self, message +
    -            'That means pep257 cannot decide which definitions are public. '
    -            'Variable __all__ should be present at most once in each file, '
    -            "in form `__all__ = ('a_public_function', 'APublicClass', ...)`. "
    -            'More info on __all__: http://stackoverflow.com/q/44834/. ')
    -
    -
    -class Parser(object):
    -
    -    def __call__(self, filelike, filename):
    -        self.source = filelike.readlines()
    -        src = ''.join(self.source)
    -        self.stream = TokenStream(StringIO(src))
    -        self.filename = filename
    -        self.all = None
    -        return self.parse_module()
    -
    -    current = property(lambda self: self.stream.current)
    -    line = property(lambda self: self.stream.line)
    -
    -    def consume(self, kind):
    -        assert self.stream.move().kind == kind
    -
    -    def leapfrog(self, kind):
    -        for token in self.stream:
    -            if token.kind == kind:
    -                self.consume(kind)
    -                return
    -
    -    def parse_docstring(self):
    -        for token in self.stream:
    -            if token.kind in [tk.COMMENT, tk.NEWLINE, tk.NL]:
    -                continue
    -            elif token.kind == tk.STRING:
    -                return token.value
    -            else:
    -                return None
    -
    -    def parse_definitions(self, class_, all=False):
    -        for token in self.stream:
    -            if all and token.value == '__all__':
    -                self.parse_all()
    -            if token.value in ['def', 'class']:
    -                yield self.parse_definition(class_._nest(token.value))
    -            if token.kind == tk.INDENT:
    -                self.consume(tk.INDENT)
    -                for definition in self.parse_definitions(class_):
    -                    yield definition
    -            if token.kind == tk.DEDENT:
    -                return
    -
    -    def parse_all(self):
    -        assert self.current.value == '__all__'
    -        self.consume(tk.NAME)
    -        if self.current.value != '=':
    -            raise AllError('Could not evaluate contents of __all__. ')
    -        self.consume(tk.OP)
    -        if self.current.value not in '([':
    -            raise AllError('Could not evaluate contents of __all__. ')
    -        if self.current.value == '[':
    -            msg = ("%s WARNING: __all__ is defined as a list, this means "
    -                   "pep257 cannot reliably detect contents of the __all__ "
    -                   "variable, because it can be mutated. Change __all__ to be "
    -                   "an (immutable) tuple, to remove this warning. Note, "
    -                   "pep257 uses __all__ to detect which definitions are "
    -                   "public, to warn if public definitions are missing "
    -                   "docstrings. If __all__ is a (mutable) list, pep257 cannot "
    -                   "reliably assume its contents. pep257 will proceed "
    -                   "assuming __all__ is not mutated.\n" % self.filename)
    -            sys.stderr.write(msg)
    -        self.consume(tk.OP)
    -        s = '('
    -        while self.current.kind in (tk.NL, tk.COMMENT):
    -            self.stream.move()
    -        if self.current.kind != tk.STRING:
    -            raise AllError('Could not evaluate contents of __all__. ')
    -        while self.current.value not in ')]':
    -            s += self.current.value
    -            self.stream.move()
    -        s += ')'
    -        try:
    -            self.all = eval(s, {})
    -        except BaseException:
    -            raise AllError('Could not evaluate contents of __all__: %s. ' % s)
    -
    -    def parse_module(self):
    -        start = self.line
    -        docstring = self.parse_docstring()
    -        children = list(self.parse_definitions(Module, all=True))
    -        assert self.current is None
    -        end = self.line
    -        module = Module(self.filename, self.source, start, end,
    -                        docstring, children, None, self.all)
    -        for child in module.children:
    -            child.parent = module
    -        return module
    -
    -    def parse_definition(self, class_):
    -        start = self.line
    -        self.consume(tk.NAME)
    -        name = self.current.value
    -        self.leapfrog(tk.INDENT)
    -        assert self.current.kind != tk.INDENT
    -        docstring = self.parse_docstring()
    -        children = list(self.parse_definitions(class_))
    -        assert self.current.kind == tk.DEDENT
    -        end = self.line - 1
    -        definition = class_(name, self.source, start, end,
    -                            docstring, children, None)
    -        for child in definition.children:
    -            child.parent = definition
    -        return definition
    -
    -
    -class Error(object):
    -
    -    """Error in docstring style."""
    -
    -    # Options that define how errors are printed:
    -    explain = False
    -    source = False
    -
    -    def __init__(self, message=None, final=False):
    -        self.message, self.is_final = message, final
    -        self.definition, self.explanation = [None, None]
    -
    -    code = property(lambda self: self.message.partition(':')[0])
    -    filename = property(lambda self: self.definition.module.name)
    -    line = property(lambda self: self.definition.start)
    -
    -    @property
    -    def lines(self):
    -        source = ''
    -        lines = self.definition._source[self.definition._slice]
    -        offset = self.definition.start
    -        lines_stripped = list(reversed(list(dropwhile(is_blank,
    -                                                      reversed(lines)))))
    -        numbers_width = 0
    -        for n, line in enumerate(lines_stripped):
    -            numbers_width = max(numbers_width, n + offset)
    -        numbers_width = len(str(numbers_width))
    -        numbers_width = 6
    -        for n, line in enumerate(lines_stripped):
    -            source += '%*d: %s' % (numbers_width, n + offset, line)
    -            if n > 5:
    -                source += '        ...\n'
    -                break
    -        return source
    -
    -    def __str__(self):
    -        self.explanation = '\n'.join(l for l in self.explanation.split('\n')
    -                                     if not is_blank(l))
    -        template = '%(filename)s:%(line)s %(definition)s:\n        %(message)s'
    -        if self.source and self.explain:
    -            template += '\n\n%(explanation)s\n\n%(lines)s\n'
    -        elif self.source and not self.explain:
    -            template += '\n\n%(lines)s\n'
    -        elif self.explain and not self.source:
    -            template += '\n\n%(explanation)s\n\n'
    -        return template % dict((name, getattr(self, name)) for name in
    -                               ['filename', 'line', 'definition', 'message',
    -                                'explanation', 'lines'])
    -
    -    __repr__ = __str__
    -
    -    def __lt__(self, other):
    -        return (self.filename, self.line) < (other.filename, other.line)
    -
    -
    -def parse_options():
    -    parser = OptionParser(version=__version__,
    -                          usage='Usage: pep257 [options] [...]')
    -    option = parser.add_option
    -    option('-e', '--explain', action='store_true',
    -           help='show explanation of each error')
    -    option('-s', '--source', action='store_true',
    -           help='show source for each error')
    -    option('--ignore', metavar='', default='',
    -           help='ignore a list comma-separated error codes, '
    -                'for example: --ignore=D101,D202')
    -    option('--match', metavar='', default='(?!test_).*\.py',
    -           help="check only files that exactly match  regular "
    -                "expression; default is --match='(?!test_).*\.py' which "
    -                "matches files that don't start with 'test_' but end with "
    -                "'.py'")
    -    option('--match-dir', metavar='', default='[^\.].*',
    -           help="search only dirs that exactly match  regular "
    -                "expression; default is --match-dir='[^\.].*', which matches "
    -                "all dirs that don't start with a dot")
    -    return parser.parse_args()
    -
    -
    -def collect(names, match=lambda name: True, match_dir=lambda name: True):
    -    """Walk dir trees under `names` and generate filnames that `match`.
    -
    -    Example
    -    -------
    -    >>> sorted(collect(['non-dir.txt', './'],
    -    ...                match=lambda name: name.endswith('.py')))
    -    ['non-dir.txt', './pep257.py', './setup.py', './test_pep257.py']
    -
    -    """
    -    for name in names:  # map(expanduser, names):
    -        if os.path.isdir(name):
    -            for root, dirs, filenames in os.walk(name):
    -                for dir in dirs:
    -                    if not match_dir(dir):
    -                        dirs.remove(dir)  # do not visit those dirs
    -                for filename in filenames:
    -                    if match(filename):
    -                        yield os.path.join(root, filename)
    -        else:
    -            yield name
    -
    -
    -def check(filenames, ignore=()):
    -    """Generate PEP 257 errors that exist in `filenames` iterable.
    -
    -    Skips errors with error-codes defined in `ignore` iterable.
    -
    -    Example
    -    -------
    -    >>> check(['pep257.py'], ignore=['D100'])
    -    
    -
    -    """
    -    for filename in filenames:
    -        try:
    -            with open(filename) as file:
    -                source = file.read()
    -            for error in PEP257Checker().check_source(source, filename):
    -                code = getattr(error, 'code', None)
    -                if code is not None and code not in ignore:
    -                    yield error
    -        except (EnvironmentError, AllError):
    -            yield sys.exc_info()[1]
    -        except tk.TokenError:
    -            yield SyntaxError('invalid syntax in file %s' % filename)
    -
    -
    -def main(options, arguments):
    -    Error.explain = options.explain
    -    Error.source = options.source
    -    collected = collect(arguments or ['.'],
    -                        match=re(options.match + '$').match,
    -                        match_dir=re(options.match_dir + '$').match)
    -    code = 0
    -    for error in check(collected, ignore=options.ignore.split(',')):
    -        sys.stderr.write('%s\n' % error)
    -        code = 1
    -    return code
    -
    -
    -parse = Parser()
    -
    -
    -def check_for(kind, terminal=False):
    -    def decorator(f):
    -        f._check_for = kind
    -        f._terminal = terminal
    -        return f
    -    return decorator
    -
    -
    -class PEP257Checker(object):
    -
    -    """Checker for PEP 257.
    -
    -    D10x: Missing docstrings
    -    D20x: Whitespace issues
    -    D30x: Docstring formatting
    -    D40x: Docstring content issues
    -
    -    """
    -
    -    def check_source(self, source, filename):
    -        module = parse(StringIO(source), filename)
    -        for definition in module:
    -            for check in self.checks:
    -                terminate = False
    -                if isinstance(definition, check._check_for):
    -                    error = check(None, definition, definition.docstring)
    -                    errors = error if hasattr(error, '__iter__') else [error]
    -                    for error in errors:
    -                        if error is not None:
    -                            partition = check.__doc__.partition('.\n')
    -                            message, _, explanation = partition
    -                            if error.message is None:
    -                                error.message = message
    -                            error.explanation = explanation
    -                            error.definition = definition
    -                            yield error
    -                            if check._terminal:
    -                                terminate = True
    -                                break
    -                if terminate:
    -                    break
    -
    -    @property
    -    def checks(self):
    -        all = [check for check in vars(type(self)).values()
    -               if hasattr(check, '_check_for')]
    -        return sorted(all, key=lambda check: not check._terminal)
    -
    -    @check_for(Definition, terminal=True)
    -    def check_docstring_missing(self, definition, docstring):
    -        """D10{0,1,2,3}: Public definitions should have docstrings.
    -
    -        All modules should normally have docstrings.  [...] all functions and
    -        classes exported by a module should also have docstrings. Public
    -        methods (including the __init__ constructor) should also have
    -        docstrings.
    -
    -        Note: Public (exported) definitions are either those with names listed
    -              in __all__ variable (if present), or those that do not start
    -              with a single underscore.
    -
    -        """
    -        if (not docstring and definition.is_public or
    -                docstring and is_blank(eval(docstring))):
    -            codes = {Module: 'D100', Class: 'D101', NestedClass: 'D101',
    -                     Method: 'D102', Function: 'D103', NestedFunction: 'D103'}
    -            return Error('%s: Docstring missing' % codes[type(definition)])
    -
    -    @check_for(Definition)
    -    def check_one_liners(self, definition, docstring):
    -        """D200: One-liner docstrings should fit on one line with quotes.
    -
    -        The closing quotes are on the same line as the opening quotes.
    -        This looks better for one-liners.
    -
    -        """
    -        if docstring:
    -            lines = eval(docstring).split('\n')
    -            if len(lines) > 1:
    -                non_empty_lines = sum(1 for l in lines if not is_blank(l))
    -                if non_empty_lines == 1:
    -                    return Error('D200: One-line docstring should not occupy '
    -                                 '%s lines' % len(lines))
    -
    -    @check_for(Function)
    -    def check_no_blank_before(self, function, docstring):  # def
    -        """D20{1,2}: No blank lines allowed around function/method docstring.
    -
    -        There's no blank line either before or after the docstring.
    -
    -        """
    -        # NOTE: This does not take comments into account.
    -        # NOTE: This does not take into account functions with groups of code.
    -        if docstring:
    -            before, _, after = function.source.partition(docstring)
    -            blanks_before = list(map(is_blank, before.split('\n')[:-1]))
    -            blanks_after = list(map(is_blank, after.split('\n')[1:]))
    -            blanks_before_count = sum(takewhile(bool, reversed(blanks_before)))
    -            blanks_after_count = sum(takewhile(bool, blanks_after))
    -            if blanks_before_count != 0:
    -                yield Error('D201: No blank lines allowed *before* %s '
    -                            'docstring, found %s'
    -                            % (function.kind, blanks_before_count))
    -            if not all(blanks_after) and blanks_after_count != 0:
    -                yield Error('D202: No blank lines allowed *after* %s '
    -                            'docstring, found %s'
    -                            % (function.kind, blanks_after_count))
    -
    -    @check_for(Class)
    -    def check_blank_before_after_class(slef, class_, docstring):
    -        """D20{3,4}: Class docstring should have 1 blank line around them.
    -
    -        Insert a blank line before and after all docstrings (one-line or
    -        multi-line) that document a class -- generally speaking, the class's
    -        methods are separated from each other by a single blank line, and the
    -        docstring needs to be offset from the first method by a blank line;
    -        for symmetry, put a blank line between the class header and the
    -        docstring.
    -
    -        """
    -        # NOTE: this gives flase-positive in this case
    -        # class Foo:
    -        #
    -        #     """Docstring."""
    -        #
    -        #
    -        # # comment here
    -        # def foo(): pass
    -        if docstring:
    -            before, _, after = class_.source.partition(docstring)
    -            blanks_before = list(map(is_blank, before.split('\n')[:-1]))
    -            blanks_after = list(map(is_blank, after.split('\n')[1:]))
    -            blanks_before_count = sum(takewhile(bool, reversed(blanks_before)))
    -            blanks_after_count = sum(takewhile(bool, blanks_after))
    -            if blanks_before_count != 1:
    -                yield Error('D203: Expected 1 blank line *before* class '
    -                            'docstring, found %s' % blanks_before_count)
    -            if not all(blanks_after) and blanks_after_count != 1:
    -                yield Error('D204: Expected 1 blank line *after* class '
    -                            'docstring, found %s' % blanks_after_count)
    -
    -    @check_for(Definition)
    -    def check_blank_after_summary(self, definition, docstring):
    -        """D205: Blank line missing between one-line summary and description.
    -
    -        Multi-line docstrings consist of a summary line just like a one-line
    -        docstring, followed by a blank line, followed by a more elaborate
    -        description. The summary line may be used by automatic indexing tools;
    -        it is important that it fits on one line and is separated from the
    -        rest of the docstring by a blank line.
    -
    -        """
    -        if docstring:
    -            lines = eval(docstring).strip().split('\n')
    -            if len(lines) > 1 and not is_blank(lines[1]):
    -                return Error()
    -
    -    @check_for(Definition)
    -    def check_indent(self, definition, docstring):
    -        """D20{6,7,8}: The entire docstring should be indented same as code.
    -
    -        The entire docstring is indented the same as the quotes at its
    -        first line.
    -
    -        """
    -        if docstring:
    -            before_docstring, _, _ = definition.source.partition(docstring)
    -            _, _, indent = before_docstring.rpartition('\n')
    -            lines = docstring.split('\n')
    -            if len(lines) > 1:
    -                lines = lines[1:]  # First line does not need indent.
    -                indents = [leading_space(l) for l in lines if not is_blank(l)]
    -                if set(' \t') == set(''.join(indents) + indent):
    -                    return Error('D206: Docstring indented with both tabs and '
    -                                 'spaces')
    -                if (len(indents) > 1 and min(indents[:-1]) > indent
    -                        or indents[-1] > indent):
    -                    return Error('D208: Docstring is over-indented')
    -                if min(indents) < indent:
    -                    return Error('D207: Docstring is under-indented')
    -
    -    @check_for(Definition)
    -    def check_newline_after_last_paragraph(self, definition, docstring):
    -        """D209: Put multi-line docstring closing quotes on separate line.
    -
    -        Unless the entire docstring fits on a line, place the closing
    -        quotes on a line by themselves.
    -
    -        """
    -        if docstring:
    -            lines = [l for l in eval(docstring).split('\n') if not is_blank(l)]
    -            if len(lines) > 1:
    -                if docstring.split("\n")[-1].strip() not in ['"""', "'''"]:
    -                    return Error('D209: Put multi-line docstring closing '
    -                                 'quotes on separate line')
    -
    -    @check_for(Definition)
    -    def check_triple_double_quotes(self, definition, docstring):
    -        r'''D300: Use """triple double quotes""".
    -
    -        For consistency, always use """triple double quotes""" around
    -        docstrings. Use r"""raw triple double quotes""" if you use any
    -        backslashes in your docstrings. For Unicode docstrings, use
    -        u"""Unicode triple-quoted strings""".
    -
    -        Note: Exception to this is made if the docstring contains
    -              """ quotes in its body.
    -
    -        '''
    -        if docstring and '"""' in eval(docstring) and docstring.startswith(
    -                ("'''", "r'''", "u'''")):
    -            # Allow ''' quotes if docstring contains """, because otherwise """
    -            # quotes could not be expressed inside docstring.  Not in PEP 257.
    -            return
    -        if docstring and not docstring.startswith(('"""', 'r"""', 'u"""')):
    -            quotes = "'''" if "'''" in docstring[:4] else "'"
    -            return Error('D300: Expected """-quotes, got %s-quotes' % quotes)
    -
    -    @check_for(Definition)
    -    def check_backslashes(self, definition, docstring):
    -        r'''D301: Use r""" if any backslashes in a docstring.
    -
    -        Use r"""raw triple double quotes""" if you use any backslashes
    -        (\) in your docstrings.
    -
    -        '''
    -        # Just check that docstring is raw, check_triple_double_quotes
    -        # ensures the correct quotes.
    -        if docstring and '\\' in docstring and not docstring.startswith('r'):
    -            return Error()
    -
    -    @check_for(Definition)
    -    def check_unicode_docstring(self, definition, docstring):
    -        r'''D302: Use u""" for docstrings with Unicode.
    -
    -        For Unicode docstrings, use u"""Unicode triple-quoted strings""".
    -
    -        '''
    -        # Just check that docstring is unicode, check_triple_double_quotes
    -        # ensures the correct quotes.
    -        if docstring and sys.version_info[0] <= 2:
    -            if not is_ascii(docstring) and not docstring.startswith('u'):
    -                return Error()
    -
    -    @check_for(Definition)
    -    def check_ends_with_period(self, definition, docstring):
    -        """D400: First line should end with a period.
    -
    -        The [first line of a] docstring is a phrase ending in a period.
    -
    -        """
    -        if docstring:
    -            summary_line = eval(docstring).strip().split('\n')[0]
    -            if not summary_line.endswith('.'):
    -                return Error("D400: First line should end with '.', not %r"
    -                             % summary_line[-1])
    -
    -    @check_for(Function)
    -    def check_imperative_mood(self, function, docstring):  # def context
    -        """D401: First line should be in imperative mood: 'Do', not 'Does'.
    -
    -        [Docstring] prescribes the function or method's effect as a command:
    -        ("Do this", "Return that"), not as a description; e.g. don't write
    -        "Returns the pathname ...".
    -
    -        """
    -        if docstring:
    -            stripped = eval(docstring).strip()
    -            if stripped:
    -                first_word = stripped.split()[0]
    -                if first_word.endswith('s') and not first_word.endswith('ss'):
    -                    return Error('D401: First line should be imperative: '
    -                                 '%r, not %r' % (first_word[:-1], first_word))
    -
    -    @check_for(Function)
    -    def check_no_signature(self, function, docstring):  # def context
    -        """D402: First line should not be function's or method's "signature".
    -
    -        The one-line docstring should NOT be a "signature" reiterating the
    -        function/method parameters (which can be obtained by introspection).
    -
    -        """
    -        if docstring:
    -            first_line = eval(docstring).strip().split('\n')[0]
    -            if function.name + '(' in first_line.replace(' ', ''):
    -                return Error("D402: First line should not be %s's signature"
    -                             % function.kind)
    -
    -    # Somewhat hard to determine if return value is mentioned.
    -    # @check(Function)
    -    def SKIP_check_return_type(self, function, docstring):
    -        """D40x: Return value type should be mentioned.
    -
    -        [T]he nature of the return value cannot be determined by
    -        introspection, so it should be mentioned.
    -
    -        """
    -        if docstring and function.returns_value:
    -            if 'return' not in docstring.lower():
    -                return Error()
    -
    -
    -if __name__ == '__main__':
    -    try:
    -        sys.exit(main(*parse_options()))
    -    except KeyboardInterrupt:
    -        pass
    diff --git a/pymode/libs/pylama/lint/pylama_pep8/__init__.py b/pymode/libs/pylama/lint/pylama_pep8.py
    similarity index 65%
    rename from pymode/libs/pylama/lint/pylama_pep8/__init__.py
    rename to pymode/libs/pylama/lint/pylama_pep8.py
    index a0a4ecb7..30329d80 100644
    --- a/pymode/libs/pylama/lint/pylama_pep8/__init__.py
    +++ b/pymode/libs/pylama/lint/pylama_pep8.py
    @@ -1,6 +1,8 @@
    -""" Check PEP8. """
    -from .. import Linter as BaseLinter
    -from .pep8 import BaseReport, StyleGuide
    +"""PEP8 support."""
    +from pep8 import BaseReport, StyleGuide, get_parser
    +
    +from pylama.lint import Linter as Abstract
    +
     
     try:
         from StringIO import StringIO
    @@ -8,17 +10,23 @@
         from io import StringIO
     
     
    -class Linter(BaseLinter):
    +class Linter(Abstract):
     
    -    """ PEP8 code check. """
    +    """PEP8 runner."""
     
         @staticmethod
         def run(path, code=None, params=None, **meta):
    -        """ PEP8 code checking.
    +        """Check code with PEP8.
     
             :return list: List of errors.
    -
             """
    +        parser = get_parser()
    +        for option in parser.option_list:
    +            if option.dest and option.dest in params:
    +                value = params[option.dest]
    +                if not isinstance(value, str):
    +                    continue
    +                params[option.dest] = option.convert_value(option, params[option.dest])
             P8Style = StyleGuide(reporter=_PEP8Report, **params)
             buf = StringIO(code)
             return P8Style.input_file(path, lines=buf.readlines())
    @@ -31,13 +39,13 @@ def __init__(self, *args, **kwargs):
             self.errors = []
     
         def init_file(self, filename, lines, expected, line_offset):
    -        """ Prepare storage for errors. """
    +        """Prepare storage for errors."""
             super(_PEP8Report, self).init_file(
                 filename, lines, expected, line_offset)
             self.errors = []
     
         def error(self, line_number, offset, text, check):
    -        """ Save errors. """
    +        """Save errors."""
             code = super(_PEP8Report, self).error(
                 line_number, offset, text, check)
     
    @@ -50,7 +58,7 @@ def error(self, line_number, offset, text, check):
                 ))
     
         def get_file_results(self):
    -        """ Get errors.
    +        """Get errors.
     
             :return list: List of errors.
     
    diff --git a/pymode/libs/pylama/lint/pylama_pyflakes.py b/pymode/libs/pylama/lint/pylama_pyflakes.py
    new file mode 100644
    index 00000000..184d969f
    --- /dev/null
    +++ b/pymode/libs/pylama/lint/pylama_pyflakes.py
    @@ -0,0 +1,49 @@
    +"""Pyflakes support."""
    +
    +from pyflakes import checker
    +
    +from pylama.lint import Linter as Abstract
    +
    +
    +checker.messages.UnusedImport.message = "W0611 %r imported but unused"
    +checker.messages.RedefinedWhileUnused.message = "W0404 redefinition of unused %r from line %r"
    +checker.messages.RedefinedInListComp.message = "W0621 list comprehension redefines %r from line %r"
    +checker.messages.ImportShadowedByLoopVar.message = "W0621 import %r from line %r shadowed by loop variable"
    +checker.messages.ImportStarUsed.message = "W0401 'from %s import *' used; unable to detect undefined names"
    +checker.messages.UndefinedName.message = "E0602 undefined name %r"
    +checker.messages.DoctestSyntaxError.message = "W0511 syntax error in doctest"
    +checker.messages.UndefinedExport.message = "E0603 undefined name %r in __all__"
    +checker.messages.UndefinedLocal.message = "E0602 local variable %r (defined in enclosing scope on line %r) referenced before assignment"
    +checker.messages.DuplicateArgument.message = "E1122 duplicate argument %r in function definition"
    +checker.messages.LateFutureImport.message = "W0410 future import(s) %r after other statements"
    +checker.messages.UnusedVariable.message = "W0612 local variable %r is assigned to but never used"
    +checker.messages.ReturnWithArgsInsideGenerator.message = "E0106 'return' with argument inside generator"
    +checker.messages.ReturnOutsideFunction.message = "E0104 'return' outside function"
    +
    +
    +class Linter(Abstract):
    +
    +    """Pyflakes runner."""
    +
    +    @staticmethod
    +    def run(path, code=None, params=None, **meta):
    +        """Check code with pyflakes.
    +
    +        :return list: List of errors.
    +        """
    +        import _ast
    +
    +        builtins = params.get("builtins", "")
    +
    +        if builtins:
    +            builtins = builtins.split(",")
    +
    +        tree = compile(code, path, "exec", _ast.PyCF_ONLY_AST)
    +        w = checker.Checker(tree, path, builtins=builtins)
    +        w.messages = sorted(w.messages, key=lambda m: m.lineno)
    +        return [
    +            {'lnum': m.lineno, 'text': m.message % m.message_args}
    +            for m in sorted(w.messages, key=lambda m: m.lineno)
    +        ]
    +
    +#  pylama:ignore=E501,C0301
    diff --git a/pymode/libs/pylama/lint/pylama_pyflakes/__init__.py b/pymode/libs/pylama/lint/pylama_pyflakes/__init__.py
    deleted file mode 100644
    index 72fc26fe..00000000
    --- a/pymode/libs/pylama/lint/pylama_pyflakes/__init__.py
    +++ /dev/null
    @@ -1,65 +0,0 @@
    -""" Check Pyflakes. """
    -import sys
    -from os import path as op
    -
    -from .. import Linter as BaseLinter
    -
    -
    -# Use local version of pyflakes
    -path = op.dirname(op.abspath(__file__))
    -sys.path.insert(0, path)
    -
    -from pyflakes import checker
    -
    -
    -class Linter(BaseLinter):
    -
    -    """ Pyflakes code check. """
    -
    -    def __init__(self):
    -        if checker.messages.UndefinedName.message != "E0602 undefined name %r":
    -            monkey_patch_messages(checker.messages)
    -
    -    @staticmethod
    -    def run(path, code=None, params=None, **meta):
    -        """ Pyflake code checking.
    -
    -        :return list: List of errors.
    -
    -        """
    -        import _ast
    -
    -        builtins = params.get("builtins", "")
    -
    -        if builtins:
    -            builtins = builtins.split(",")
    -
    -        errors = []
    -        tree = compile(code, path, "exec", _ast.PyCF_ONLY_AST)
    -        w = checker.Checker(tree, path, builtins=builtins)
    -        w.messages = sorted(w.messages, key=lambda m: m.lineno)
    -        for w in w.messages:
    -            errors.append(dict(
    -                lnum=w.lineno,
    -                text=w.message % w.message_args,
    -            ))
    -        return errors
    -
    -
    -def monkey_patch_messages(messages):
    -    """ Patch pyflakes messages. """
    -
    -    messages.LateFutureImport.message = "W0410 future import(s) %r after other statements"
    -    messages.ImportStarUsed.message = "W0401 'from %s import *' used; unable to detect undefined names"
    -    messages.RedefinedWhileUnused.message = "W0404 redefinition of unused %r from line %r"
    -    messages.DoctestSyntaxError.message = "W0511 syntax error in doctest"
    -    messages.UnusedImport.message = "W0611 %r imported but unused"
    -    messages.UnusedVariable.message = "W0612 local variable %r is assigned to but never used"
    -    messages.RedefinedInListComp.message = "W0621 list comprehension redefines %r from line %r"
    -    messages.Redefined.message = "W0621 redefinition of %r from line %r"
    -    messages.ImportShadowedByLoopVar.message = "W0621 import %r from line %r shadowed by loop variable"
    -    messages.ReturnWithArgsInsideGenerator.message = "E0106 'return' with argument inside generator"
    -    messages.UndefinedName.message = "E0602 undefined name %r"
    -    messages.UndefinedLocal.message = "E0602 local variable %r (defined in enclosing scope on line %r) referenced before assignment"
    -    messages.UndefinedExport.message = "E0603 undefined name %r in __all__"
    -    messages.DuplicateArgument.message = "E1122 duplicate argument %r in function definition"
    diff --git a/pymode/libs/pylama/lint/pylama_pyflakes/pyflakes/__init__.py b/pymode/libs/pylama/lint/pylama_pyflakes/pyflakes/__init__.py
    deleted file mode 100644
    index cb2b136b..00000000
    --- a/pymode/libs/pylama/lint/pylama_pyflakes/pyflakes/__init__.py
    +++ /dev/null
    @@ -1,2 +0,0 @@
    -
    -__version__ = '0.8.2a0'
    diff --git a/pymode/libs/pylama/lint/pylama_pylint/__init__.py b/pymode/libs/pylama/lint/pylama_pylint/__init__.py
    index 6ec4f3ba..74e6bc22 100644
    --- a/pymode/libs/pylama/lint/pylama_pylint/__init__.py
    +++ b/pymode/libs/pylama/lint/pylama_pylint/__init__.py
    @@ -4,14 +4,9 @@
     # ==================
     
     
    -__version__ = "0.3.1"
    +__version__ = "2.1.1"
     __project__ = "pylama_pylint"
     __author__ = "horneds "
     __license__ = "BSD"
     
    -import sys
    -if sys.version_info >= (3, 0, 0):
    -    raise ImportError("pylama_pylint doesnt support python3")
    -
    -from .main import Linter
    -assert Linter
    +from .main import Linter  # noqa
    diff --git a/pymode/libs/pylama/lint/pylama_pylint/astroid/brain/py2qt4.py b/pymode/libs/pylama/lint/pylama_pylint/astroid/brain/py2qt4.py
    deleted file mode 100644
    index 0ee0410e..00000000
    --- a/pymode/libs/pylama/lint/pylama_pylint/astroid/brain/py2qt4.py
    +++ /dev/null
    @@ -1,25 +0,0 @@
    -"""Astroid hooks for the Python 2 qt4 module.
    -
    -Currently help understanding of :
    -
    -* PyQT4.QtCore
    -"""
    -
    -from astroid import MANAGER
    -from astroid.builder import AstroidBuilder
    -
    -
    -def pyqt4_qtcore_transform(module):
    -    fake = AstroidBuilder(MANAGER).string_build('''
    -
    -def SIGNAL(signal_name): pass
    -
    -class QObject(object):
    -    def emit(self, signal): pass
    -''')
    -    for klass in ('QObject',):
    -        module.locals[klass] = fake.locals[klass]
    -
    -
    -import py2stdlib
    -py2stdlib.MODULE_TRANSFORMS['PyQt4.QtCore'] = pyqt4_qtcore_transform
    diff --git a/pymode/libs/pylama/lint/pylama_pylint/astroid/brain/py2stdlib.py b/pymode/libs/pylama/lint/pylama_pylint/astroid/brain/py2stdlib.py
    deleted file mode 100644
    index 6b0ef501..00000000
    --- a/pymode/libs/pylama/lint/pylama_pylint/astroid/brain/py2stdlib.py
    +++ /dev/null
    @@ -1,252 +0,0 @@
    -"""Astroid hooks for the Python 2 standard library.
    -
    -Currently help understanding of :
    -
    -* hashlib.md5 and hashlib.sha1
    -"""
    -
    -import sys
    -
    -from astroid import MANAGER, AsStringRegexpPredicate, UseInferenceDefault, inference_tip, YES
    -from astroid import exceptions
    -from astroid import nodes
    -from astroid.builder import AstroidBuilder
    -
    -MODULE_TRANSFORMS = {}
    -PY3K = sys.version_info > (3, 0)
    -
    -
    -# module specific transformation functions #####################################
    -
    -def transform(module):
    -    try:
    -        tr = MODULE_TRANSFORMS[module.name]
    -    except KeyError:
    -        pass
    -    else:
    -        tr(module)
    -MANAGER.register_transform(nodes.Module, transform)
    -
    -# module specific transformation functions #####################################
    -
    -def hashlib_transform(module):
    -    template = '''
    -
    -class %s(object):
    -  def __init__(self, value=''): pass
    -  def digest(self):
    -    return u''
    -  def update(self, value): pass
    -  def hexdigest(self):
    -    return u''
    -'''
    -
    -    algorithms = ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512')
    -    classes = "".join(template % hashfunc for hashfunc in algorithms)
    -
    -    fake = AstroidBuilder(MANAGER).string_build(classes)
    -
    -    for hashfunc in algorithms:
    -        module.locals[hashfunc] = fake.locals[hashfunc]
    -
    -def collections_transform(module):
    -    fake = AstroidBuilder(MANAGER).string_build('''
    -
    -class defaultdict(dict):
    -    default_factory = None
    -    def __missing__(self, key): pass
    -
    -class deque(object):
    -    maxlen = 0
    -    def __init__(iterable=None, maxlen=None): pass
    -    def append(self, x): pass
    -    def appendleft(self, x): pass
    -    def clear(self): pass
    -    def count(self, x): return 0
    -    def extend(self, iterable): pass
    -    def extendleft(self, iterable): pass
    -    def pop(self): pass
    -    def popleft(self): pass
    -    def remove(self, value): pass
    -    def reverse(self): pass
    -    def rotate(self, n): pass
    -    def __iter__(self): return self
    -
    -''')
    -
    -    for klass in ('deque', 'defaultdict'):
    -        module.locals[klass] = fake.locals[klass]
    -
    -def pkg_resources_transform(module):
    -    fake = AstroidBuilder(MANAGER).string_build('''
    -
    -def resource_exists(package_or_requirement, resource_name):
    -    pass
    -
    -def resource_isdir(package_or_requirement, resource_name):
    -    pass
    -
    -def resource_filename(package_or_requirement, resource_name):
    -    pass
    -
    -def resource_stream(package_or_requirement, resource_name):
    -    pass
    -
    -def resource_string(package_or_requirement, resource_name):
    -    pass
    -
    -def resource_listdir(package_or_requirement, resource_name):
    -    pass
    -
    -def extraction_error():
    -    pass
    -
    -def get_cache_path(archive_name, names=()):
    -    pass
    -
    -def postprocess(tempname, filename):
    -    pass
    -
    -def set_extraction_path(path):
    -    pass
    -
    -def cleanup_resources(force=False):
    -    pass
    -
    -''')
    -
    -    for func_name, func in fake.locals.items():
    -        module.locals[func_name] = func
    -
    -
    -def urlparse_transform(module):
    -    fake = AstroidBuilder(MANAGER).string_build('''
    -
    -def urlparse(url, scheme='', allow_fragments=True):
    -    return ParseResult()
    -
    -class ParseResult(object):
    -    def __init__(self):
    -        self.scheme = ''
    -        self.netloc = ''
    -        self.path = ''
    -        self.params = ''
    -        self.query = ''
    -        self.fragment = ''
    -        self.username = None
    -        self.password = None
    -        self.hostname = None
    -        self.port = None
    -
    -    def geturl(self):
    -        return ''
    -''')
    -
    -    for func_name, func in fake.locals.items():
    -        module.locals[func_name] = func
    -
    -def subprocess_transform(module):
    -    if PY3K:
    -        communicate = (bytes('string', 'ascii'), bytes('string', 'ascii'))
    -    else:
    -        communicate = ('string', 'string')
    -    fake = AstroidBuilder(MANAGER).string_build('''
    -
    -class Popen(object):
    -    returncode = pid = 0
    -    stdin = stdout = stderr = file()
    -
    -    def __init__(self, args, bufsize=0, executable=None,
    -                 stdin=None, stdout=None, stderr=None,
    -                 preexec_fn=None, close_fds=False, shell=False,
    -                 cwd=None, env=None, universal_newlines=False,
    -                 startupinfo=None, creationflags=0):
    -        pass
    -
    -    def communicate(self, input=None):
    -        return %r
    -    def wait(self):
    -        return self.returncode
    -    def poll(self):
    -        return self.returncode
    -    def send_signal(self, signal):
    -        pass
    -    def terminate(self):
    -        pass
    -    def kill(self):
    -        pass
    -   ''' % (communicate, ))
    -
    -    for func_name, func in fake.locals.items():
    -        module.locals[func_name] = func
    -
    -
    -
    -MODULE_TRANSFORMS['hashlib'] = hashlib_transform
    -MODULE_TRANSFORMS['collections'] = collections_transform
    -MODULE_TRANSFORMS['pkg_resources'] = pkg_resources_transform
    -MODULE_TRANSFORMS['urlparse'] = urlparse_transform
    -MODULE_TRANSFORMS['subprocess'] = subprocess_transform
    -
    -# namedtuple support ###########################################################
    -
    -def infer_named_tuple(node, context=None):
    -    """Specific inference function for namedtuple CallFunc node"""
    -    def infer_first(node):
    -        try:
    -            value = node.infer().next()
    -            if value is YES:
    -                raise UseInferenceDefault()
    -            else:
    -                return value
    -        except StopIteration:
    -            raise InferenceError()
    -
    -    # node is a CallFunc node, class name as first argument and generated class
    -    # attributes as second argument
    -    if len(node.args) != 2:
    -        # something weird here, go back to class implementation
    -        raise UseInferenceDefault()
    -    # namedtuple list of attributes can be a list of strings or a
    -    # whitespace-separate string
    -    try:
    -        name = infer_first(node.args[0]).value
    -        names = infer_first(node.args[1])
    -        try:
    -            attributes = names.value.split()
    -        except AttributeError:
    -            attributes = [infer_first(const).value for const in names.elts]
    -    except (AttributeError, exceptions.InferenceError):
    -        raise UseInferenceDefault()
    -    # we want to return a Class node instance with proper attributes set
    -    class_node = nodes.Class(name, 'docstring')
    -    class_node.parent = node.parent
    -    # set base class=tuple
    -    class_node.bases.append(nodes.Tuple._proxied)
    -    # XXX add __init__(*attributes) method
    -    for attr in attributes:
    -        fake_node = nodes.EmptyNode()
    -        fake_node.parent = class_node
    -        class_node.instance_attrs[attr] = [fake_node]
    -
    -    fake = AstroidBuilder(MANAGER).string_build('''
    -class %(name)s(tuple):
    -    def _asdict(self):
    -        return self.__dict__
    -    @classmethod
    -    def _make(cls, iterable, new=tuple.__new__, len=len):
    -        return new(cls, iterable)
    -    def _replace(_self, **kwds):
    -        result = _self._make(map(kwds.pop, %(fields)r, _self))
    -        if kwds:
    -            raise ValueError('Got unexpected field names: %%r' %% list(kwds))
    -        return result
    -    ''' % {'name': name, 'fields': attributes})
    -    class_node.locals['_asdict'] = fake.body[0].locals['_asdict']
    -    class_node.locals['_make'] = fake.body[0].locals['_make']
    -    class_node.locals['_replace'] = fake.body[0].locals['_replace']
    -    # we use UseInferenceDefault, we can't be a generator so return an iterator
    -    return iter([class_node])
    -
    -MANAGER.register_transform(nodes.CallFunc, inference_tip(infer_named_tuple),
    -                           AsStringRegexpPredicate('namedtuple', 'func'))
    diff --git a/pymode/libs/pylama/lint/pylama_pylint/logilab/common/__pkginfo__.py b/pymode/libs/pylama/lint/pylama_pylint/logilab/common/__pkginfo__.py
    deleted file mode 100644
    index d3be5552..00000000
    --- a/pymode/libs/pylama/lint/pylama_pylint/logilab/common/__pkginfo__.py
    +++ /dev/null
    @@ -1,53 +0,0 @@
    -# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
    -# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
    -#
    -# This file is part of logilab-common.
    -#
    -# logilab-common is free software: you can redistribute it and/or modify it under
    -# the terms of the GNU Lesser General Public License as published by the Free
    -# Software Foundation, either version 2.1 of the License, or (at your option) any
    -# later version.
    -#
    -# logilab-common is distributed in the hope that it will be useful, but WITHOUT
    -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
    -# FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public License for more
    -# details.
    -#
    -# You should have received a copy of the GNU Lesser General Public License along
    -# with logilab-common.  If not, see .
    -"""logilab.common packaging information"""
    -__docformat__ = "restructuredtext en"
    -import sys
    -import os
    -
    -distname = 'logilab-common'
    -modname = 'common'
    -subpackage_of = 'logilab'
    -subpackage_master = True
    -
    -numversion = (0, 61, 0)
    -version = '.'.join([str(num) for num in numversion])
    -
    -license = 'LGPL' # 2.1 or later
    -description = "collection of low-level Python packages and modules used by Logilab projects"
    -web = "http://www.logilab.org/project/%s" % distname
    -mailinglist = "mailto://python-projects@lists.logilab.org"
    -author = "Logilab"
    -author_email = "contact@logilab.fr"
    -
    -
    -from os.path import join
    -scripts = [join('bin', 'pytest')]
    -include_dirs = [join('test', 'data')]
    -
    -install_requires = []
    -if sys.version_info < (2, 7):
    -    install_requires.append('unittest2 >= 0.5.1')
    -if os.name == 'nt':
    -    install_requires.append('colorama')
    -
    -classifiers = ["Topic :: Utilities",
    -               "Programming Language :: Python",
    -               "Programming Language :: Python :: 2",
    -               "Programming Language :: Python :: 3",
    -               ]
    diff --git a/pymode/libs/pylama/lint/pylama_pylint/logilab/common/compat.py b/pymode/libs/pylama/lint/pylama_pylint/logilab/common/compat.py
    deleted file mode 100644
    index 8983ece9..00000000
    --- a/pymode/libs/pylama/lint/pylama_pylint/logilab/common/compat.py
    +++ /dev/null
    @@ -1,243 +0,0 @@
    -# pylint: disable=E0601,W0622,W0611
    -# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
    -# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
    -#
    -# This file is part of logilab-common.
    -#
    -# logilab-common is free software: you can redistribute it and/or modify it under
    -# the terms of the GNU Lesser General Public License as published by the Free
    -# Software Foundation, either version 2.1 of the License, or (at your option) any
    -# later version.
    -#
    -# logilab-common is distributed in the hope that it will be useful, but WITHOUT
    -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
    -# FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public License for more
    -# details.
    -#
    -# You should have received a copy of the GNU Lesser General Public License along
    -# with logilab-common.  If not, see .
    -"""Wrappers around some builtins introduced in python 2.3, 2.4 and
    -2.5, making them available in for earlier versions of python.
    -
    -See another compatibility snippets from other projects:
    -
    -    :mod:`lib2to3.fixes`
    -    :mod:`coverage.backward`
    -    :mod:`unittest2.compatibility`
    -"""
    -
    -from __future__ import generators
    -
    -__docformat__ = "restructuredtext en"
    -
    -import os
    -import sys
    -import types
    -from warnings import warn
    -
    -import __builtin__ as builtins # 2to3 will tranform '__builtin__' to 'builtins'
    -
    -if sys.version_info < (3, 0):
    -    str_to_bytes = str
    -    def str_encode(string, encoding):
    -        if isinstance(string, unicode):
    -            return string.encode(encoding)
    -        return str(string)
    -else:
    -    def str_to_bytes(string):
    -        return str.encode(string)
    -    # we have to ignore the encoding in py3k to be able to write a string into a
    -    # TextIOWrapper or like object (which expect an unicode string)
    -    def str_encode(string, encoding):
    -        return str(string)
    -
    -# XXX callable built-in seems back in all python versions
    -try:
    -    callable = builtins.callable
    -except AttributeError:
    -    from collections import Callable
    -    def callable(something):
    -        return isinstance(something, Callable)
    -    del Callable
    -
    -# See also http://bugs.python.org/issue11776
    -if sys.version_info[0] == 3:
    -    def method_type(callable, instance, klass):
    -        # api change. klass is no more considered
    -        return types.MethodType(callable, instance)
    -else:
    -    # alias types otherwise
    -    method_type = types.MethodType
    -
    -if sys.version_info < (3, 0):
    -    raw_input = raw_input
    -else:
    -    raw_input = input
    -
    -# Pythons 2 and 3 differ on where to get StringIO
    -if sys.version_info < (3, 0):
    -    from cStringIO import StringIO
    -    FileIO = file
    -    BytesIO = StringIO
    -    reload = reload
    -else:
    -    from io import FileIO, BytesIO, StringIO
    -    from imp import reload
    -
    -# Where do pickles come from?
    -try:
    -    import cPickle as pickle
    -except ImportError:
    -    import pickle
    -
    -from logilab.common.deprecation import deprecated
    -
    -from itertools import izip, chain, imap
    -if sys.version_info < (3, 0):# 2to3 will remove the imports
    -    izip = deprecated('izip exists in itertools since py2.3')(izip)
    -    imap = deprecated('imap exists in itertools since py2.3')(imap)
    -chain = deprecated('chain exists in itertools since py2.3')(chain)
    -
    -sum = deprecated('sum exists in builtins since py2.3')(sum)
    -enumerate = deprecated('enumerate exists in builtins since py2.3')(enumerate)
    -frozenset = deprecated('frozenset exists in builtins since py2.4')(frozenset)
    -reversed = deprecated('reversed exists in builtins since py2.4')(reversed)
    -sorted = deprecated('sorted exists in builtins since py2.4')(sorted)
    -max = deprecated('max exists in builtins since py2.4')(max)
    -
    -
    -# Python2.5 builtins
    -try:
    -    any = any
    -    all = all
    -except NameError:
    -    def any(iterable):
    -        """any(iterable) -> bool
    -
    -        Return True if bool(x) is True for any x in the iterable.
    -        """
    -        for elt in iterable:
    -            if elt:
    -                return True
    -        return False
    -
    -    def all(iterable):
    -        """all(iterable) -> bool
    -
    -        Return True if bool(x) is True for all values x in the iterable.
    -        """
    -        for elt in iterable:
    -            if not elt:
    -                return False
    -        return True
    -
    -
    -# Python2.5 subprocess added functions and exceptions
    -try:
    -    from subprocess import Popen
    -except ImportError:
    -    # gae or python < 2.3
    -
    -    class CalledProcessError(Exception):
    -        """This exception is raised when a process run by check_call() returns
    -        a non-zero exit status.  The exit status will be stored in the
    -        returncode attribute."""
    -        def __init__(self, returncode, cmd):
    -            self.returncode = returncode
    -            self.cmd = cmd
    -        def __str__(self):
    -            return "Command '%s' returned non-zero exit status %d" % (self.cmd,
    -    self.returncode)
    -
    -    def call(*popenargs, **kwargs):
    -        """Run command with arguments.  Wait for command to complete, then
    -        return the returncode attribute.
    -
    -        The arguments are the same as for the Popen constructor.  Example:
    -
    -        retcode = call(["ls", "-l"])
    -        """
    -        # workaround: subprocess.Popen(cmd, stdout=sys.stdout) fails
    -        # see http://bugs.python.org/issue1531862
    -        if "stdout" in kwargs:
    -            fileno = kwargs.get("stdout").fileno()
    -            del kwargs['stdout']
    -            return Popen(stdout=os.dup(fileno), *popenargs, **kwargs).wait()
    -        return Popen(*popenargs, **kwargs).wait()
    -
    -    def check_call(*popenargs, **kwargs):
    -        """Run command with arguments.  Wait for command to complete.  If
    -        the exit code was zero then return, otherwise raise
    -        CalledProcessError.  The CalledProcessError object will have the
    -        return code in the returncode attribute.
    -
    -        The arguments are the same as for the Popen constructor.  Example:
    -
    -        check_call(["ls", "-l"])
    -        """
    -        retcode = call(*popenargs, **kwargs)
    -        cmd = kwargs.get("args")
    -        if cmd is None:
    -            cmd = popenargs[0]
    -        if retcode:
    -            raise CalledProcessError(retcode, cmd)
    -        return retcode
    -
    -try:
    -    from os.path import relpath
    -except ImportError: # python < 2.6
    -    from os.path import curdir, abspath, sep, commonprefix, pardir, join
    -    def relpath(path, start=curdir):
    -        """Return a relative version of a path"""
    -
    -        if not path:
    -            raise ValueError("no path specified")
    -
    -        start_list = abspath(start).split(sep)
    -        path_list = abspath(path).split(sep)
    -
    -        # Work out how much of the filepath is shared by start and path.
    -        i = len(commonprefix([start_list, path_list]))
    -
    -        rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
    -        if not rel_list:
    -            return curdir
    -        return join(*rel_list)
    -
    -
    -# XXX don't know why tests don't pass if I don't do that :
    -_real_set, set = set, deprecated('set exists in builtins since py2.4')(set)
    -if (2, 5) <= sys.version_info[:2]:
    -    InheritableSet = _real_set
    -else:
    -    class InheritableSet(_real_set):
    -        """hacked resolving inheritancy issue from old style class in 2.4"""
    -        def __new__(cls, *args, **kwargs):
    -            if args:
    -                new_args = (args[0], )
    -            else:
    -                new_args = ()
    -            obj = _real_set.__new__(cls, *new_args)
    -            obj.__init__(*args, **kwargs)
    -            return obj
    -
    -# XXX shouldn't we remove this and just let 2to3 do his job ?
    -# range or xrange?
    -try:
    -    range = xrange
    -except NameError:
    -    range = range
    -
    -# ConfigParser was renamed to the more-standard configparser
    -try:
    -    import configparser
    -except ImportError:
    -    import ConfigParser as configparser
    -
    -try:
    -    import json
    -except ImportError:
    -    try:
    -        import simplejson as json
    -    except ImportError:
    -        json = None
    diff --git a/pymode/libs/pylama/lint/pylama_pylint/main.py b/pymode/libs/pylama/lint/pylama_pylint/main.py
    index 411ba31d..f50b6647 100644
    --- a/pymode/libs/pylama/lint/pylama_pylint/main.py
    +++ b/pymode/libs/pylama/lint/pylama_pylint/main.py
    @@ -1,12 +1,10 @@
     """ Pylint support. """
     from os import path as op, environ
    -import sys
     import logging
     
     from pylama.lint import Linter as BaseLinter
     
     CURDIR = op.abspath(op.dirname(__file__))
    -sys.path.insert(0, CURDIR)
     
     from astroid import MANAGER
     from pylint.lint import Run
    diff --git a/pymode/libs/pylama/lint/pylama_pylint/pylint/checkers/stdlib.py b/pymode/libs/pylama/lint/pylama_pylint/pylint/checkers/stdlib.py
    deleted file mode 100644
    index a1c31337..00000000
    --- a/pymode/libs/pylama/lint/pylama_pylint/pylint/checkers/stdlib.py
    +++ /dev/null
    @@ -1,69 +0,0 @@
    -# Copyright 2012 Google Inc.
    -#
    -# http://www.logilab.fr/ -- mailto:contact@logilab.fr
    -# This program is free software; you can redistribute it and/or modify it under
    -# the terms of the GNU General Public License as published by the Free Software
    -# Foundation; either version 2 of the License, or (at your option) any later
    -# version.
    -#
    -# This program is distributed in the hope that it will be useful, but WITHOUT
    -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
    -# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details
    -#
    -# You should have received a copy of the GNU General Public License along with
    -# this program; if not, write to the Free Software Foundation, Inc.,
    -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
    -"""Checkers for various standard library functions."""
    -
    -import re
    -import sys
    -
    -import astroid
    -
    -from pylint.interfaces import IAstroidChecker
    -from pylint.checkers import BaseChecker
    -from pylint.checkers import utils
    -
    -_VALID_OPEN_MODE_REGEX = r'^(r?U|[rwa]\+?b?)$'
    -
    -if sys.version_info >= (3, 0):
    -    OPEN_MODULE = '_io'
    -else:
    -    OPEN_MODULE = '__builtin__'
    -
    -class OpenModeChecker(BaseChecker):
    -    __implements__ = (IAstroidChecker,)
    -    name = 'open_mode'
    -
    -    msgs = {
    -        'W1501': ('"%s" is not a valid mode for open.',
    -                  'bad-open-mode',
    -                  'Python supports: r, w, a modes with b, +, and U options. '
    -                  'See http://docs.python.org/2/library/functions.html#open'),
    -        }
    -
    -    @utils.check_messages('bad-open-mode')
    -    def visit_callfunc(self, node):
    -        """Visit a CallFunc node."""
    -        if hasattr(node, 'func'):
    -            infer = utils.safe_infer(node.func)
    -            if infer and infer.root().name == OPEN_MODULE:
    -                if getattr(node.func, 'name', None) in ('open', 'file'):
    -                    self._check_open_mode(node)
    -
    -    def _check_open_mode(self, node):
    -        """Check that the mode argument of an open or file call is valid."""
    -        try:
    -            mode_arg = utils.get_argument_from_call(node, position=1, keyword='mode')
    -            if mode_arg:
    -                mode_arg = utils.safe_infer(mode_arg)
    -                if (isinstance(mode_arg, astroid.Const)
    -                    and not re.match(_VALID_OPEN_MODE_REGEX, mode_arg.value)):
    -                    self.add_message('bad-open-mode', node=node, args=(mode_arg.value))
    -        except (utils.NoSuchArgumentError, TypeError):
    -            pass
    -
    -def register(linter):
    -    """required method to auto register this checker """
    -    linter.register_checker(OpenModeChecker(linter))
    -
    diff --git a/pymode/libs/pylama/lint/pylama_pylint/pylint/checkers/strings.py b/pymode/libs/pylama/lint/pylama_pylint/pylint/checkers/strings.py
    deleted file mode 100644
    index 04cf1bc7..00000000
    --- a/pymode/libs/pylama/lint/pylama_pylint/pylint/checkers/strings.py
    +++ /dev/null
    @@ -1,304 +0,0 @@
    -# Copyright (c) 2009-2010 Arista Networks, Inc. - James Lingard
    -# Copyright (c) 2004-2013 LOGILAB S.A. (Paris, FRANCE).
    -# Copyright 2012 Google Inc.
    -#
    -# http://www.logilab.fr/ -- mailto:contact@logilab.fr
    -# This program is free software; you can redistribute it and/or modify it under
    -# the terms of the GNU General Public License as published by the Free Software
    -# Foundation; either version 2 of the License, or (at your option) any later
    -# version.
    -#
    -# This program is distributed in the hope that it will be useful, but WITHOUT
    -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
    -# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details
    -#
    -# You should have received a copy of the GNU General Public License along with
    -# this program; if not, write to the Free Software Foundation, Inc.,
    -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
    -"""Checker for string formatting operations.
    -"""
    -
    -import sys
    -import tokenize
    -
    -import astroid
    -
    -from pylint.interfaces import ITokenChecker, IAstroidChecker, IRawChecker
    -from pylint.checkers import BaseChecker, BaseTokenChecker
    -from pylint.checkers import utils
    -from pylint.checkers.utils import check_messages
    -
    -_PY3K = sys.version_info >= (3, 0)
    -
    -MSGS = {
    -    'E1300': ("Unsupported format character %r (%#02x) at index %d",
    -              "bad-format-character",
    -              "Used when a unsupported format character is used in a format\
    -              string."),
    -    'E1301': ("Format string ends in middle of conversion specifier",
    -              "truncated-format-string",
    -              "Used when a format string terminates before the end of a \
    -              conversion specifier."),
    -    'E1302': ("Mixing named and unnamed conversion specifiers in format string",
    -              "mixed-format-string",
    -              "Used when a format string contains both named (e.g. '%(foo)d') \
    -              and unnamed (e.g. '%d') conversion specifiers.  This is also \
    -              used when a named conversion specifier contains * for the \
    -              minimum field width and/or precision."),
    -    'E1303': ("Expected mapping for format string, not %s",
    -              "format-needs-mapping",
    -              "Used when a format string that uses named conversion specifiers \
    -              is used with an argument that is not a mapping."),
    -    'W1300': ("Format string dictionary key should be a string, not %s",
    -              "bad-format-string-key",
    -              "Used when a format string that uses named conversion specifiers \
    -              is used with a dictionary whose keys are not all strings."),
    -    'W1301': ("Unused key %r in format string dictionary",
    -              "unused-format-string-key",
    -              "Used when a format string that uses named conversion specifiers \
    -              is used with a dictionary that conWtains keys not required by the \
    -              format string."),
    -    'E1304': ("Missing key %r in format string dictionary",
    -              "missing-format-string-key",
    -              "Used when a format string that uses named conversion specifiers \
    -              is used with a dictionary that doesn't contain all the keys \
    -              required by the format string."),
    -    'E1305': ("Too many arguments for format string",
    -              "too-many-format-args",
    -              "Used when a format string that uses unnamed conversion \
    -              specifiers is given too many arguments."),
    -    'E1306': ("Not enough arguments for format string",
    -              "too-few-format-args",
    -              "Used when a format string that uses unnamed conversion \
    -              specifiers is given too few arguments"),
    -    }
    -
    -OTHER_NODES = (astroid.Const, astroid.List, astroid.Backquote,
    -               astroid.Lambda, astroid.Function,
    -               astroid.ListComp, astroid.SetComp, astroid.GenExpr)
    -
    -class StringFormatChecker(BaseChecker):
    -    """Checks string formatting operations to ensure that the format string
    -    is valid and the arguments match the format string.
    -    """
    -
    -    __implements__ = (IAstroidChecker,)
    -    name = 'string'
    -    msgs = MSGS
    -
    -    @check_messages(*(MSGS.keys()))
    -    def visit_binop(self, node):
    -        if node.op != '%':
    -            return
    -        left = node.left
    -        args = node.right
    -
    -        if not (isinstance(left, astroid.Const)
    -            and isinstance(left.value, basestring)):
    -            return
    -        format_string = left.value
    -        try:
    -            required_keys, required_num_args = \
    -                utils.parse_format_string(format_string)
    -        except utils.UnsupportedFormatCharacter, e:
    -            c = format_string[e.index]
    -            self.add_message('bad-format-character', node=node, args=(c, ord(c), e.index))
    -            return
    -        except utils.IncompleteFormatString:
    -            self.add_message('truncated-format-string', node=node)
    -            return
    -        if required_keys and required_num_args:
    -            # The format string uses both named and unnamed format
    -            # specifiers.
    -            self.add_message('mixed-format-string', node=node)
    -        elif required_keys:
    -            # The format string uses only named format specifiers.
    -            # Check that the RHS of the % operator is a mapping object
    -            # that contains precisely the set of keys required by the
    -            # format string.
    -            if isinstance(args, astroid.Dict):
    -                keys = set()
    -                unknown_keys = False
    -                for k, _ in args.items:
    -                    if isinstance(k, astroid.Const):
    -                        key = k.value
    -                        if isinstance(key, basestring):
    -                            keys.add(key)
    -                        else:
    -                            self.add_message('bad-format-string-key', node=node, args=key)
    -                    else:
    -                        # One of the keys was something other than a
    -                        # constant.  Since we can't tell what it is,
    -                        # supress checks for missing keys in the
    -                        # dictionary.
    -                        unknown_keys = True
    -                if not unknown_keys:
    -                    for key in required_keys:
    -                        if key not in keys:
    -                            self.add_message('missing-format-string-key', node=node, args=key)
    -                for key in keys:
    -                    if key not in required_keys:
    -                        self.add_message('unused-format-string-key', node=node, args=key)
    -            elif isinstance(args, OTHER_NODES + (astroid.Tuple,)):
    -                type_name = type(args).__name__
    -                self.add_message('format-needs-mapping', node=node, args=type_name)
    -            # else:
    -                # The RHS of the format specifier is a name or
    -                # expression.  It may be a mapping object, so
    -                # there's nothing we can check.
    -        else:
    -            # The format string uses only unnamed format specifiers.
    -            # Check that the number of arguments passed to the RHS of
    -            # the % operator matches the number required by the format
    -            # string.
    -            if isinstance(args, astroid.Tuple):
    -                num_args = len(args.elts)
    -            elif isinstance(args, OTHER_NODES + (astroid.Dict, astroid.DictComp)):
    -                num_args = 1
    -            else:
    -                # The RHS of the format specifier is a name or
    -                # expression.  It could be a tuple of unknown size, so
    -                # there's nothing we can check.
    -                num_args = None
    -            if num_args is not None:
    -                if num_args > required_num_args:
    -                    self.add_message('too-many-format-args', node=node)
    -                elif num_args < required_num_args:
    -                    self.add_message('too-few-format-args', node=node)
    -
    -
    -class StringMethodsChecker(BaseChecker):
    -    __implements__ = (IAstroidChecker,)
    -    name = 'string'
    -    msgs = {
    -        'E1310': ("Suspicious argument in %s.%s call",
    -                  "bad-str-strip-call",
    -                  "The argument to a str.{l,r,}strip call contains a"
    -                  " duplicate character, "),
    -        }
    -
    -    @check_messages(*(MSGS.keys()))
    -    def visit_callfunc(self, node):
    -        func = utils.safe_infer(node.func)
    -        if (isinstance(func, astroid.BoundMethod)
    -            and isinstance(func.bound, astroid.Instance)
    -            and func.bound.name in ('str', 'unicode', 'bytes')
    -            and func.name in ('strip', 'lstrip', 'rstrip')
    -            and node.args):
    -            arg = utils.safe_infer(node.args[0])
    -            if not isinstance(arg, astroid.Const):
    -                return
    -            if len(arg.value) != len(set(arg.value)):
    -                self.add_message('bad-str-strip-call', node=node,
    -                                 args=(func.bound.name, func.name))
    -
    -
    -class StringConstantChecker(BaseTokenChecker):
    -    """Check string literals"""
    -    __implements__ = (ITokenChecker, IRawChecker)
    -    name = 'string_constant'
    -    msgs = {
    -        'W1401': ('Anomalous backslash in string: \'%s\'. '
    -                  'String constant might be missing an r prefix.',
    -                  'anomalous-backslash-in-string',
    -                  'Used when a backslash is in a literal string but not as an '
    -                  'escape.'),
    -        'W1402': ('Anomalous Unicode escape in byte string: \'%s\'. '
    -                  'String constant might be missing an r or u prefix.',
    -                  'anomalous-unicode-escape-in-string',
    -                  'Used when an escape like \\u is encountered in a byte '
    -                  'string where it has no effect.'),
    -        }
    -
    -    # Characters that have a special meaning after a backslash in either
    -    # Unicode or byte strings.
    -    ESCAPE_CHARACTERS = 'abfnrtvx\n\r\t\\\'\"01234567'
    -
    -    # TODO(mbp): Octal characters are quite an edge case today; people may
    -    # prefer a separate warning where they occur.  \0 should be allowed.
    -
    -    # Characters that have a special meaning after a backslash but only in
    -    # Unicode strings.
    -    UNICODE_ESCAPE_CHARACTERS = 'uUN'
    -
    -    def process_module(self, module):
    -        self._unicode_literals = 'unicode_literals' in module.future_imports
    -
    -    def process_tokens(self, tokens):
    -        for (tok_type, token, (start_row, start_col), _, _) in tokens:
    -            if tok_type == tokenize.STRING:
    -                # 'token' is the whole un-parsed token; we can look at the start
    -                # of it to see whether it's a raw or unicode string etc.
    -                self.process_string_token(token, start_row, start_col)
    -
    -    def process_string_token(self, token, start_row, start_col):
    -        for i, c in enumerate(token):
    -            if c in '\'\"':
    -                quote_char = c
    -                break
    -        prefix = token[:i].lower() #  markers like u, b, r.
    -        after_prefix = token[i:]
    -        if after_prefix[:3] == after_prefix[-3:] == 3 * quote_char:
    -            string_body = after_prefix[3:-3]
    -        else:
    -            string_body = after_prefix[1:-1]  # Chop off quotes
    -        # No special checks on raw strings at the moment.
    -        if 'r' not in prefix:
    -            self.process_non_raw_string_token(prefix, string_body,
    -                start_row, start_col)
    -
    -    def process_non_raw_string_token(self, prefix, string_body, start_row,
    -                                     start_col):
    -        """check for bad escapes in a non-raw string.
    -
    -        prefix: lowercase string of eg 'ur' string prefix markers.
    -        string_body: the un-parsed body of the string, not including the quote
    -        marks.
    -        start_row: integer line number in the source.
    -        start_col: integer column number in the source.
    -        """
    -        # Walk through the string; if we see a backslash then escape the next
    -        # character, and skip over it.  If we see a non-escaped character,
    -        # alert, and continue.
    -        #
    -        # Accept a backslash when it escapes a backslash, or a quote, or
    -        # end-of-line, or one of the letters that introduce a special escape
    -        # sequence 
    -        #
    -        # TODO(mbp): Maybe give a separate warning about the rarely-used
    -        # \a \b \v \f?
    -        #
    -        # TODO(mbp): We could give the column of the problem character, but
    -        # add_message doesn't seem to have a way to pass it through at present.
    -        i = 0
    -        while True:
    -            i = string_body.find('\\', i)
    -            if i == -1:
    -                break
    -            # There must be a next character; having a backslash at the end
    -            # of the string would be a SyntaxError.
    -            next_char = string_body[i+1]
    -            match = string_body[i:i+2]
    -            if next_char in self.UNICODE_ESCAPE_CHARACTERS:
    -                if 'u' in prefix:
    -                    pass
    -                elif (_PY3K or self._unicode_literals) and 'b' not in prefix:
    -                    pass  # unicode by default
    -                else:
    -                    self.add_message('anomalous-unicode-escape-in-string', 
    -                                     line=start_row, args=(match, ))
    -            elif next_char not in self.ESCAPE_CHARACTERS:
    -                self.add_message('anomalous-backslash-in-string', 
    -                                 line=start_row, args=(match, ))
    -            # Whether it was a valid escape or not, backslash followed by
    -            # another character can always be consumed whole: the second
    -            # character can never be the start of a new backslash escape.
    -            i += 2
    -
    -
    -
    -def register(linter):
    -    """required method to auto register this checker """
    -    linter.register_checker(StringFormatChecker(linter))
    -    linter.register_checker(StringMethodsChecker(linter))
    -    linter.register_checker(StringConstantChecker(linter))
    diff --git a/pymode/libs/pylama/main.py b/pymode/libs/pylama/main.py
    index 9ce91c37..2cf2e929 100644
    --- a/pymode/libs/pylama/main.py
    +++ b/pymode/libs/pylama/main.py
    @@ -1,4 +1,4 @@
    -""" Pylama's shell support. """
    +"""Pylama's shell support."""
     
     from __future__ import absolute_import, with_statement
     
    @@ -6,11 +6,58 @@
     from os import walk, path as op
     
     from .config import parse_options, CURDIR, setup_logger
    -from .core import LOGGER
    +from .core import LOGGER, run
    +from .async import check_async
    +
    +
    +def check_path(options, rootdir=None, candidates=None, code=None):
    +    """Check path.
    +
    +    :param rootdir: Root directory (for making relative file paths)
    +    :param options: Parsed pylama options (from pylama.config.parse_options)
    +
    +    :returns: (list) Errors list
    +
    +    """
    +    if not candidates:
    +        candidates = []
    +        for path_ in options.paths:
    +            path = op.abspath(path_)
    +            if op.isdir(path):
    +                for root, _, files in walk(path):
    +                    candidates += [op.relpath(op.join(root, f), CURDIR) for f in files]
    +            else:
    +                candidates.append(path)
    +
    +    if rootdir is None:
    +        rootdir = path if op.isdir(path) else op.dirname(path)
    +
    +    paths = []
    +    for path in candidates:
    +
    +        if not options.force and not any(l.allow(path) for _, l in options.linters):
    +            continue
    +
    +        if not op.exists(path):
    +            continue
    +
    +        if options.skip and any(p.match(path) for p in options.skip):
    +            LOGGER.info('Skip path: %s', path)
    +            continue
    +
    +        paths.append(path)
    +
    +    if options.async:
    +        return check_async(paths, options, rootdir)
    +
    +    errors = []
    +    for path in paths:
    +        errors += run(path=path, code=code, rootdir=rootdir, options=options)
    +    return errors
     
     
     def shell(args=None, error=True):
    -    """ Endpoint for console.
    +    """Endpoint for console.
     
         Parse a command arguments, configuration files and run a checkers.
     
    @@ -30,49 +77,20 @@ def shell(args=None, error=True):
             from .hook import install_hook
             return install_hook(options.path)
     
    -    paths = [options.path]
    -
    -    if op.isdir(options.path):
    -        paths = []
    -        for root, _, files in walk(options.path):
    -            paths += [op.relpath(op.join(root, f), CURDIR) for f in files]
    +    return process_paths(options, error=error)
     
    -    return check_files(paths, options, error=error)
     
    +def process_paths(options, candidates=None, error=True):
    +    """Process files and log errors."""
    +    errors = check_path(options, rootdir=CURDIR, candidates=candidates)
     
    -def check_files(paths, options, rootpath=None, error=True):
    -    """ Check files.
    -
    -    :return list: list of errors
    -    :raise SystemExit:
    -
    -    """
    -    from .tasks import async_check_files
    -
    -    if rootpath is None:
    -        rootpath = CURDIR
    -
    -    pattern = "%(rel)s:%(lnum)s:%(col)s: %(text)s"
    +    pattern = "%(filename)s:%(lnum)s:%(col)s: %(text)s"
         if options.format == 'pylint':
    -        pattern = "%(rel)s:%(lnum)s: [%(type)s] %(text)s"
    -
    -    work_paths = []
    -    for path in paths:
    -
    -        if not options.force and not any(l.allow(path) for _, l in options.linters): # noqa
    -            continue
    -
    -        if not op.exists(path):
    -            continue
    -
    -        if options.skip and any(p.match(path) for p in options.skip):
    -            LOGGER.info('Skip path: %s', path)
    -            continue
    -        work_paths.append(path)
    -
    -    errors = async_check_files(work_paths, options, rootpath=rootpath)
    +        pattern = "%(filename)s:%(lnum)s: [%(type)s] %(text)s"
     
         for er in errors:
    +        if options.abspath:
    +            er._info['filename'] = op.abspath(er.filename)
             LOGGER.warning(pattern, er._info)
     
         if error:
    @@ -83,3 +101,5 @@ def check_files(paths, options, rootpath=None, error=True):
     
     if __name__ == '__main__':
         shell()
    +
    +# pylama:ignore=F0001
    diff --git a/pymode/libs/pylama/pytest.py b/pymode/libs/pylama/pytest.py
    index cbfe787d..eeaa58ce 100644
    --- a/pymode/libs/pylama/pytest.py
    +++ b/pymode/libs/pylama/pytest.py
    @@ -3,7 +3,7 @@
     
     from os import path as op
     
    -import py
    +import py # noqa
     import pytest
     
     
    @@ -59,11 +59,12 @@ def setup(self):
                 pytest.skip("file(s) previously passed Pylama checks")
     
         def runtest(self):
    -        call = py.io.StdCapture.call
    -        errors, out, err = call(check_file, self.fspath)
    -        # errors = check_file(self.fspath)
    +        errors = check_file(self.fspath)
             if errors:
    -            raise PylamaError(out, err)
    +            pattern = "%(filename)s:%(lnum)s:%(col)s: %(text)s"
    +            out = "\n".join([pattern % e._info for e in errors])
    +            raise PylamaError(out)
    +
             # update mtime only if test passed
             # otherwise failures would not be re-run next time
             if self.cache:
    @@ -76,11 +77,11 @@ def repr_failure(self, excinfo):
     
     
     def check_file(path):
    -    from pylama.main import parse_options, check_files
    +    from pylama.main import parse_options, process_paths
         from pylama.config import CURDIR
     
         options = parse_options()
         path = op.relpath(str(path), CURDIR)
    -    return check_files([path], options, error=False)
    +    return process_paths(options, candidates=[path], error=False)
     
    -# pylama:ignore=D,E1002,W0212
    +# pylama:ignore=D,E1002,W0212,F0001
    diff --git a/pymode/libs/pylama/lint/pylama_pylint/pylint/__init__.py b/pymode/libs/pylint/__init__.py
    similarity index 96%
    rename from pymode/libs/pylama/lint/pylama_pylint/pylint/__init__.py
    rename to pymode/libs/pylint/__init__.py
    index eed1b62f..82e557dc 100644
    --- a/pymode/libs/pylama/lint/pylama_pylint/pylint/__init__.py
    +++ b/pymode/libs/pylint/__init__.py
    @@ -15,6 +15,8 @@
     # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
     import sys
     
    +from .__pkginfo__ import version as __version__
    +
     def run_pylint():
         """run pylint"""
         from pylint.lint import Run
    diff --git a/pymode/libs/pylint/__main__.py b/pymode/libs/pylint/__main__.py
    new file mode 100644
    index 00000000..7716361d
    --- /dev/null
    +++ b/pymode/libs/pylint/__main__.py
    @@ -0,0 +1,3 @@
    +#!/usr/bin/env python
    +import pylint
    +pylint.run_pylint()
    diff --git a/pymode/libs/pylama/lint/pylama_pylint/pylint/__pkginfo__.py b/pymode/libs/pylint/__pkginfo__.py
    similarity index 90%
    rename from pymode/libs/pylama/lint/pylama_pylint/pylint/__pkginfo__.py
    rename to pymode/libs/pylint/__pkginfo__.py
    index 86488fa5..33ae5b64 100644
    --- a/pymode/libs/pylama/lint/pylama_pylint/pylint/__pkginfo__.py
    +++ b/pymode/libs/pylint/__pkginfo__.py
    @@ -15,18 +15,14 @@
     # this program; if not, write to the Free Software Foundation, Inc.,
     # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
     """pylint packaging information"""
    -import sys
    +from __future__ import absolute_import
     
     modname = distname = 'pylint'
     
    -numversion = (1, 2, 1)
    +numversion = (1, 4, 4)
     version = '.'.join([str(num) for num in numversion])
     
    -if sys.version_info < (2, 6):
    -    install_requires = ['logilab-common >= 0.53.0', 'astroid >= 1.1',
    -                        'StringFormat']
    -else:
    -    install_requires = ['logilab-common >= 0.53.0', 'astroid >= 1.1']
    +install_requires = ['logilab-common >= 0.53.0', 'astroid >= 1.3.6', 'six']
     
     license = 'GPL'
     description = "python code static checker"
    @@ -71,4 +67,4 @@
                for filename in ('pylint', 'pylint-gui', "symilar", "epylint",
                                 "pyreverse")]
     
    -include_dirs = ['test']
    +include_dirs = [join('pylint', 'test')]
    diff --git a/pymode/libs/pylama/lint/pylama_pylint/pylint/checkers/__init__.py b/pymode/libs/pylint/checkers/__init__.py
    similarity index 75%
    rename from pymode/libs/pylama/lint/pylama_pylint/pylint/checkers/__init__.py
    rename to pymode/libs/pylint/checkers/__init__.py
    index af7965be..51adb4d0 100644
    --- a/pymode/libs/pylama/lint/pylama_pylint/pylint/checkers/__init__.py
    +++ b/pymode/libs/pylint/checkers/__init__.py
    @@ -30,7 +30,9 @@
     12: logging
     13: string_format
     14: string_constant
    -15-50: not yet used: reserved for future internal checkers.
    +15: stdlib
    +16: python3
    +17-50: not yet used: reserved for future internal checkers.
     51-99: perhaps used: reserved for external checkers
     
     The raw_metrics checker has no number associated since it doesn't emit any
    @@ -42,11 +44,12 @@
     import tokenize
     import warnings
     
    -from astroid.utils import ASTWalker
     from logilab.common.configuration import OptionsProviderMixIn
     
     from pylint.reporters import diff_string
     from pylint.utils import register_plugins
    +from pylint.interfaces import UNDEFINED
    +
     
     def table_lines_from_stats(stats, old_stats, columns):
         """get values listed in  from  and ,
    @@ -56,7 +59,7 @@ def table_lines_from_stats(stats, old_stats, columns):
         lines = []
         for m_type in columns:
             new = stats[m_type]
    -        format = str
    +        format = str # pylint: disable=redefined-builtin
             if isinstance(new, float):
                 format = lambda num: '%.3f' % num
             old = old_stats.get(m_type)
    @@ -69,7 +72,7 @@ def table_lines_from_stats(stats, old_stats, columns):
         return lines
     
     
    -class BaseChecker(OptionsProviderMixIn, ASTWalker):
    +class BaseChecker(OptionsProviderMixIn):
         """base class for checkers"""
         # checker name (you may reuse an existing one)
         name = None
    @@ -81,20 +84,21 @@ class BaseChecker(OptionsProviderMixIn, ASTWalker):
         msgs = {}
         # reports issued by this checker
         reports = ()
    +    # mark this checker as enabled or not.
    +    enabled = True
     
         def __init__(self, linter=None):
             """checker instances should have the linter as argument
     
             linter is an object implementing ILinter
             """
    -        ASTWalker.__init__(self, self)
             self.name = self.name.lower()
             OptionsProviderMixIn.__init__(self)
             self.linter = linter
     
    -    def add_message(self, msg_id, line=None, node=None, args=None):
    +    def add_message(self, msg_id, line=None, node=None, args=None, confidence=UNDEFINED):
             """add a message of a given type"""
    -        self.linter.add_message(msg_id, line, node, args)
    +        self.linter.add_message(msg_id, line, node, args, confidence)
     
         # dummy methods implementing the IChecker interface
     
    @@ -105,31 +109,6 @@ def close(self):
             """called after visiting project (i.e set of modules)"""
     
     
    -class BaseRawChecker(BaseChecker):
    -    """base class for raw checkers"""
    -
    -    def process_module(self, node):
    -        """process a module
    -
    -        the module's content is accessible via the stream object
    -
    -        stream must implement the readline method
    -        """
    -        warnings.warn("Modules that need access to the tokens should "
    -                      "use the ITokenChecker interface.",
    -                      DeprecationWarning)
    -        stream = node.file_stream
    -        stream.seek(0) # XXX may be removed with astroid > 0.23
    -        if sys.version_info <= (3, 0):
    -            self.process_tokens(tokenize.generate_tokens(stream.readline))
    -        else:
    -            self.process_tokens(tokenize.tokenize(stream.readline))
    -
    -    def process_tokens(self, tokens):
    -        """should be overridden by subclasses"""
    -        raise NotImplementedError()
    -
    -
     class BaseTokenChecker(BaseChecker):
         """Base class for checkers that want to have access to the token stream."""
     
    diff --git a/pymode/libs/pylama/lint/pylama_pylint/pylint/checkers/base.py b/pymode/libs/pylint/checkers/base.py
    similarity index 64%
    rename from pymode/libs/pylama/lint/pylama_pylint/pylint/checkers/base.py
    rename to pymode/libs/pylint/checkers/base.py
    index 8136d0f3..6ce88251 100644
    --- a/pymode/libs/pylama/lint/pylama_pylint/pylint/checkers/base.py
    +++ b/pymode/libs/pylint/checkers/base.py
    @@ -16,13 +16,21 @@
     # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
     """basic checker for Python code"""
     
    +import collections
    +import itertools
     import sys
    -import astroid
    +import re
    +
    +import six
    +from six.moves import zip  # pylint: disable=redefined-builtin
    +
     from logilab.common.ureports import Table
    -from astroid import are_exclusive, InferenceError
    +
    +import astroid
     import astroid.bases
    +from astroid import are_exclusive, InferenceError
     
    -from pylint.interfaces import IAstroidChecker
    +from pylint.interfaces import IAstroidChecker, INFERENCE, INFERENCE_FAILURE, HIGH
     from pylint.utils import EmptyReport
     from pylint.reporters import diff_string
     from pylint.checkers import BaseChecker
    @@ -34,12 +42,13 @@
         overrides_a_method,
         safe_infer,
         get_argument_from_call,
    +    has_known_bases,
         NoSuchArgumentError,
    +    is_import_error,
    +    unimplemented_abstract_methods,
         )
     
     
    -import re
    -
     # regex for class/function/variable/constant name
     CLASS_NAME_RGX = re.compile('[A-Z_][a-zA-Z0-9]+$')
     MOD_NAME_RGX = re.compile('(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$')
    @@ -53,16 +62,43 @@
                         ('__reversed__', ))
     
     PY33 = sys.version_info >= (3, 3)
    -BAD_FUNCTIONS = ['map', 'filter', 'apply']
    +PY3K = sys.version_info >= (3, 0)
    +BAD_FUNCTIONS = ['map', 'filter']
     if sys.version_info < (3, 0):
         BAD_FUNCTIONS.append('input')
    -    BAD_FUNCTIONS.append('file')
     
     # Name categories that are always consistent with all naming conventions.
     EXEMPT_NAME_CATEGORIES = set(('exempt', 'ignore'))
     
    +# A mapping from builtin-qname -> symbol, to be used when generating messages
    +# about dangerous default values as arguments
    +DEFAULT_ARGUMENT_SYMBOLS = dict(
    +    zip(['.'.join([astroid.bases.BUILTINS, x]) for x in ('set', 'dict', 'list')],
    +        ['set()', '{}', '[]'])
    +)
    +
     del re
     
    +def _redefines_import(node):
    +    """ Detect that the given node (AssName) is inside an
    +    exception handler and redefines an import from the tryexcept body.
    +    Returns True if the node redefines an import, False otherwise.
    +    """
    +    current = node
    +    while current and not isinstance(current.parent, astroid.ExceptHandler):
    +        current = current.parent
    +    if not current or not is_import_error(current.parent):
    +        return False
    +    try_block = current.parent.parent
    +    for import_node in try_block.nodes_of_class((astroid.From, astroid.Import)):
    +        for name, alias in import_node.names:
    +            if alias:
    +                if alias == node.name:
    +                    return True
    +            elif name == node.name:
    +                return True
    +    return False
    +
     def in_loop(node):
         """return True if the node is inside a kind of for loop"""
         parent = node.parent
    @@ -93,6 +129,7 @@ def _loop_exits_early(loop):
         for child in loop.body:
             if isinstance(child, loop_nodes):
                 # break statement may be in orelse of child loop.
    +            # pylint: disable=superfluous-parens
                 for orelse in (child.orelse or ()):
                     for _ in orelse.nodes_of_class(astroid.Break, skip_klass=loop_nodes):
                         return True
    @@ -101,12 +138,18 @@ def _loop_exits_early(loop):
                 return True
         return False
     
    +def _is_multi_naming_match(match, node_type, confidence):
    +    return (match is not None and
    +            match.lastgroup is not None and
    +            match.lastgroup not in EXEMPT_NAME_CATEGORIES
    +            and (node_type != 'method' or confidence != INFERENCE_FAILURE))
    +
    +
     if sys.version_info < (3, 0):
         PROPERTY_CLASSES = set(('__builtin__.property', 'abc.abstractproperty'))
     else:
         PROPERTY_CLASSES = set(('builtins.property', 'abc.abstractproperty'))
    -ABC_METHODS = set(('abc.abstractproperty', 'abc.abstractmethod',
    -                   'abc.abstractclassmethod', 'abc.abstractstaticmethod'))
    +
     
     def _determine_function_name_type(node):
         """Determine the name type whose regex the a function's name should match.
    @@ -124,8 +167,8 @@ def _determine_function_name_type(node):
             # If the function is a property (decorated with @property
             # or @abc.abstractproperty), the name type is 'attr'.
             if (isinstance(decorator, astroid.Name) or
    -            (isinstance(decorator, astroid.Getattr) and
    -             decorator.attrname == 'abstractproperty')):
    +                (isinstance(decorator, astroid.Getattr) and
    +                 decorator.attrname == 'abstractproperty')):
                 infered = safe_infer(decorator)
                 if infered and infered.qname() in PROPERTY_CLASSES:
                     return 'attr'
    @@ -136,25 +179,17 @@ def _determine_function_name_type(node):
                 return 'attr'
         return 'method'
     
    -def decorated_with_abc(func):
    -    """ Determine if the `func` node is decorated
    -    with `abc` decorators (abstractmethod et co.)
    +
    +
    +def _has_abstract_methods(node):
         """
    -    if func.decorators:
    -        for node in func.decorators.nodes:
    -            try:
    -                infered = node.infer().next()
    -            except InferenceError:
    -                continue
    -            if infered and infered.qname() in ABC_METHODS:
    -                return True
    +    Determine if the given `node` has abstract methods.
     
    -def has_abstract_methods(node):
    -    """ Determine if the given `node` has
    -    abstract methods, defined with `abc` module.
    +    The methods should be made abstract by decorating them
    +    with `abc` decorators.
         """
    -    return any(decorated_with_abc(meth)
    -               for meth in node.mymethods())
    +    return len(unimplemented_abstract_methods(node)) > 0
    +
     
     def report_by_type_stats(sect, stats, old_stats):
         """make a report of
    @@ -208,7 +243,7 @@ def x(self, value): self._x = value
         if node.decorators:
             for decorator in node.decorators.nodes:
                 if (isinstance(decorator, astroid.Getattr) and
    -                getattr(decorator.expr, 'name', None) == node.name):
    +                    getattr(decorator.expr, 'name', None) == node.name):
                     return True
         return False
     
    @@ -218,57 +253,52 @@ class _BasicChecker(BaseChecker):
     
     class BasicErrorChecker(_BasicChecker):
         msgs = {
    -    'E0100': ('__init__ method is a generator',
    -              'init-is-generator',
    -              'Used when the special class method __init__ is turned into a '
    -              'generator by a yield in its body.'),
    -    'E0101': ('Explicit return in __init__',
    -              'return-in-init',
    -              'Used when the special class method __init__ has an explicit \
    -              return value.'),
    -    'E0102': ('%s already defined line %s',
    -              'function-redefined',
    -              'Used when a function / class / method is redefined.'),
    -    'E0103': ('%r not properly in loop',
    -              'not-in-loop',
    -              'Used when break or continue keywords are used outside a loop.'),
    -
    -    'E0104': ('Return outside function',
    -              'return-outside-function',
    -              'Used when a "return" statement is found outside a function or '
    -              'method.'),
    -    'E0105': ('Yield outside function',
    -              'yield-outside-function',
    -              'Used when a "yield" statement is found outside a function or '
    -              'method.'),
    -    'E0106': ('Return with argument inside generator',
    -              'return-arg-in-generator',
    -              'Used when a "return" statement with an argument is found '
    -              'outside in a generator function or method (e.g. with some '
    -              '"yield" statements).',
    -              {'maxversion': (3, 3)}),
    -    'E0107': ("Use of the non-existent %s operator",
    -              'nonexistent-operator',
    -              "Used when you attempt to use the C-style pre-increment or"
    -              "pre-decrement operator -- and ++, which doesn't exist in Python."),
    -    'E0108': ('Duplicate argument name %s in function definition',
    -              'duplicate-argument-name',
    -              'Duplicate argument names in function definitions are syntax'
    -              ' errors.'),
    -    'E0110': ('Abstract class with abstract methods instantiated',
    -              'abstract-class-instantiated',
    -              'Used when an abstract class with `abc.ABCMeta` as metaclass '
    -              'has abstract methods and is instantiated.',
    -              {'minversion': (3, 0)}),
    -    'W0120': ('Else clause on loop without a break statement',
    -              'useless-else-on-loop',
    -              'Loops should only have an else clause if they can exit early '
    -              'with a break statement, otherwise the statements under else '
    -              'should be on the same scope as the loop itself.'),
    -    }
    -
    -    def __init__(self, linter):
    -        _BasicChecker.__init__(self, linter)
    +        'E0100': ('__init__ method is a generator',
    +                  'init-is-generator',
    +                  'Used when the special class method __init__ is turned into a '
    +                  'generator by a yield in its body.'),
    +        'E0101': ('Explicit return in __init__',
    +                  'return-in-init',
    +                  'Used when the special class method __init__ has an explicit '
    +                  'return value.'),
    +        'E0102': ('%s already defined line %s',
    +                  'function-redefined',
    +                  'Used when a function / class / method is redefined.'),
    +        'E0103': ('%r not properly in loop',
    +                  'not-in-loop',
    +                  'Used when break or continue keywords are used outside a loop.'),
    +        'E0104': ('Return outside function',
    +                  'return-outside-function',
    +                  'Used when a "return" statement is found outside a function or '
    +                  'method.'),
    +        'E0105': ('Yield outside function',
    +                  'yield-outside-function',
    +                  'Used when a "yield" statement is found outside a function or '
    +                  'method.'),
    +        'E0106': ('Return with argument inside generator',
    +                  'return-arg-in-generator',
    +                  'Used when a "return" statement with an argument is found '
    +                  'outside in a generator function or method (e.g. with some '
    +                  '"yield" statements).',
    +                  {'maxversion': (3, 3)}),
    +        'E0107': ("Use of the non-existent %s operator",
    +                  'nonexistent-operator',
    +                  "Used when you attempt to use the C-style pre-increment or"
    +                  "pre-decrement operator -- and ++, which doesn't exist in Python."),
    +        'E0108': ('Duplicate argument name %s in function definition',
    +                  'duplicate-argument-name',
    +                  'Duplicate argument names in function definitions are syntax'
    +                  ' errors.'),
    +        'E0110': ('Abstract class %r with abstract methods instantiated',
    +                  'abstract-class-instantiated',
    +                  'Used when an abstract class with `abc.ABCMeta` as metaclass '
    +                  'has abstract methods and is instantiated.'),
    +        'W0120': ('Else clause on loop without a break statement',
    +                  'useless-else-on-loop',
    +                  'Loops should only have an else clause if they can exit early '
    +                  'with a break statement, otherwise the statements under else '
    +                  'should be on the same scope as the loop itself.'),
    +        }
     
         @check_messages('function-redefined')
         def visit_class(self, node):
    @@ -289,11 +319,11 @@ def visit_function(self, node):
                 else:
                     values = [r.value for r in returns]
                     # Are we returning anything but None from constructors
    -                if  [v for v in values if
    -                     not (v is None or
    -                          (isinstance(v, astroid.Const) and v.value is None) or
    -                          (isinstance(v, astroid.Name)  and v.name == 'None')
    -                          )]:
    +                if [v for v in values
    +                        if not (v is None or
    +                                (isinstance(v, astroid.Const) and v.value is None) or
    +                                (isinstance(v, astroid.Name)  and v.name == 'None')
    +                               )]:
                         self.add_message('return-in-init', node=node)
             elif node.is_generator():
                 # make sure we don't mix non-None returns and yields
    @@ -342,38 +372,39 @@ def visit_while(self, node):
         def visit_unaryop(self, node):
             """check use of the non-existent ++ and -- operator operator"""
             if ((node.op in '+-') and
    -            isinstance(node.operand, astroid.UnaryOp) and
    -            (node.operand.op == node.op)):
    +                isinstance(node.operand, astroid.UnaryOp) and
    +                (node.operand.op == node.op)):
                 self.add_message('nonexistent-operator', node=node, args=node.op*2)
     
         @check_messages('abstract-class-instantiated')
         def visit_callfunc(self, node):
             """ Check instantiating abstract class with
    -        abc.ABCMeta as metaclass. 
    +        abc.ABCMeta as metaclass.
             """
             try:
    -            infered = node.func.infer().next()
    +            infered = next(node.func.infer())
             except astroid.InferenceError:
                 return
             if not isinstance(infered, astroid.Class):
                 return
             # __init__ was called
             metaclass = infered.metaclass()
    +        abstract_methods = _has_abstract_methods(infered)
             if metaclass is None:
                 # Python 3.4 has `abc.ABC`, which won't be detected
                 # by ClassNode.metaclass()
                 for ancestor in infered.ancestors():
    -                if (ancestor.qname() == 'abc.ABC' and
    -                    has_abstract_methods(infered)):
    -
    -                    self.add_message('abstract-class-instantiated', node=node)
    +                if ancestor.qname() == 'abc.ABC' and abstract_methods:
    +                    self.add_message('abstract-class-instantiated',
    +                                     args=(infered.name, ),
    +                                     node=node)
                         break
                 return
    -        if (metaclass.qname() == 'abc.ABCMeta' and
    -            has_abstract_methods(infered)):
    +        if metaclass.qname() == 'abc.ABCMeta' and abstract_methods:
    +            self.add_message('abstract-class-instantiated',
    +                             args=(infered.name, ),
    +                             node=node)
     
    -            self.add_message('abstract-class-instantiated', node=node)
    -   
         def _check_else_on_loop(self, node):
             """Check that any loop with an else clause has a break statement."""
             if node.orelse and not _loop_exits_early(node):
    @@ -417,88 +448,78 @@ class BasicChecker(_BasicChecker):
     
         name = 'basic'
         msgs = {
    -    'W0101': ('Unreachable code',
    -              'unreachable',
    -              'Used when there is some code behind a "return" or "raise" \
    -              statement, which will never be accessed.'),
    -    'W0102': ('Dangerous default value %s as argument',
    -              'dangerous-default-value',
    -              'Used when a mutable value as list or dictionary is detected in \
    -              a default value for an argument.'),
    -    'W0104': ('Statement seems to have no effect',
    -              'pointless-statement',
    -              'Used when a statement doesn\'t have (or at least seems to) \
    -              any effect.'),
    -    'W0105': ('String statement has no effect',
    -              'pointless-string-statement',
    -              'Used when a string is used as a statement (which of course \
    -              has no effect). This is a particular case of W0104 with its \
    -              own message so you can easily disable it if you\'re using \
    -              those strings as documentation, instead of comments.'),
    -    'W0106': ('Expression "%s" is assigned to nothing',
    -              'expression-not-assigned',
    -              'Used when an expression that is not a function call is assigned\
    -              to nothing. Probably something else was intended.'),
    -    'W0108': ('Lambda may not be necessary',
    -              'unnecessary-lambda',
    -              'Used when the body of a lambda expression is a function call \
    -              on the same argument list as the lambda itself; such lambda \
    -              expressions are in all but a few cases replaceable with the \
    -              function being called in the body of the lambda.'),
    -    'W0109': ("Duplicate key %r in dictionary",
    -              'duplicate-key',
    -              "Used when a dictionary expression binds the same key multiple \
    -              times."),
    -    'W0122': ('Use of exec',
    -              'exec-used',
    -              'Used when you use the "exec" statement (function for Python 3), to discourage its \
    -              usage. That doesn\'t mean you can not use it !'),
    -    'W0123': ('Use of eval',
    -              'eval-used',
    -              'Used when you use the "eval" function, to discourage its '
    -              'usage. Consider using `ast.literal_eval` for safely evaluating '
    -              'strings containing Python expressions '
    -              'from untrusted sources. '),
    -    'W0141': ('Used builtin function %r',
    -              'bad-builtin',
    -              'Used when a black listed builtin function is used (see the '
    -              'bad-function option). Usual black listed functions are the ones '
    -              'like map, or filter , where Python offers now some cleaner '
    -              'alternative like list comprehension.'),
    -    'W0142': ('Used * or ** magic',
    -              'star-args',
    -              'Used when a function or method is called using `*args` or '
    -              '`**kwargs` to dispatch arguments. This doesn\'t improve '
    -              'readability and should be used with care.'),
    -    'W0150': ("%s statement in finally block may swallow exception",
    -              'lost-exception',
    -              "Used when a break or a return statement is found inside the \
    -              finally clause of a try...finally block: the exceptions raised \
    -              in the try clause will be silently swallowed instead of being \
    -              re-raised."),
    -    'W0199': ('Assert called on a 2-uple. Did you mean \'assert x,y\'?',
    -              'assert-on-tuple',
    -              'A call of assert on a tuple will always evaluate to true if '
    -              'the tuple is not empty, and will always evaluate to false if '
    -              'it is.'),
    -    'W0121': ('Use raise ErrorClass(args) instead of raise ErrorClass, args.',
    -              'old-raise-syntax',
    -              "Used when the alternate raise syntax 'raise foo, bar' is used "
    -              "instead of 'raise foo(bar)'.",
    -              {'maxversion': (3, 0)}),
    -
    -    'C0121': ('Missing required attribute "%s"', # W0103
    -              'missing-module-attribute',
    -              'Used when an attribute required for modules is missing.'),
    -
    -    'E0109': ('Missing argument to reversed()',
    -              'missing-reversed-argument',
    -              'Used when reversed() builtin didn\'t receive an argument.'),
    -    'E0111': ('The first reversed() argument is not a sequence',
    -              'bad-reversed-sequence',
    -              'Used when the first argument to reversed() builtin '
    -              'isn\'t a sequence (does not implement __reversed__, '
    -              'nor __getitem__ and __len__'),
    +        'W0101': ('Unreachable code',
    +                  'unreachable',
    +                  'Used when there is some code behind a "return" or "raise" '
    +                  'statement, which will never be accessed.'),
    +        'W0102': ('Dangerous default value %s as argument',
    +                  'dangerous-default-value',
    +                  'Used when a mutable value as list or dictionary is detected in '
    +                  'a default value for an argument.'),
    +        'W0104': ('Statement seems to have no effect',
    +                  'pointless-statement',
    +                  'Used when a statement doesn\'t have (or at least seems to) '
    +                  'any effect.'),
    +        'W0105': ('String statement has no effect',
    +                  'pointless-string-statement',
    +                  'Used when a string is used as a statement (which of course '
    +                  'has no effect). This is a particular case of W0104 with its '
    +                  'own message so you can easily disable it if you\'re using '
    +                  'those strings as documentation, instead of comments.'),
    +        'W0106': ('Expression "%s" is assigned to nothing',
    +                  'expression-not-assigned',
    +                  'Used when an expression that is not a function call is assigned '
    +                  'to nothing. Probably something else was intended.'),
    +        'W0108': ('Lambda may not be necessary',
    +                  'unnecessary-lambda',
    +                  'Used when the body of a lambda expression is a function call '
    +                  'on the same argument list as the lambda itself; such lambda '
    +                  'expressions are in all but a few cases replaceable with the '
    +                  'function being called in the body of the lambda.'),
    +        'W0109': ("Duplicate key %r in dictionary",
    +                  'duplicate-key',
    +                  'Used when a dictionary expression binds the same key multiple '
    +                  'times.'),
    +        'W0122': ('Use of exec',
    +                  'exec-used',
    +                  'Used when you use the "exec" statement (function for Python '
    +                  '3), to discourage its usage. That doesn\'t '
    +                  'mean you can not use it !'),
    +        'W0123': ('Use of eval',
    +                  'eval-used',
    +                  'Used when you use the "eval" function, to discourage its '
    +                  'usage. Consider using `ast.literal_eval` for safely evaluating '
    +                  'strings containing Python expressions '
    +                  'from untrusted sources. '),
    +        'W0141': ('Used builtin function %r',
    +                  'bad-builtin',
    +                  'Used when a black listed builtin function is used (see the '
    +                  'bad-function option). Usual black listed functions are the ones '
    +                  'like map, or filter , where Python offers now some cleaner '
    +                  'alternative like list comprehension.'),
    +        'W0150': ("%s statement in finally block may swallow exception",
    +                  'lost-exception',
    +                  'Used when a break or a return statement is found inside the '
    +                  'finally clause of a try...finally block: the exceptions raised '
    +                  'in the try clause will be silently swallowed instead of being '
    +                  're-raised.'),
    +        'W0199': ('Assert called on a 2-uple. Did you mean \'assert x,y\'?',
    +                  'assert-on-tuple',
    +                  'A call of assert on a tuple will always evaluate to true if '
    +                  'the tuple is not empty, and will always evaluate to false if '
    +                  'it is.'),
    +        'C0121': ('Missing required attribute "%s"', # W0103
    +                  'missing-module-attribute',
    +                  'Used when an attribute required for modules is missing.'),
    +
    +        'E0109': ('Missing argument to reversed()',
    +                  'missing-reversed-argument',
    +                  'Used when reversed() builtin didn\'t receive an argument.'),
    +        'E0111': ('The first reversed() argument is not a sequence',
    +                  'bad-reversed-sequence',
    +                  'Used when the first argument to reversed() builtin '
    +                  'isn\'t a sequence (does not implement __reversed__, '
    +                  'nor __getitem__ and __len__'),
     
         }
     
    @@ -507,14 +528,14 @@ class BasicChecker(_BasicChecker):
                      'metavar' : '',
                      'help' : 'Required attributes for module, separated by a '
                               'comma'}
    -                ),
    +               ),
                    ('bad-functions',
                     {'default' : BAD_FUNCTIONS,
                      'type' :'csv', 'metavar' : '',
                      'help' : 'List of builtins function names that should not be '
                               'used, separated by a comma'}
    -                ),
    -               )
    +               ),
    +              )
         reports = (('RP0101', 'Statistics by type', report_by_type_stats),)
     
         def __init__(self, linter):
    @@ -528,6 +549,7 @@ def open(self):
             self._tryfinallys = []
             self.stats = self.linter.add_stats(module=0, function=0,
                                                method=0, class_=0)
    +
         @check_messages('missing-module-attribute')
         def visit_module(self, node):
             """check module name, docstring and required arguments
    @@ -537,7 +559,7 @@ def visit_module(self, node):
                 if attr not in node:
                     self.add_message('missing-module-attribute', node=node, args=attr)
     
    -    def visit_class(self, node):
    +    def visit_class(self, node): # pylint: disable=unused-argument
             """check module name, docstring and redefinition
             increment branch counter
             """
    @@ -549,8 +571,20 @@ def visit_discard(self, node):
             """check for various kind of statements without effect"""
             expr = node.value
             if isinstance(expr, astroid.Const) and isinstance(expr.value,
    -                                                        basestring):
    +                                                          six.string_types):
                 # treat string statement in a separated message
    +            # Handle PEP-257 attribute docstrings.
    +            # An attribute docstring is defined as being a string right after
    +            # an assignment at the module level, class level or __init__ level.
    +            scope = expr.scope()
    +            if isinstance(scope, (astroid.Class, astroid.Module, astroid.Function)):
    +                if isinstance(scope, astroid.Function) and scope.name != '__init__':
    +                    pass
    +                else:
    +                    sibling = expr.previous_sibling()
    +                    if (sibling is not None and sibling.scope() is scope and
    +                            isinstance(sibling, astroid.Assign)):
    +                        return
                 self.add_message('pointless-string-statement', node=node)
                 return
             # ignore if this is :
    @@ -560,11 +594,12 @@ def visit_discard(self, node):
             # warn W0106 if we have any underlying function call (we can't predict
             # side effects), else pointless-statement
             if (isinstance(expr, (astroid.Yield, astroid.CallFunc)) or
    -            (isinstance(node.parent, astroid.TryExcept) and
    -             node.parent.body == [node])):
    +                (isinstance(node.parent, astroid.TryExcept) and
    +                 node.parent.body == [node])):
                 return
             if any(expr.nodes_of_class(astroid.CallFunc)):
    -            self.add_message('expression-not-assigned', node=node, args=expr.as_string())
    +            self.add_message('expression-not-assigned', node=node,
    +                             args=expr.as_string())
             else:
                 self.add_message('pointless-statement', node=node)
     
    @@ -597,15 +632,15 @@ def visit_lambda(self, node):
             ordinary_args = list(node.args.args)
             if node.args.kwarg:
                 if (not call.kwargs
    -                or not isinstance(call.kwargs, astroid.Name)
    -                or node.args.kwarg != call.kwargs.name):
    +                    or not isinstance(call.kwargs, astroid.Name)
    +                    or node.args.kwarg != call.kwargs.name):
                     return
             elif call.kwargs:
                 return
             if node.args.vararg:
                 if (not call.starargs
    -                or not isinstance(call.starargs, astroid.Name)
    -                or node.args.vararg != call.starargs.name):
    +                    or not isinstance(call.starargs, astroid.Name)
    +                    or node.args.vararg != call.starargs.name):
                     return
             elif call.starargs:
                 return
    @@ -613,11 +648,16 @@ def visit_lambda(self, node):
             # ordinary_args[i].name == call.args[i].name.
             if len(ordinary_args) != len(call.args):
                 return
    -        for i in xrange(len(ordinary_args)):
    +        for i in range(len(ordinary_args)):
                 if not isinstance(call.args[i], astroid.Name):
                     return
                 if node.args.args[i].name != call.args[i].name:
                     return
    +        if (isinstance(node.body.func, astroid.Getattr) and
    +                isinstance(node.body.func.expr, astroid.CallFunc)):
    +            # Chained call, the intermediate call might
    +            # return something else (but we don't check that, yet).
    +            return
             self.add_message('unnecessary-lambda', line=node.fromlineno, node=node)
     
         @check_messages('dangerous-default-value')
    @@ -626,22 +666,45 @@ def visit_function(self, node):
             variable names, max locals
             """
             self.stats[node.is_method() and 'method' or 'function'] += 1
    +        self._check_dangerous_default(node)
    +
    +    def _check_dangerous_default(self, node):
             # check for dangerous default values as arguments
    +        is_iterable = lambda n: isinstance(n, (astroid.List,
    +                                               astroid.Set,
    +                                               astroid.Dict))
             for default in node.args.defaults:
                 try:
    -                value = default.infer().next()
    +                value = next(default.infer())
                 except astroid.InferenceError:
                     continue
    -            builtins = astroid.bases.BUILTINS
    +
                 if (isinstance(value, astroid.Instance) and
    -                value.qname() in ['.'.join([builtins, x]) for x in ('set', 'dict', 'list')]):
    +                    value.qname() in DEFAULT_ARGUMENT_SYMBOLS):
    +
                     if value is default:
    -                    msg = default.as_string()
    -                elif type(value) is astroid.Instance:
    -                    msg = '%s (%s)' % (default.as_string(), value.qname())
    +                    msg = DEFAULT_ARGUMENT_SYMBOLS[value.qname()]
    +                elif type(value) is astroid.Instance or is_iterable(value):
    +                    # We are here in the following situation(s):
    +                    #   * a dict/set/list/tuple call which wasn't inferred
    +                    #     to a syntax node ({}, () etc.). This can happen
    +                    #     when the arguments are invalid or unknown to
    +                    #     the inference.
    +                    #   * a variable from somewhere else, which turns out to be a list
    +                    #     or a dict.
    +                    if is_iterable(default):
    +                        msg = value.pytype()
    +                    elif isinstance(default, astroid.CallFunc):
    +                        msg = '%s() (%s)' % (value.name, value.qname())
    +                    else:
    +                        msg = '%s (%s)' % (default.as_string(), value.qname())
                     else:
    -                    msg = '%s (%s)' % (default.as_string(), value.as_string())
    -                self.add_message('dangerous-default-value', node=node, args=(msg,))
    +                    # this argument is a name
    +                    msg = '%s (%s)' % (default.as_string(),
    +                                       DEFAULT_ARGUMENT_SYMBOLS[value.qname()])
    +                self.add_message('dangerous-default-value',
    +                                 node=node,
    +                                 args=(msg, ))
     
         @check_messages('unreachable', 'lost-exception')
         def visit_return(self, node):
    @@ -673,24 +736,20 @@ def visit_break(self, node):
             # 2 - Is it inside final body of a try...finally bloc ?
             self._check_not_in_finally(node, 'break', (astroid.For, astroid.While,))
     
    -    @check_messages('unreachable', 'old-raise-syntax')
    +    @check_messages('unreachable')
         def visit_raise(self, node):
             """check if the node has a right sibling (if so, that's some unreachable
             code)
             """
             self._check_unreachable(node)
    -        if sys.version_info >= (3, 0):
    -            return
    -        if node.exc is not None and node.inst is not None and node.tback is None:
    -            self.add_message('old-raise-syntax', node=node)
     
         @check_messages('exec-used')
         def visit_exec(self, node):
             """just print a warning on exec statements"""
             self.add_message('exec-used', node=node)
     
    -    @check_messages('bad-builtin', 'star-args', 'eval-used', 
    -                    'exec-used', 'missing-reversed-argument', 
    +    @check_messages('bad-builtin', 'eval-used',
    +                    'exec-used', 'missing-reversed-argument',
                         'bad-reversed-sequence')
         def visit_callfunc(self, node):
             """visit a CallFunc node -> check if this is not a blacklisted builtin
    @@ -710,24 +769,12 @@ def visit_callfunc(self, node):
                         self.add_message('eval-used', node=node)
                     if name in self.config.bad_functions:
                         self.add_message('bad-builtin', node=node, args=name)
    -        if node.starargs or node.kwargs:
    -            scope = node.scope()
    -            if isinstance(scope, astroid.Function):
    -                toprocess = [(n, vn) for (n, vn) in ((node.starargs, scope.args.vararg),
    -                                                     (node.kwargs, scope.args.kwarg)) if n]
    -                if toprocess:
    -                    for cfnode, fargname in toprocess[:]:
    -                        if getattr(cfnode, 'name', None) == fargname:
    -                            toprocess.remove((cfnode, fargname))
    -                    if not toprocess:
    -                        return # star-args can be skipped
    -            self.add_message('star-args', node=node.func)
     
         @check_messages('assert-on-tuple')
         def visit_assert(self, node):
             """check the use of an assert statement on a tuple."""
             if node.fail is None and isinstance(node.test, astroid.Tuple) and \
    -           len(node.test.elts) == 2:
    +                len(node.test.elts) == 2:
                 self.add_message('assert-on-tuple', node=node)
     
         @check_messages('duplicate-key')
    @@ -745,7 +792,7 @@ def visit_tryfinally(self, node):
             """update try...finally flag"""
             self._tryfinallys.append(node)
     
    -    def leave_tryfinally(self, node):
    +    def leave_tryfinally(self, node): # pylint: disable=unused-argument
             """update try...finally flag"""
             self._tryfinallys.pop()
     
    @@ -772,7 +819,7 @@ def _check_not_in_finally(self, node, node_name, breaker_classes=()):
                     return
                 _node = _parent
                 _parent = _node.parent
    -    
    +
         def _check_reversed(self, node):
             """ check that the argument to `reversed` is a sequence """
             try:
    @@ -783,25 +830,25 @@ def _check_reversed(self, node):
                 if argument is astroid.YES:
                     return
                 if argument is None:
    -                # nothing was infered
    -                # try to see if we have iter()
    +                # Nothing was infered.
    +                # Try to see if we have iter().
                     if isinstance(node.args[0], astroid.CallFunc):
                         try:
    -                        func = node.args[0].func.infer().next()
    +                        func = next(node.args[0].func.infer())
                         except InferenceError:
                             return
                         if (getattr(func, 'name', None) == 'iter' and
    -                        is_builtin_object(func)):
    +                            is_builtin_object(func)):
                             self.add_message('bad-reversed-sequence', node=node)
                     return
     
                 if isinstance(argument, astroid.Instance):
    -                if (argument._proxied.name == 'dict' and 
    -                    is_builtin_object(argument._proxied)):
    -                     self.add_message('bad-reversed-sequence', node=node)
    -                     return
    +                if (argument._proxied.name == 'dict' and
    +                        is_builtin_object(argument._proxied)):
    +                    self.add_message('bad-reversed-sequence', node=node)
    +                    return
                     elif any(ancestor.name == 'dict' and is_builtin_object(ancestor)
    -                       for ancestor in argument._proxied.ancestors()):
    +                         for ancestor in argument._proxied.ancestors()):
                         # mappings aren't accepted by reversed()
                         self.add_message('bad-reversed-sequence', node=node)
                         return
    @@ -814,10 +861,10 @@ def _check_reversed(self, node):
                                 break
                         else:
                             break
    -                else:             
    -                    # check if it is a .deque. It doesn't seem that
    -                    # we can retrieve special methods 
    -                    # from C implemented constructs    
    +                else:
    +                    # Check if it is a .deque. It doesn't seem that
    +                    # we can retrieve special methods
    +                    # from C implemented constructs.
                         if argument._proxied.qname().endswith(".deque"):
                             return
                         self.add_message('bad-reversed-sequence', node=node)
    @@ -840,62 +887,61 @@ def _check_reversed(self, node):
     
     def _create_naming_options():
         name_options = []
    -    for name_type, (rgx, human_readable_name) in _NAME_TYPES.iteritems():
    +    for name_type, (rgx, human_readable_name) in six.iteritems(_NAME_TYPES):
             name_type = name_type.replace('_', '-')
             name_options.append((
    -            '%s-rgx' % (name_type,), 
    +            '%s-rgx' % (name_type,),
                 {'default': rgx, 'type': 'regexp', 'metavar': '',
                  'help': 'Regular expression matching correct %s names' % (human_readable_name,)}))
             name_options.append((
    -            '%s-name-hint' % (name_type,), 
    +            '%s-name-hint' % (name_type,),
                 {'default': rgx.pattern, 'type': 'string', 'metavar': '',
                  'help': 'Naming hint for %s names' % (human_readable_name,)}))
    -
    -    return tuple(name_options) 
    +    return tuple(name_options)
     
     class NameChecker(_BasicChecker):
         msgs = {
    -    'C0102': ('Black listed name "%s"',
    -              'blacklisted-name',
    -              'Used when the name is listed in the black list (unauthorized \
    -              names).'),
    -    'C0103': ('Invalid %s name "%s"%s',
    -              'invalid-name',
    -              'Used when the name doesn\'t match the regular expression \
    -              associated to its type (constant, variable, class...).'),
    +        'C0102': ('Black listed name "%s"',
    +                  'blacklisted-name',
    +                  'Used when the name is listed in the black list (unauthorized '
    +                  'names).'),
    +        'C0103': ('Invalid %s name "%s"%s',
    +                  'invalid-name',
    +                  'Used when the name doesn\'t match the regular expression '
    +                  'associated to its type (constant, variable, class...).'),
         }
     
    -    options = (# XXX use set
    -               ('good-names',
    +    options = (('good-names',
                     {'default' : ('i', 'j', 'k', 'ex', 'Run', '_'),
                      'type' :'csv', 'metavar' : '',
                      'help' : 'Good variable names which should always be accepted,'
                               ' separated by a comma'}
    -                ),
    +               ),
                    ('bad-names',
                     {'default' : ('foo', 'bar', 'baz', 'toto', 'tutu', 'tata'),
                      'type' :'csv', 'metavar' : '',
                      'help' : 'Bad variable names which should always be refused, '
                               'separated by a comma'}
    -                ),
    +               ),
                    ('name-group',
                     {'default' : (),
                      'type' :'csv', 'metavar' : '',
                      'help' : ('Colon-delimited sets of names that determine each'
                                ' other\'s naming style when the name regexes'
                                ' allow several styles.')}
    -                ),
    +               ),
                    ('include-naming-hint',
                     {'default': False, 'type' : 'yn', 'metavar' : '',
                      'help': 'Include a hint for the correct naming format with invalid-name'}
    -                ),
    -               ) + _create_naming_options()
    +               ),
    +              ) + _create_naming_options()
     
     
         def __init__(self, linter):
             _BasicChecker.__init__(self, linter)
             self._name_category = {}
             self._name_group = {}
    +        self._bad_names = {}
     
         def open(self):
             self.stats = self.linter.add_stats(badname_module=0,
    @@ -913,11 +959,30 @@ def open(self):
         @check_messages('blacklisted-name', 'invalid-name')
         def visit_module(self, node):
             self._check_name('module', node.name.split('.')[-1], node)
    +        self._bad_names = {}
    +
    +    def leave_module(self, node): # pylint: disable=unused-argument
    +        for all_groups in six.itervalues(self._bad_names):
    +            if len(all_groups) < 2:
    +                continue
    +            groups = collections.defaultdict(list)
    +            min_warnings = sys.maxsize
    +            for group in six.itervalues(all_groups):
    +                groups[len(group)].append(group)
    +                min_warnings = min(len(group), min_warnings)
    +            if len(groups[min_warnings]) > 1:
    +                by_line = sorted(groups[min_warnings],
    +                                 key=lambda group: min(warning[0].lineno for warning in group))
    +                warnings = itertools.chain(*by_line[1:])
    +            else:
    +                warnings = groups[min_warnings][0]
    +            for args in warnings:
    +                self._raise_name_warning(*args)
     
         @check_messages('blacklisted-name', 'invalid-name')
         def visit_class(self, node):
             self._check_name('class', node.name, node)
    -        for attr, anodes in node.instance_attrs.iteritems():
    +        for attr, anodes in six.iteritems(node.instance_attrs):
                 if not list(node.instance_attr_ancestors(attr)):
                     self._check_name('attr', attr, anodes[0])
     
    @@ -925,10 +990,15 @@ def visit_class(self, node):
         def visit_function(self, node):
             # Do not emit any warnings if the method is just an implementation
             # of a base class method.
    -        if node.is_method() and overrides_a_method(node.parent.frame(), node.name):
    -            return
    +        confidence = HIGH
    +        if node.is_method():
    +            if overrides_a_method(node.parent.frame(), node.name):
    +                return
    +            confidence = (INFERENCE if has_known_bases(node.parent.frame())
    +                          else INFERENCE_FAILURE)
    +
             self._check_name(_determine_function_name_type(node),
    -                         node.name, node)
    +                         node.name, node, confidence)
             # Check argument names
             args = node.args.args
             if args is not None:
    @@ -951,13 +1021,17 @@ def visit_assname(self, node):
                     if isinstance(safe_infer(ass_type.value), astroid.Class):
                         self._check_name('class', node.name, node)
                     else:
    -                    self._check_name('const', node.name, node)
    +                    if not _redefines_import(node):
    +                        # Don't emit if the name redefines an import
    +                        # in an ImportError except handler.
    +                        self._check_name('const', node.name, node)
                 elif isinstance(ass_type, astroid.ExceptHandler):
                     self._check_name('variable', node.name, node)
             elif isinstance(frame, astroid.Function):
                 # global introduced variable aren't in the function locals
                 if node.name in frame and node.name not in frame.argnames():
    -                self._check_name('variable', node.name, node)
    +                if not _redefines_import(node):
    +                    self._check_name('variable', node.name, node)
             elif isinstance(frame, astroid.Class):
                 if not list(frame.local_attr_ancestors(node.name)):
                     self._check_name('class_attribute', node.name, node)
    @@ -973,12 +1047,16 @@ def _recursive_check_names(self, args, node):
         def _find_name_group(self, node_type):
             return self._name_group.get(node_type, node_type)
     
    -    def _is_multi_naming_match(self, match):
    -        return (match is not None and
    -                match.lastgroup is not None and
    -                match.lastgroup not in EXEMPT_NAME_CATEGORIES)
    +    def _raise_name_warning(self, node, node_type, name, confidence):
    +        type_label = _NAME_TYPES[node_type][1]
    +        hint = ''
    +        if self.config.include_naming_hint:
    +            hint = ' (hint: %s)' % (getattr(self.config, node_type + '_name_hint'))
    +        self.add_message('invalid-name', node=node, args=(type_label, name, hint),
    +                         confidence=confidence)
    +        self.stats['badname_' + node_type] += 1
     
    -    def _check_name(self, node_type, name, node):
    +    def _check_name(self, node_type, name, node, confidence=HIGH):
             """check for a name using the type's regexp"""
             if is_inside_except(node):
                 clobbering, _ = clobber_in_except(node)
    @@ -993,48 +1071,42 @@ def _check_name(self, node_type, name, node):
             regexp = getattr(self.config, node_type + '_rgx')
             match = regexp.match(name)
     
    -        if self._is_multi_naming_match(match):
    +        if _is_multi_naming_match(match, node_type, confidence):
                 name_group = self._find_name_group(node_type)
    -            if name_group not in self._name_category:
    -                self._name_category[name_group] = match.lastgroup
    -            elif self._name_category[name_group] != match.lastgroup:
    -                match = None
    +            bad_name_group = self._bad_names.setdefault(name_group, {})
    +            warnings = bad_name_group.setdefault(match.lastgroup, [])
    +            warnings.append((node, node_type, name, confidence))
     
             if match is None:
    -            type_label = _NAME_TYPES[node_type][1]
    -            hint = ''
    -            if self.config.include_naming_hint:
    -                hint = ' (hint: %s)' % (getattr(self.config, node_type + '_name_hint'))
    -            self.add_message('invalid-name', node=node, args=(type_label, name, hint))
    -            self.stats['badname_' + node_type] += 1
    +            self._raise_name_warning(node, node_type, name, confidence)
     
     
     class DocStringChecker(_BasicChecker):
         msgs = {
    -    'C0111': ('Missing %s docstring', # W0131
    -              'missing-docstring',
    -              'Used when a module, function, class or method has no docstring.\
    -              Some special methods like __init__ doesn\'t necessary require a \
    -              docstring.'),
    -    'C0112': ('Empty %s docstring', # W0132
    -              'empty-docstring',
    -              'Used when a module, function, class or method has an empty \
    -              docstring (it would be too easy ;).'),
    -    }
    +        'C0111': ('Missing %s docstring', # W0131
    +                  'missing-docstring',
    +                  'Used when a module, function, class or method has no docstring.'
    +                  'Some special methods like __init__ doesn\'t necessary require a '
    +                  'docstring.'),
    +        'C0112': ('Empty %s docstring', # W0132
    +                  'empty-docstring',
    +                  'Used when a module, function, class or method has an empty '
    +                  'docstring (it would be too easy ;).'),
    +        }
         options = (('no-docstring-rgx',
                     {'default' : NO_REQUIRED_DOC_RGX,
                      'type' : 'regexp', 'metavar' : '',
                      'help' : 'Regular expression which should only match '
                               'function or class names that do not require a '
                               'docstring.'}
    -                ),
    +               ),
                    ('docstring-min-length',
                     {'default' : -1,
                      'type' : 'int', 'metavar' : '',
                      'help': ('Minimum line length for functions/classes that'
                               ' require docstrings, shorter ones are exempt.')}
    -                ),
    -               )
    +               ),
    +              )
     
     
         def open(self):
    @@ -1050,12 +1122,15 @@ def visit_module(self, node):
         def visit_class(self, node):
             if self.config.no_docstring_rgx.match(node.name) is None:
                 self._check_docstring('class', node)
    +
         @check_messages('missing-docstring', 'empty-docstring')
         def visit_function(self, node):
             if self.config.no_docstring_rgx.match(node.name) is None:
                 ftype = node.is_method() and 'method' or 'function'
                 if isinstance(node.parent.frame(), astroid.Class):
                     overridden = False
    +                confidence = (INFERENCE if has_known_bases(node.parent.frame())
    +                              else INFERENCE_FAILURE)
                     # check if node is from a method overridden by its ancestor
                     for ancestor in node.parent.frame().ancestors():
                         if node.name in ancestor and \
    @@ -1063,11 +1138,13 @@ def visit_function(self, node):
                             overridden = True
                             break
                     self._check_docstring(ftype, node,
    -                                      report_missing=not overridden)
    +                                      report_missing=not overridden,
    +                                      confidence=confidence)
                 else:
                     self._check_docstring(ftype, node)
     
    -    def _check_docstring(self, node_type, node, report_missing=True):
    +    def _check_docstring(self, node_type, node, report_missing=True,
    +                         confidence=HIGH):
             """check the node has a non empty docstring"""
             docstring = node.doc
             if docstring is None:
    @@ -1077,15 +1154,33 @@ def _check_docstring(self, node_type, node, report_missing=True):
                     lines = node.body[-1].lineno - node.body[0].lineno + 1
                 else:
                     lines = 0
    +
    +            if node_type == 'module' and not lines:
    +                # If the module has no body, there's no reason
    +                # to require a docstring.
    +                return
                 max_lines = self.config.docstring_min_length
     
                 if node_type != 'module' and max_lines > -1 and lines < max_lines:
                     return
                 self.stats['undocumented_'+node_type] += 1
    -            self.add_message('missing-docstring', node=node, args=(node_type,))
    +            if (node.body and isinstance(node.body[0], astroid.Discard) and
    +                    isinstance(node.body[0].value, astroid.CallFunc)):
    +                # Most likely a string with a format call. Let's see.
    +                func = safe_infer(node.body[0].value.func)
    +                if (isinstance(func, astroid.BoundMethod)
    +                        and isinstance(func.bound, astroid.Instance)):
    +                    # Strings in Python 3, others in Python 2.
    +                    if PY3K and func.bound.name == 'str':
    +                        return
    +                    elif func.bound.name in ('str', 'unicode', 'bytes'):
    +                        return
    +            self.add_message('missing-docstring', node=node, args=(node_type,),
    +                             confidence=confidence)
             elif not docstring.strip():
                 self.stats['undocumented_'+node_type] += 1
    -            self.add_message('empty-docstring', node=node, args=(node_type,))
    +            self.add_message('empty-docstring', node=node, args=(node_type,),
    +                             confidence=confidence)
     
     
     class PassChecker(_BasicChecker):
    @@ -1094,7 +1189,7 @@ class PassChecker(_BasicChecker):
                           'unnecessary-pass',
                           'Used when a "pass" statement that can be avoided is '
                           'encountered.'),
    -            }
    +           }
         @check_messages('unnecessary-pass')
         def visit_pass(self, node):
             if len(node.parent.child_sequence(node)) > 1:
    @@ -1114,7 +1209,7 @@ class LambdaForComprehensionChecker(_BasicChecker):
                           '"filter". It could be clearer as a list '
                           'comprehension or generator expression.',
                           {'maxversion': (3, 0)}),
    -            }
    +           }
     
         @check_messages('deprecated-lambda')
         def visit_callfunc(self, node):
    @@ -1127,7 +1222,7 @@ def visit_callfunc(self, node):
                 return
             infered = safe_infer(node.func)
             if (is_builtin_object(infered)
    -            and infered.name in ['map', 'filter']):
    +                and infered.name in ['map', 'filter']):
                 self.add_message('deprecated-lambda', node=node)
     
     
    diff --git a/pymode/libs/pylama/lint/pylama_pylint/pylint/checkers/classes.py b/pymode/libs/pylint/checkers/classes.py
    similarity index 65%
    rename from pymode/libs/pylama/lint/pylama_pylint/pylint/checkers/classes.py
    rename to pymode/libs/pylint/checkers/classes.py
    index f5e2783f..87e3bcfe 100644
    --- a/pymode/libs/pylama/lint/pylama_pylint/pylint/checkers/classes.py
    +++ b/pymode/libs/pylint/checkers/classes.py
    @@ -18,15 +18,20 @@
     from __future__ import generators
     
     import sys
    +from collections import defaultdict
     
     import astroid
    -from astroid import YES, Instance, are_exclusive, AssAttr
    -from astroid.bases import Generator
    +from astroid import YES, Instance, are_exclusive, AssAttr, Class
    +from astroid.bases import Generator, BUILTINS
    +from astroid.inference import InferenceContext
     
     from pylint.interfaces import IAstroidChecker
     from pylint.checkers import BaseChecker
    -from pylint.checkers.utils import (PYMETHODS, overrides_a_method,
    -    check_messages, is_attr_private, is_attr_protected, node_frame_class)
    +from pylint.checkers.utils import (
    +    PYMETHODS, overrides_a_method, check_messages, is_attr_private,
    +    is_attr_protected, node_frame_class, safe_infer, is_builtin_object,
    +    decorated_with_property, unimplemented_abstract_methods)
    +import six
     
     if sys.version_info >= (3, 0):
         NEXT_METHOD = '__next__'
    @@ -34,6 +39,32 @@
         NEXT_METHOD = 'next'
     ITER_METHODS = ('__iter__', '__getitem__')
     
    +def _called_in_methods(func, klass, methods):
    +    """ Check if the func was called in any of the given methods,
    +    belonging to the *klass*. Returns True if so, False otherwise.
    +    """
    +    if not isinstance(func, astroid.Function):
    +        return False
    +    for method in methods:
    +        try:
    +            infered = klass.getattr(method)
    +        except astroid.NotFoundError:
    +            continue
    +        for infer_method in infered:
    +            for callfunc in infer_method.nodes_of_class(astroid.CallFunc):
    +                try:
    +                    bound = next(callfunc.func.infer())
    +                except (astroid.InferenceError, StopIteration):
    +                    continue
    +                if not isinstance(bound, astroid.BoundMethod):
    +                    continue
    +                func_obj = bound._proxied
    +                if isinstance(func_obj, astroid.UnboundMethod):
    +                    func_obj = func_obj._proxied
    +                if func_obj.name == func.name:
    +                    return True
    +    return False
    +
     def class_is_abstract(node):
         """return true if the given class node should be considered as an abstract
         class
    @@ -44,13 +75,41 @@ def class_is_abstract(node):
                     return True
         return False
     
    +def _is_attribute_property(name, klass):
    +    """ Check if the given attribute *name* is a property
    +    in the given *klass*.
    +
    +    It will look for `property` calls or for functions
    +    with the given name, decorated by `property` or `property`
    +    subclasses.
    +    Returns ``True`` if the name is a property in the given klass,
    +    ``False`` otherwise.
    +    """
    +
    +    try:
    +        attributes = klass.getattr(name)
    +    except astroid.NotFoundError:
    +        return False
    +    property_name = "{0}.property".format(BUILTINS)
    +    for attr in attributes:
    +        try:
    +            infered = next(attr.infer())
    +        except astroid.InferenceError:
    +            continue
    +        if (isinstance(infered, astroid.Function) and
    +                decorated_with_property(infered)):
    +            return True
    +        if infered.pytype() == property_name:
    +            return True
    +    return False
    +
     
     MSGS = {
         'F0202': ('Unable to check methods signature (%s / %s)',
                   'method-check-failed',
    -              'Used when PyLint has been unable to check methods signature \
    -              compatibility for an unexpected reason. Please report this kind \
    -              if you don\'t make sense of it.'),
    +              'Used when Pylint has been unable to check methods signature '
    +              'compatibility for an unexpected reason. Please report this kind '
    +              'if you don\'t make sense of it.'),
     
         'E0202': ('An attribute defined in %s line %s hides this method',
                   'method-hidden',
    @@ -59,35 +118,35 @@ def class_is_abstract(node):
                   'client code.'),
         'E0203': ('Access to member %r before its definition line %s',
                   'access-member-before-definition',
    -              'Used when an instance member is accessed before it\'s actually\
    -              assigned.'),
    +              'Used when an instance member is accessed before it\'s actually '
    +              'assigned.'),
         'W0201': ('Attribute %r defined outside __init__',
                   'attribute-defined-outside-init',
    -              'Used when an instance attribute is defined outside the __init__\
    -              method.'),
    +              'Used when an instance attribute is defined outside the __init__ '
    +              'method.'),
     
         'W0212': ('Access to a protected member %s of a client class', # E0214
                   'protected-access',
    -              'Used when a protected member (i.e. class member with a name \
    -              beginning with an underscore) is access outside the class or a \
    -              descendant of the class where it\'s defined.'),
    +              'Used when a protected member (i.e. class member with a name '
    +              'beginning with an underscore) is access outside the class or a '
    +              'descendant of the class where it\'s defined.'),
     
         'E0211': ('Method has no argument',
                   'no-method-argument',
    -              'Used when a method which should have the bound instance as \
    -              first argument has no argument defined.'),
    +              'Used when a method which should have the bound instance as '
    +              'first argument has no argument defined.'),
         'E0213': ('Method should have "self" as first argument',
                   'no-self-argument',
    -              'Used when a method has an attribute different the "self" as\
    -              first argument. This is considered as an error since this is\
    -              a so common convention that you shouldn\'t break it!'),
    -    'C0202': ('Class method %s should have %s as first argument', # E0212
    +              'Used when a method has an attribute different the "self" as '
    +              'first argument. This is considered as an error since this is '
    +              'a so common convention that you shouldn\'t break it!'),
    +    'C0202': ('Class method %s should have %s as first argument',
                   'bad-classmethod-argument',
                   'Used when a class method has a first argument named differently '
                   'than the value specified in valid-classmethod-first-arg option '
                   '(default to "cls"), recommended to easily differentiate them '
                   'from regular instance methods.'),
    -    'C0203': ('Metaclass method %s should have %s as first argument', # E0214
    +    'C0203': ('Metaclass method %s should have %s as first argument',
                   'bad-mcs-method-argument',
                   'Used when a metaclass method has a first agument named '
                   'differently than the value specified in valid-classmethod-first'
    @@ -105,69 +164,77 @@ def class_is_abstract(node):
                   'Used when a static method has "self" or a value specified in '
                   'valid-classmethod-first-arg option or '
                   'valid-metaclass-classmethod-first-arg option as first argument.'
    -              ),
    +             ),
         'R0201': ('Method could be a function',
                   'no-self-use',
    -              'Used when a method doesn\'t use its bound instance, and so could\
    -              be written as a function.'
    -              ),
    +              'Used when a method doesn\'t use its bound instance, and so could '
    +              'be written as a function.'
    +             ),
     
         'E0221': ('Interface resolved to %s is not a class',
                   'interface-is-not-class',
    -              'Used when a class claims to implement an interface which is not \
    -              a class.'),
    +              'Used when a class claims to implement an interface which is not '
    +              'a class.'),
         'E0222': ('Missing method %r from %s interface',
                   'missing-interface-method',
    -              'Used when a method declared in an interface is missing from a \
    -              class implementing this interface'),
    -    'W0221': ('Arguments number differs from %s method',
    +              'Used when a method declared in an interface is missing from a '
    +              'class implementing this interface'),
    +    'W0221': ('Arguments number differs from %s %r method',
                   'arguments-differ',
    -              'Used when a method has a different number of arguments than in \
    -              the implemented interface or in an overridden method.'),
    -    'W0222': ('Signature differs from %s method',
    +              'Used when a method has a different number of arguments than in '
    +              'the implemented interface or in an overridden method.'),
    +    'W0222': ('Signature differs from %s %r method',
                   'signature-differs',
    -              'Used when a method signature is different than in the \
    -              implemented interface or in an overridden method.'),
    +              'Used when a method signature is different than in the '
    +              'implemented interface or in an overridden method.'),
         'W0223': ('Method %r is abstract in class %r but is not overridden',
                   'abstract-method',
    -              'Used when an abstract method (i.e. raise NotImplementedError) is \
    -              not overridden in concrete class.'
    -              ),
    -    'F0220': ('failed to resolve interfaces implemented by %s (%s)', # W0224
    +              'Used when an abstract method (i.e. raise NotImplementedError) is '
    +              'not overridden in concrete class.'
    +             ),
    +    'F0220': ('failed to resolve interfaces implemented by %s (%s)',
                   'unresolved-interface',
    -              'Used when a PyLint as failed to find interfaces implemented by \
    -               a class'),
     +              'Used when Pylint has failed to find interfaces implemented by '
     +              'a class'),
     
     
         'W0231': ('__init__ method from base class %r is not called',
                   'super-init-not-called',
    -              'Used when an ancestor class method has an __init__ method \
    -              which is not called by a derived class.'),
    +              'Used when an ancestor class method has an __init__ method '
    +              'which is not called by a derived class.'),
         'W0232': ('Class has no __init__ method',
                   'no-init',
    -              'Used when a class has no __init__ method, neither its parent \
    -              classes.'),
    +              'Used when a class has no __init__ method, neither its parent '
    +              'classes.'),
         'W0233': ('__init__ method from a non direct base class %r is called',
                   'non-parent-init-called',
    -              'Used when an __init__ method is called on a class which is not \
    -              in the direct ancestors for the analysed class.'),
    +              'Used when an __init__ method is called on a class which is not '
    +              'in the direct ancestors for the analysed class.'),
         'W0234': ('__iter__ returns non-iterator',
                   'non-iterator-returned',
    -              'Used when an __iter__ method returns something which is not an \
    -               iterable (i.e. has no `%s` method)' % NEXT_METHOD),
    +              'Used when an __iter__ method returns something which is not an '
    +               'iterable (i.e. has no `%s` method)' % NEXT_METHOD),
         'E0235': ('__exit__ must accept 3 arguments: type, value, traceback',
                   'bad-context-manager',
    -              'Used when the __exit__ special method, belonging to a \
    -               context manager, does not accept 3 arguments \
    -               (type, value, traceback).'),
    +              'Used when the __exit__ special method, belonging to a '
    +              'context manager, does not accept 3 arguments '
    +              '(type, value, traceback).'),
         'E0236': ('Invalid object %r in __slots__, must contain '
                   'only non empty strings',
                   'invalid-slots-object',
                   'Used when an invalid (non-string) object occurs in __slots__.'),
    +    'E0237': ('Assigning to attribute %r not defined in class slots',
    +              'assigning-non-slot',
    +              'Used when assigning to an attribute not defined '
    +              'in the class slots.'),
         'E0238': ('Invalid __slots__ object',
                   'invalid-slots',
                   'Used when an invalid __slots__ is found in class. '
    -              'Only a string, an iterable or a sequence is permitted.')
    +              'Only a string, an iterable or a sequence is permitted.'),
    +    'E0239': ('Inheriting %r, which is not a class.',
    +              'inherit-non-class',
    +              'Used when a class inherits from something which is not a '
    +              'class.'),
     
     
         }
    @@ -193,45 +260,52 @@ class ClassChecker(BaseChecker):
         # configuration options
         options = (('ignore-iface-methods',
                     {'default' : (#zope interface
    -        'isImplementedBy', 'deferred', 'extends', 'names',
    -        'namesAndDescriptions', 'queryDescriptionFor', 'getBases',
    -        'getDescriptionFor', 'getDoc', 'getName', 'getTaggedValue',
    -        'getTaggedValueTags', 'isEqualOrExtendedBy', 'setTaggedValue',
    -        'isImplementedByInstancesOf',
    -        # twisted
    -        'adaptWith',
    -        # logilab.common interface
    -        'is_implemented_by'),
    +                    'isImplementedBy', 'deferred', 'extends', 'names',
    +                    'namesAndDescriptions', 'queryDescriptionFor', 'getBases',
    +                    'getDescriptionFor', 'getDoc', 'getName', 'getTaggedValue',
    +                    'getTaggedValueTags', 'isEqualOrExtendedBy', 'setTaggedValue',
    +                    'isImplementedByInstancesOf',
    +                    # twisted
    +                    'adaptWith',
    +                    # logilab.common interface
    +                    'is_implemented_by'),
                      'type' : 'csv',
                      'metavar' : '',
                      'help' : 'List of interface methods to ignore, \
     separated by a comma. This is used for instance to not check methods defines \
     in Zope\'s Interface base class.'}
    -                ),
    -
    +               ),
                    ('defining-attr-methods',
                     {'default' : ('__init__', '__new__', 'setUp'),
                      'type' : 'csv',
                      'metavar' : '',
                      'help' : 'List of method names used to declare (i.e. assign) \
     instance attributes.'}
    -                ),
    +               ),
                    ('valid-classmethod-first-arg',
                     {'default' : ('cls',),
                      'type' : 'csv',
                      'metavar' : '',
                      'help' : 'List of valid names for the first argument in \
     a class method.'}
    -                ),
    +               ),
                    ('valid-metaclass-classmethod-first-arg',
                     {'default' : ('mcs',),
                      'type' : 'csv',
                      'metavar' : '',
                      'help' : 'List of valid names for the first argument in \
     a metaclass class method.'}
    -                ),
    -
    -               )
    +               ),
    +               ('exclude-protected',
    +                {
    +                    'default': (
    +                        # namedtuple public API.
    +                        '_asdict', '_fields', '_replace', '_source', '_make'),
    +                    'type': 'csv',
    +                    'metavar': '',
    +                    'help': ('List of member names, which should be excluded '
    +                             'from the protected access warning.')}
    +               ))
     
         def __init__(self, linter=None):
             BaseChecker.__init__(self, linter)
    @@ -242,7 +316,7 @@ def __init__(self, linter=None):
         def visit_class(self, node):
             """init visit variable _accessed and check interfaces
             """
    -        self._accessed.append({})
    +        self._accessed.append(defaultdict(list))
             self._check_bases_classes(node)
             self._check_interfaces(node)
             # if not an interface, exception, metaclass
    @@ -252,8 +326,27 @@ def visit_class(self, node):
                 except astroid.NotFoundError:
                     self.add_message('no-init', args=node, node=node)
             self._check_slots(node)
    +        self._check_proper_bases(node)
    +
    +    @check_messages('inherit-non-class')
    +    def _check_proper_bases(self, node):
    +        """
    +        Detect that a class inherits something which is not
    +        a class or a type.
    +        """
    +        for base in node.bases:
    +            ancestor = safe_infer(base)
    +            if ancestor in (YES, None):
    +                continue
    +            if (isinstance(ancestor, astroid.Instance) and
    +                    ancestor.is_subtype_of('%s.type' % (BUILTINS,))):
    +                continue
    +            if not isinstance(ancestor, astroid.Class):
    +                self.add_message('inherit-non-class',
    +                                 args=base.as_string(), node=node)
     
    -    @check_messages('access-member-before-definition', 'attribute-defined-outside-init')
    +    @check_messages('access-member-before-definition',
    +                    'attribute-defined-outside-init')
         def leave_class(self, cnode):
             """close a class node:
             check that instance attributes are defined in __init__ and check
    @@ -267,33 +360,45 @@ def leave_class(self, cnode):
             if not self.linter.is_message_enabled('attribute-defined-outside-init'):
                 return
             defining_methods = self.config.defining_attr_methods
    -        for attr, nodes in cnode.instance_attrs.iteritems():
    +        current_module = cnode.root()
    +        for attr, nodes in six.iteritems(cnode.instance_attrs):
     +            # skip nodes which are not in the current module, as they may
     +            # screw up the output and it's not worth it
                 nodes = [n for n in nodes if not
    -                    isinstance(n.statement(), (astroid.Delete, astroid.AugAssign))]
    +                     isinstance(n.statement(), (astroid.Delete, astroid.AugAssign))
    +                     and n.root() is current_module]
                 if not nodes:
                     continue # error detected by typechecking
    -            attr_defined = False
                 # check if any method attr is defined in is a defining method
    -            for node in nodes:
    -                if node.frame().name in defining_methods:
    -                    attr_defined = True
    -            if not attr_defined:
    -                # check attribute is defined in a parent's __init__
    -                for parent in cnode.instance_attr_ancestors(attr):
    -                    attr_defined = False
    -                    # check if any parent method attr is defined in is a defining method
    -                    for node in parent.instance_attrs[attr]:
    -                        if node.frame().name in defining_methods:
    -                            attr_defined = True
    -                    if attr_defined:
    -                        # we're done :)
    -                        break
    -                else:
    -                    # check attribute is defined as a class attribute
    -                    try:
    -                        cnode.local_attr(attr)
    -                    except astroid.NotFoundError:
    -                        self.add_message('attribute-defined-outside-init', args=attr, node=node)
    +            if any(node.frame().name in defining_methods
    +                   for node in nodes):
    +                continue
    +
    +            # check attribute is defined in a parent's __init__
    +            for parent in cnode.instance_attr_ancestors(attr):
    +                attr_defined = False
     +                # check if any parent method the attr is defined in is a defining method
    +                for node in parent.instance_attrs[attr]:
    +                    if node.frame().name in defining_methods:
    +                        attr_defined = True
    +                if attr_defined:
    +                    # we're done :)
    +                    break
    +            else:
    +                # check attribute is defined as a class attribute
    +                try:
    +                    cnode.local_attr(attr)
    +                except astroid.NotFoundError:
    +                    for node in nodes:
    +                        if node.frame().name not in defining_methods:
    +                            # If the attribute was set by a callfunc in any
    +                            # of the defining methods, then don't emit
    +                            # the warning.
    +                            if _called_in_methods(node.frame(), cnode,
    +                                                  defining_methods):
    +                                continue
    +                            self.add_message('attribute-defined-outside-init',
    +                                             args=attr, node=node)
     
         def visit_function(self, node):
             """check method arguments, overriding"""
    @@ -334,8 +439,14 @@ def visit_function(self, node):
             # check if the method is hidden by an attribute
             try:
                 overridden = klass.instance_attr(node.name)[0] # XXX
    -            args = (overridden.root().name, overridden.fromlineno)
    -            self.add_message('method-hidden', args=args, node=node)
    +            overridden_frame = overridden.frame()
    +            if (isinstance(overridden_frame, astroid.Function)
    +                    and overridden_frame.type == 'method'):
    +                overridden_frame = overridden_frame.parent.frame()
    +            if (isinstance(overridden_frame, Class)
    +                    and klass.is_subtype_of(overridden_frame.qname())):
    +                args = (overridden.root().name, overridden.fromlineno)
    +                self.add_message('method-hidden', args=args, node=node)
             except astroid.NotFoundError:
                 pass
     
    @@ -385,7 +496,7 @@ def _check_slots_elt(self, elt):
                 if infered is YES:
                     continue
                 if (not isinstance(infered, astroid.Const) or
    -                not isinstance(infered.value, str)):
    +                    not isinstance(infered.value, six.string_types)):
                     self.add_message('invalid-slots-object',
                                      args=infered.as_string(),
                                      node=elt)
    @@ -403,7 +514,7 @@ def _check_iter(self, node):
     
             for infered_node in infered:
                 if (infered_node is YES
    -                or isinstance(infered_node, Generator)):
    +                    or isinstance(infered_node, Generator)):
                     continue
                 if isinstance(infered_node, astroid.Instance):
                     try:
    @@ -436,10 +547,10 @@ def leave_function(self, node):
                     return
                 class_node = node.parent.frame()
                 if (self._meth_could_be_func and node.type == 'method'
    -                and not node.name in PYMETHODS
    -                and not (node.is_abstract() or
    -                         overrides_a_method(class_node, node.name))
    -                and class_node.type != 'interface'):
    +                    and not node.name in PYMETHODS
    +                    and not (node.is_abstract() or
    +                             overrides_a_method(class_node, node.name))
    +                    and class_node.type != 'interface'):
                     self.add_message('no-self-use', node=node)
     
         def visit_getattr(self, node):
    @@ -451,7 +562,7 @@ class member from outside its class (but ignore __special__
             attrname = node.attrname
             # Check self
             if self.is_first_attr(node):
    -            self._accessed[-1].setdefault(attrname, []).append(node)
    +            self._accessed[-1][attrname].append(node)
                 return
             if not self.linter.is_message_enabled('protected-access'):
                 return
    @@ -460,7 +571,39 @@ class member from outside its class (but ignore __special__
     
         def visit_assattr(self, node):
             if isinstance(node.ass_type(), astroid.AugAssign) and self.is_first_attr(node):
    -            self._accessed[-1].setdefault(node.attrname, []).append(node)
    +            self._accessed[-1][node.attrname].append(node)
    +        self._check_in_slots(node)
    +
    +    def _check_in_slots(self, node):
    +        """ Check that the given assattr node
    +        is defined in the class slots.
    +        """
    +        infered = safe_infer(node.expr)
    +        if infered and isinstance(infered, Instance):
    +            klass = infered._proxied
    +            if '__slots__' not in klass.locals or not klass.newstyle:
    +                return
    +
    +            slots = klass.slots()
    +            if slots is None:
    +                return
    +            # If any ancestor doesn't use slots, the slots
    +            # defined for this class are superfluous.
    +            if any('__slots__' not in ancestor.locals and
    +                   ancestor.name != 'object'
    +                   for ancestor in klass.ancestors()):
    +                return
    +
    +            if not any(slot.value == node.attrname for slot in slots):
    +                # If we have a '__dict__' in slots, then
    +                # assigning any name is valid.
    +                if not any(slot.value == '__dict__' for slot in slots):
    +                    if _is_attribute_property(node.attrname, klass):
    +                        # Properties circumvent the slots mechanism,
    +                        # so we should not emit a warning for them.
    +                        return
    +                    self.add_message('assigning-non-slot',
    +                                     args=(node.attrname, ), node=node)
     
         @check_messages('protected-access')
         def visit_assign(self, assign_node):
    @@ -485,7 +628,8 @@ def _check_protected_attribute_access(self, node):
             '''
             attrname = node.attrname
     
    -        if is_attr_protected(attrname):
    +        if (is_attr_protected(attrname) and
    +                attrname not in self.config.exclude_protected):
     
                 klass = node_frame_class(node)
     
    @@ -508,6 +652,23 @@ def _check_protected_attribute_access(self, node):
                 # We are in a class, one remaining valid cases, Klass._attr inside
                 # Klass
                 if not (callee == klass.name or callee in klass.basenames):
    +                # Detect property assignments in the body of the class.
    +                # This is acceptable:
    +                #
    +                # class A:
    +                #     b = property(lambda: self._b)
    +
    +                stmt = node.parent.statement()
    +                try:
    +                    if (isinstance(stmt, astroid.Assign) and
    +                            (stmt in klass.body or klass.parent_of(stmt)) and
    +                            isinstance(stmt.value, astroid.CallFunc) and
    +                            isinstance(stmt.value.func, astroid.Name) and
    +                            stmt.value.func.name == 'property' and
    +                            is_builtin_object(next(stmt.value.func.infer(), None))):
    +                        return
    +                except astroid.InferenceError:
    +                    pass
                     self.add_message('protected-access', node=node, args=attrname)
     
         def visit_name(self, node):
    @@ -521,7 +682,7 @@ def visit_name(self, node):
         def _check_accessed_members(self, node, accessed):
             """check that accessed members are defined"""
             # XXX refactor, probably much simpler now that E0201 is in type checker
    -        for attr, nodes in accessed.iteritems():
    +        for attr, nodes in six.iteritems(accessed):
                 # deactivate "except doesn't do anything", that's expected
                 # pylint: disable=W0704
                 try:
    @@ -533,7 +694,7 @@ def _check_accessed_members(self, node, accessed):
                     pass
                 # is it an instance attribute of a parent class ?
                 try:
    -                node.instance_attr_ancestors(attr).next()
    +                next(node.instance_attr_ancestors(attr))
                     # yes, stop here
                     continue
                 except StopIteration:
    @@ -565,7 +726,8 @@ def _check_accessed_members(self, node, accessed):
                         lno = defstmt.fromlineno
                         for _node in nodes:
                             if _node.frame() is frame and _node.fromlineno < lno \
    -                           and not are_exclusive(_node.statement(), defstmt, ('AttributeError', 'Exception', 'BaseException')):
    +                           and not are_exclusive(_node.statement(), defstmt,
    +                                                 ('AttributeError', 'Exception', 'BaseException')):
                                 self.add_message('access-member-before-definition',
                                                  node=_node, args=(attr, lno))
     
    @@ -588,8 +750,8 @@ def _check_first_arg_for_type(self, node, metaclass=0):
             # static method
             if node.type == 'staticmethod':
                 if (first_arg == 'self' or
    -                first_arg in self.config.valid_classmethod_first_arg or
    -                first_arg in self.config.valid_metaclass_classmethod_first_arg):
    +                    first_arg in self.config.valid_classmethod_first_arg or
    +                    first_arg in self.config.valid_metaclass_classmethod_first_arg):
                     self.add_message('bad-staticmethod-argument', args=first, node=node)
                     return
                 self._first_attrs[-1] = None
    @@ -600,20 +762,25 @@ def _check_first_arg_for_type(self, node, metaclass=0):
             elif metaclass:
                 # metaclass __new__ or classmethod
                 if node.type == 'classmethod':
    -                self._check_first_arg_config(first,
    +                self._check_first_arg_config(
    +                    first,
                         self.config.valid_metaclass_classmethod_first_arg, node,
                         'bad-mcs-classmethod-argument', node.name)
                 # metaclass regular method
                 else:
    -                self._check_first_arg_config(first,
    -                    self.config.valid_classmethod_first_arg, node, 'bad-mcs-method-argument',
    +                self._check_first_arg_config(
    +                    first,
    +                    self.config.valid_classmethod_first_arg, node,
    +                    'bad-mcs-method-argument',
                         node.name)
             # regular class
             else:
                 # class method
                 if node.type == 'classmethod':
    -                self._check_first_arg_config(first,
    -                    self.config.valid_classmethod_first_arg, node, 'bad-classmethod-argument',
    +                self._check_first_arg_config(
    +                    first,
    +                    self.config.valid_classmethod_first_arg, node,
    +                    'bad-classmethod-argument',
                         node.name)
                 # regular method without self as argument
                 elif first != 'self':
    @@ -625,32 +792,36 @@ def _check_first_arg_config(self, first, config, node, message,
                 if len(config) == 1:
                     valid = repr(config[0])
                 else:
    -                valid = ', '.join(
    -                  repr(v)
    -                  for v in config[:-1])
    -                valid = '%s or %r' % (
    -                    valid, config[-1])
    +                valid = ', '.join(repr(v) for v in config[:-1])
    +                valid = '%s or %r' % (valid, config[-1])
                 self.add_message(message, args=(method_name, valid), node=node)
     
         def _check_bases_classes(self, node):
             """check that the given class node implements abstract methods from
             base classes
             """
    +        def is_abstract(method):
    +            return method.is_abstract(pass_is_abstract=False)
    +
             # check if this class abstract
             if class_is_abstract(node):
                 return
    -        for method in node.methods():
    +
    +        methods = sorted(
    +            unimplemented_abstract_methods(node, is_abstract).items(),
    +            key=lambda item: item[0],
    +        )
    +        for name, method in methods:
                 owner = method.parent.frame()
                 if owner is node:
                     continue
                 # owner is not this class, it must be a parent class
                 # check that the ancestor's method is not abstract
    -            if method.name in node.locals:
    +            if name in node.locals:
                     # it is redefined as an attribute or with a descriptor
                     continue
    -            if method.is_abstract(pass_is_abstract=False):
    -                self.add_message('abstract-method', node=node,
    -                                 args=(method.name, owner.name))
    +            self.add_message('abstract-method', node=node,
    +                             args=(name, owner.name))
     
         def _check_interfaces(self, node):
             """check that the given class node really implements declared
    @@ -678,7 +849,8 @@ def iface_handler(obj):
                         try:
                             method = node_method(node, name)
                         except astroid.NotFoundError:
    -                        self.add_message('missing-interface-method', args=(name, iface.name),
    +                        self.add_message('missing-interface-method',
    +                                         args=(name, iface.name),
                                              node=node)
                             continue
                         # ignore inherited methods
    @@ -686,7 +858,7 @@ def iface_handler(obj):
                             continue
                         # check signature
                         self._check_signature(method, imethod,
    -                                         '%s interface' % iface.name)
    +                                          '%s interface' % iface.name)
             except astroid.InferenceError:
                 if e0221_hack[0]:
                     return
    @@ -705,7 +877,7 @@ def _check_init(self, node):
             method
             """
             if (not self.linter.is_message_enabled('super-init-not-called') and
    -            not self.linter.is_message_enabled('non-parent-init-called')):
    +                not self.linter.is_message_enabled('non-parent-init-called')):
                 return
             klass_node = node.parent.frame()
             to_call = _ancestors_to_call(klass_node)
    @@ -717,21 +889,34 @@ def _check_init(self, node):
                     continue
                 # skip the test if using super
                 if isinstance(expr.expr, astroid.CallFunc) and \
    -               isinstance(expr.expr.func, astroid.Name) and \
    +                   isinstance(expr.expr.func, astroid.Name) and \
                    expr.expr.func.name == 'super':
                     return
                 try:
    -                klass = expr.expr.infer().next()
    -                if klass is YES:
    -                    continue
    -                try:
    -                    del not_called_yet[klass]
    -                except KeyError:
    -                    if klass not in to_call:
    -                        self.add_message('non-parent-init-called', node=expr, args=klass.name)
    +                for klass in expr.expr.infer():
    +                    if klass is YES:
    +                        continue
    +                    # The infered klass can be super(), which was
    +                    # assigned to a variable and the `__init__`
    +                    # was called later.
    +                    #
    +                    # base = super()
    +                    # base.__init__(...)
    +
    +                    if (isinstance(klass, astroid.Instance) and
    +                            isinstance(klass._proxied, astroid.Class) and
    +                            is_builtin_object(klass._proxied) and
    +                            klass._proxied.name == 'super'):
    +                        return
    +                    try:
    +                        del not_called_yet[klass]
    +                    except KeyError:
    +                        if klass not in to_call:
    +                            self.add_message('non-parent-init-called',
    +                                             node=expr, args=klass.name)
                 except astroid.InferenceError:
                     continue
    -        for klass, method in not_called_yet.iteritems():
    +        for klass, method in six.iteritems(not_called_yet):
                 if klass.name == 'object' or method.parent.name == 'object':
                     continue
                 self.add_message('super-init-not-called', args=klass.name, node=node)
    @@ -743,7 +928,8 @@ def _check_signature(self, method1, refmethod, class_type):
             """
             if not (isinstance(method1, astroid.Function)
                     and isinstance(refmethod, astroid.Function)):
    -            self.add_message('method-check-failed', args=(method1, refmethod), node=method1)
    +            self.add_message('method-check-failed',
    +                             args=(method1, refmethod), node=method1)
                 return
             # don't care about functions with unknown argument (builtins)
             if method1.args.args is None or refmethod.args.args is None:
    @@ -754,9 +940,13 @@ def _check_signature(self, method1, refmethod, class_type):
             if is_attr_private(method1.name):
                 return
             if len(method1.args.args) != len(refmethod.args.args):
    -            self.add_message('arguments-differ', args=class_type, node=method1)
    +            self.add_message('arguments-differ',
    +                             args=(class_type, method1.name),
    +                             node=method1)
             elif len(method1.args.defaults) < len(refmethod.args.defaults):
    -            self.add_message('signature-differs', args=class_type, node=method1)
    +            self.add_message('signature-differs',
    +                             args=(class_type, method1.name),
    +                             node=method1)
     
         def is_first_attr(self, node):
             """Check that attribute lookup name use first attribute variable name
    @@ -772,7 +962,7 @@ def _ancestors_to_call(klass_node, method='__init__'):
         to_call = {}
         for base_node in klass_node.ancestors(recurs=False):
             try:
    -            to_call[base_node] = base_node.igetattr(method).next()
    +            to_call[base_node] = next(base_node.igetattr(method))
             except astroid.InferenceError:
                 continue
         return to_call
    diff --git a/pymode/libs/pylama/lint/pylama_pylint/pylint/checkers/design_analysis.py b/pymode/libs/pylint/checkers/design_analysis.py
    similarity index 75%
    rename from pymode/libs/pylama/lint/pylama_pylint/pylint/checkers/design_analysis.py
    rename to pymode/libs/pylint/checkers/design_analysis.py
    index c9ef4dfa..9ff10bf3 100644
    --- a/pymode/libs/pylama/lint/pylama_pylint/pylint/checkers/design_analysis.py
    +++ b/pymode/libs/pylint/checkers/design_analysis.py
    @@ -15,29 +15,19 @@
     # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
     """check for signs of poor design"""
     
    -from astroid import Function, If, InferenceError
    +import re
    +from collections import defaultdict
    +
    +from astroid import If, InferenceError
     
     from pylint.interfaces import IAstroidChecker
     from pylint.checkers import BaseChecker
     from pylint.checkers.utils import check_messages
     
    -import re
    -
     # regexp for ignored argument name
     IGNORED_ARGUMENT_NAMES = re.compile('_.*')
     
     
    -def class_is_abstract(klass):
    -    """return true if the given class node should be considered as an abstract
    -    class
    -    """
    -    for attr in klass.values():
    -        if isinstance(attr, Function):
    -            if attr.is_abstract(pass_is_abstract=False):
    -                return True
    -    return False
    -
    -
     MSGS = {
         'R0901': ('Too many ancestors (%s/%s)',
                   'too-many-ancestors',
    @@ -74,14 +64,6 @@ def class_is_abstract(klass):
                   'too-many-statements',
                   'Used when a function or method has too many statements. You \
                   should then split it in smaller functions / methods.'),
    -
    -    'R0921': ('Abstract class not referenced',
    -              'abstract-class-not-used',
    -              'Used when an abstract class is not used as ancestor anywhere.'),
    -    'R0922': ('Abstract class is only referenced %s times',
    -              'abstract-class-little-used',
    -              'Used when an abstract class is used less than X times as \
    -              ancestor.'),
         'R0923': ('Interface not implemented',
                   'interface-not-implemented',
                   'Used when an interface class is not implemented anywhere.'),
    @@ -105,68 +87,66 @@ class MisdesignChecker(BaseChecker):
         options = (('max-args',
                     {'default' : 5, 'type' : 'int', 'metavar' : '',
                      'help': 'Maximum number of arguments for function / method'}
    -                ),
    +               ),
                    ('ignored-argument-names',
                     {'default' : IGNORED_ARGUMENT_NAMES,
                      'type' :'regexp', 'metavar' : '',
                      'help' : 'Argument names that match this expression will be '
                               'ignored. Default to name with leading underscore'}
    -                ),
    +               ),
                    ('max-locals',
                     {'default' : 15, 'type' : 'int', 'metavar' : '',
                      'help': 'Maximum number of locals for function / method body'}
    -                ),
    +               ),
                    ('max-returns',
                     {'default' : 6, 'type' : 'int', 'metavar' : '',
                      'help': 'Maximum number of return / yield for function / '
                              'method body'}
    -                ),
    +               ),
                    ('max-branches',
                     {'default' : 12, 'type' : 'int', 'metavar' : '',
                      'help': 'Maximum number of branch for function / method body'}
    -                ),
    +               ),
                    ('max-statements',
                     {'default' : 50, 'type' : 'int', 'metavar' : '',
                      'help': 'Maximum number of statements in function / method '
                              'body'}
    -                ),
    +               ),
                    ('max-parents',
                     {'default' : 7,
                      'type' : 'int',
                      'metavar' : '',
                      'help' : 'Maximum number of parents for a class (see R0901).'}
    -                ),
    +               ),
                    ('max-attributes',
                     {'default' : 7,
                      'type' : 'int',
                      'metavar' : '',
                      'help' : 'Maximum number of attributes for a class \
     (see R0902).'}
    -                ),
    +               ),
                    ('min-public-methods',
                     {'default' : 2,
                      'type' : 'int',
                      'metavar' : '',
                      'help' : 'Minimum number of public methods for a class \
     (see R0903).'}
    -                ),
    +               ),
                    ('max-public-methods',
                     {'default' : 20,
                      'type' : 'int',
                      'metavar' : '',
                      'help' : 'Maximum number of public methods for a class \
     (see R0904).'}
    -                ),
    -               )
    +               ),
    +              )
     
         def __init__(self, linter=None):
             BaseChecker.__init__(self, linter)
             self.stats = None
             self._returns = None
             self._branches = None
    -        self._used_abstracts = None
             self._used_ifaces = None
    -        self._abstracts = None
             self._ifaces = None
             self._stmts = 0
     
    @@ -174,33 +154,22 @@ def open(self):
             """initialize visit variables"""
             self.stats = self.linter.add_stats()
             self._returns = []
    -        self._branches = []
    -        self._used_abstracts = {}
    +        self._branches = defaultdict(int)
             self._used_ifaces = {}
    -        self._abstracts = []
             self._ifaces = []
     
    -    # Check 'R0921', 'R0922', 'R0923'
         def close(self):
    -        """check that abstract/interface classes are used"""
    -        for abstract in self._abstracts:
    -            if not abstract in self._used_abstracts:
    -                self.add_message('abstract-class-not-used', node=abstract)
    -            elif self._used_abstracts[abstract] < 2:
    -                self.add_message('abstract-class-little-used', node=abstract,
    -                                 args=self._used_abstracts[abstract])
    +        """check that interface classes are used"""
             for iface in self._ifaces:
                 if not iface in self._used_ifaces:
                     self.add_message('interface-not-implemented', node=iface)
     
         @check_messages('too-many-ancestors', 'too-many-instance-attributes',
                         'too-few-public-methods', 'too-many-public-methods',
    -                    'abstract-class-not-used', 'abstract-class-little-used',
                         'interface-not-implemented')
         def visit_class(self, node):
             """check size of inheritance hierarchy and number of instance attributes
             """
    -        self._inc_branch()
             # Is the total inheritance hierarchy is 7 or less?
             nb_parents = len(list(node.ancestors()))
             if nb_parents > self.config.max_parents:
    @@ -213,10 +182,8 @@ def visit_class(self, node):
                 self.add_message('too-many-instance-attributes', node=node,
                                  args=(len(node.instance_attrs),
                                        self.config.max_attributes))
    -        # update abstract / interface classes structures
    -        if class_is_abstract(node):
    -            self._abstracts.append(node)
    -        elif node.type == 'interface' and node.name != 'Interface':
    +        # update interface classes structures
    +        if node.type == 'interface' and node.name != 'Interface':
                 self._ifaces.append(node)
                 for parent in node.ancestors(False):
                     if parent.name == 'Interface':
    @@ -228,49 +195,47 @@ def visit_class(self, node):
             except InferenceError:
                 # XXX log ?
                 pass
    -        for parent in node.ancestors():
    -            try:
    -                self._used_abstracts[parent] += 1
    -            except KeyError:
    -                self._used_abstracts[parent] = 1
     
    -    @check_messages('too-many-ancestors', 'too-many-instance-attributes',
    -                    'too-few-public-methods', 'too-many-public-methods',
    -                    'abstract-class-not-used', 'abstract-class-little-used',
    -                    'interface-not-implemented')
    +    @check_messages('too-few-public-methods', 'too-many-public-methods')
         def leave_class(self, node):
             """check number of public methods"""
    -        nb_public_methods = 0
    -        special_methods = set()
    -        for method in node.methods():
    -            if not method.name.startswith('_'):
    -                nb_public_methods += 1
    -            if method.name.startswith("__"):
    -                special_methods.add(method.name)
    -        # Does the class contain less than 20 public methods ?
    -        if nb_public_methods > self.config.max_public_methods:
    +        my_methods = sum(1 for method in node.mymethods()
    +                         if not method.name.startswith('_'))
    +        all_methods = sum(1 for method in node.methods()
    +                          if not method.name.startswith('_'))
    +
    +        # Does the class contain less than n public methods ?
    +        # This checks only the methods defined in the current class,
    +        # since the user might not have control over the classes
    +        # from the ancestors. It avoids some false positives
    +        # for classes such as unittest.TestCase, which provides
    +        # a lot of assert methods. It doesn't make sense to warn
    +        # when the user subclasses TestCase to add his own tests.
    +        if my_methods > self.config.max_public_methods:
                 self.add_message('too-many-public-methods', node=node,
    -                             args=(nb_public_methods,
    +                             args=(my_methods,
                                        self.config.max_public_methods))
             # stop here for exception, metaclass and interface classes
             if node.type != 'class':
                 return
    -        # Does the class contain more than 5 public methods ?
    -        if nb_public_methods < self.config.min_public_methods:
    -            self.add_message('R0903', node=node,
    -                             args=(nb_public_methods,
    +
    +        # Does the class contain more than n public methods ?
    +        # This checks all the methods defined by ancestors and
    +        # by the current class.
    +        if all_methods < self.config.min_public_methods:
    +            self.add_message('too-few-public-methods', node=node,
    +                             args=(all_methods,
                                        self.config.min_public_methods))
     
         @check_messages('too-many-return-statements', 'too-many-branches',
    -                    'too-many-arguments', 'too-many-locals', 'too-many-statements')
    +                    'too-many-arguments', 'too-many-locals',
    +                    'too-many-statements')
         def visit_function(self, node):
             """check function name, docstring, arguments, redefinition,
             variable names, max locals
             """
    -        self._inc_branch()
             # init branch and returns counters
             self._returns.append(0)
    -        self._branches.append(0)
             # check number of arguments
             args = node.args.args
             if args is not None:
    @@ -291,7 +256,9 @@ def visit_function(self, node):
             # init statements counter
             self._stmts = 1
     
    -    @check_messages('too-many-return-statements', 'too-many-branches', 'too-many-arguments', 'too-many-locals', 'too-many-statements')
    +    @check_messages('too-many-return-statements', 'too-many-branches',
    +                    'too-many-arguments', 'too-many-locals',
    +                    'too-many-statements')
         def leave_function(self, node):
             """most of the work is done here on close:
             checks for max returns, branch, return in __init__
    @@ -300,7 +267,7 @@ def leave_function(self, node):
             if returns > self.config.max_returns:
                 self.add_message('too-many-return-statements', node=node,
                                  args=(returns, self.config.max_returns))
    -        branches = self._branches.pop()
    +        branches = self._branches[node]
             if branches > self.config.max_branches:
                 self.add_message('too-many-branches', node=node,
                                  args=(branches, self.config.max_branches))
    @@ -327,12 +294,12 @@ def visit_tryexcept(self, node):
             branches = len(node.handlers)
             if node.orelse:
                 branches += 1
    -        self._inc_branch(branches)
    +        self._inc_branch(node, branches)
             self._stmts += branches
     
    -    def visit_tryfinally(self, _):
    +    def visit_tryfinally(self, node):
             """increments the branches counter"""
    -        self._inc_branch(2)
    +        self._inc_branch(node, 2)
             self._stmts += 2
     
         def visit_if(self, node):
    @@ -342,7 +309,7 @@ def visit_if(self, node):
             if node.orelse and (len(node.orelse) > 1 or
                                 not isinstance(node.orelse[0], If)):
                 branches += 1
    -        self._inc_branch(branches)
    +        self._inc_branch(node, branches)
             self._stmts += branches
     
         def visit_while(self, node):
    @@ -350,17 +317,14 @@ def visit_while(self, node):
             branches = 1
             if node.orelse:
                 branches += 1
    -        self._inc_branch(branches)
    +        self._inc_branch(node, branches)
     
         visit_for = visit_while
     
    -    def _inc_branch(self, branchesnum=1):
    +    def _inc_branch(self, node, branchesnum=1):
             """increments the branches counter"""
    -        branches = self._branches
    -        for i in xrange(len(branches)):
    -            branches[i] += branchesnum
    +        self._branches[node.scope()] += branchesnum
     
    -    # FIXME: make a nice report...
     
     def register(linter):
         """required method to auto register this checker """
    diff --git a/pymode/libs/pylama/lint/pylama_pylint/pylint/checkers/exceptions.py b/pymode/libs/pylint/checkers/exceptions.py
    similarity index 52%
    rename from pymode/libs/pylama/lint/pylama_pylint/pylint/checkers/exceptions.py
    rename to pymode/libs/pylint/checkers/exceptions.py
    index 7e0f3fca..88a8f225 100644
    --- a/pymode/libs/pylama/lint/pylama_pylint/pylint/checkers/exceptions.py
    +++ b/pymode/libs/pylint/checkers/exceptions.py
    @@ -16,43 +16,51 @@
     """
     import sys
     
    -from logilab.common.compat import builtins
    -BUILTINS_NAME = builtins.__name__
     import astroid
    -from astroid import YES, Instance, unpack_infer
    +from astroid import YES, Instance, unpack_infer, List, Tuple
    +from logilab.common.compat import builtins
     
     from pylint.checkers import BaseChecker
    -from pylint.checkers.utils import is_empty, is_raising, check_messages
    -from pylint.interfaces import IAstroidChecker
    +from pylint.checkers.utils import (
    +    is_empty,
    +    is_raising,
    +    check_messages,
    +    inherit_from_std_ex,
    +    EXCEPTIONS_MODULE,
    +    has_known_bases,
    +    safe_infer)
    +from pylint.interfaces import IAstroidChecker, INFERENCE, INFERENCE_FAILURE
     
    -def infer_bases(klass):
    -    """ Fully infer the bases of the klass node.
     
    -    This doesn't use .ancestors(), because we need
    -    the non-inferable nodes (YES nodes),
    -    which can't be retrieved from .ancestors()
    +def _annotated_unpack_infer(stmt, context=None):
    +    """
    +    Recursively generate nodes inferred by the given statement.
    +    If the inferred value is a list or a tuple, recurse on the elements.
    +    Returns an iterator which yields tuples in the format
    +    ('original node', 'infered node').
         """
    -    for base in klass.bases:
    -        try:
    -            inferit = base.infer().next()
    -        except astroid.InferenceError:
    +    if isinstance(stmt, (List, Tuple)):
    +        for elt in stmt.elts:
    +            inferred = safe_infer(elt)
    +            if inferred and inferred is not YES:
    +                yield elt, inferred
    +        return
    +    for infered in stmt.infer(context):
    +        if infered is YES:
                 continue
    -        if inferit is YES:
    -            yield inferit
    -        else:
    -            for base in infer_bases(inferit):
    -                yield base
    +        yield stmt, infered
    +
     
     PY3K = sys.version_info >= (3, 0)
     OVERGENERAL_EXCEPTIONS = ('Exception',)
    -
    +BUILTINS_NAME = builtins.__name__
     MSGS = {
         'E0701': ('Bad except clauses order (%s)',
                   'bad-except-order',
                   'Used when except clauses are not in the correct order (from the '
                   'more specific to the more generic). If you don\'t fix the order, '
                   'some exceptions may not be catched by the most specific handler.'),
    -    'E0702': ('Raising %s while only classes, instances or string are allowed',
    +    'E0702': ('Raising %s while only classes or instances are allowed',
                   'raising-bad-type',
                   'Used when something which is neither a class, an instance or a \
                   string is raised (i.e. a `TypeError` will be raised).'),
    @@ -75,10 +83,6 @@ def infer_bases(klass):
                   'catching-non-exception',
                   'Used when a class which doesn\'t inherit from \
                    BaseException is used as an exception in an except clause.'),
    -
    -    'W0701': ('Raising a string exception',
    -              'raising-string',
    -              'Used when a string exception is raised.'),
         'W0702': ('No exception type(s) specified',
                   'bare-except',
                   'Used when an except clause doesn\'t specify exceptions type to \
    @@ -101,25 +105,9 @@ def infer_bases(klass):
                   'Used when the exception to catch is of the form \
                   "except A or B:".  If intending to catch multiple, \
                   rewrite as "except (A, B):"'),
    -    'W0712': ('Implicit unpacking of exceptions is not supported in Python 3',
    -              'unpacking-in-except',
    -              'Python3 will not allow implicit unpacking of exceptions in except '
    -              'clauses. '
    -              'See http://www.python.org/dev/peps/pep-3110/',
    -              {'maxversion': (3, 0)}),
    -    'W0713': ('Indexing exceptions will not work on Python 3',
    -              'indexing-exception',
    -              'Indexing exceptions will not work on Python 3. Use '
    -              '`exception.args[index]` instead.',
    -              {'maxversion': (3, 0)}),
         }
     
     
    -if sys.version_info < (3, 0):
    -    EXCEPTIONS_MODULE = "exceptions"
    -else:
    -    EXCEPTIONS_MODULE = "builtins"
    -
     class ExceptionsChecker(BaseChecker):
         """checks for
         * excepts without exception filter
    @@ -137,98 +125,148 @@ class ExceptionsChecker(BaseChecker):
                      'help' : 'Exceptions that will emit a warning '
                               'when being caught. Defaults to "%s"' % (
                                   ', '.join(OVERGENERAL_EXCEPTIONS),)}
    -                ),
    -               )
    +               ),
    +              )
     
    -    @check_messages('raising-string', 'nonstandard-exception', 'raising-bad-type',
    -                    'raising-non-exception', 'notimplemented-raised', 'bad-exception-context')
    +    @check_messages('nonstandard-exception',
    +                    'raising-bad-type', 'raising-non-exception',
    +                    'notimplemented-raised', 'bad-exception-context')
         def visit_raise(self, node):
             """visit raise possibly inferring value"""
             # ignore empty raise
             if node.exc is None:
                 return
             if PY3K and node.cause:
    -            try:
    -                cause = node.cause.infer().next()
    -            except astroid.InferenceError:
    -                pass
    -            else:
    -                if cause is YES:
    -                    return
    -                if isinstance(cause, astroid.Const):
    -                    if cause.value is not None:
    -                        self.add_message('bad-exception-context',
    -                                         node=node)
    -                elif (not isinstance(cause, astroid.Class) and
    -                      not inherit_from_std_ex(cause)):
    -                    self.add_message('bad-exception-context',
    -                                      node=node)
    +            self._check_bad_exception_context(node)
    +
             expr = node.exc
             if self._check_raise_value(node, expr):
                 return
             else:
                 try:
    -                value = unpack_infer(expr).next()
    +                value = next(unpack_infer(expr))
                 except astroid.InferenceError:
                     return
                 self._check_raise_value(node, value)
     
    +    def _check_bad_exception_context(self, node):
    +        """Verify that the exception context is properly set.
    +
    +        An exception context can be only `None` or an exception.
    +        """
    +        cause = safe_infer(node.cause)
    +        if cause in (YES, None):
    +            return
    +        if isinstance(cause, astroid.Const):
    +            if cause.value is not None:
    +                self.add_message('bad-exception-context',
    +                                 node=node)
    +        elif (not isinstance(cause, astroid.Class) and
    +              not inherit_from_std_ex(cause)):
    +            self.add_message('bad-exception-context',
    +                             node=node)
    +
         def _check_raise_value(self, node, expr):
             """check for bad values, string exception and class inheritance
             """
             value_found = True
             if isinstance(expr, astroid.Const):
                 value = expr.value
    -            if isinstance(value, str):
    -                self.add_message('raising-string', node=node)
    -            else:
    +            if not isinstance(value, str):
    +                # raising-string will be emitted from python3 porting checker.
                     self.add_message('raising-bad-type', node=node,
                                      args=value.__class__.__name__)
    -        elif (isinstance(expr, astroid.Name) and \
    -                 expr.name in ('None', 'True', 'False')) or \
    -                 isinstance(expr, (astroid.List, astroid.Dict, astroid.Tuple,
    -                                   astroid.Module, astroid.Function)):
    -            self.add_message('raising-bad-type', node=node, args=expr.name)
    +        elif ((isinstance(expr, astroid.Name) and
    +               expr.name in ('None', 'True', 'False')) or
    +              isinstance(expr, (astroid.List, astroid.Dict, astroid.Tuple,
    +                                astroid.Module, astroid.Function))):
    +            emit = True
    +            if not PY3K and isinstance(expr, astroid.Tuple):
    +                # On Python 2, using the following is not an error:
    +                #    raise (ZeroDivisionError, None)
    +                #    raise (ZeroDivisionError, )
    +                # What's left to do is to check that the first
    +                # argument is indeed an exception.
    +                # Verifying the other arguments is not
    +                # the scope of this check.
    +                first = expr.elts[0]
    +                inferred = safe_infer(first)
    +                if isinstance(inferred, Instance):
    +                    # pylint: disable=protected-access
    +                    inferred = inferred._proxied
    +                if (inferred is YES or
    +                        isinstance(inferred, astroid.Class)
    +                        and inherit_from_std_ex(inferred)):
    +                    emit = False
    +            if emit:
    +                self.add_message('raising-bad-type',
    +                                 node=node,
    +                                 args=expr.name)
             elif ((isinstance(expr, astroid.Name) and expr.name == 'NotImplemented')
                   or (isinstance(expr, astroid.CallFunc) and
                       isinstance(expr.func, astroid.Name) and
                       expr.func.name == 'NotImplemented')):
                 self.add_message('notimplemented-raised', node=node)
    -        elif isinstance(expr, astroid.BinOp) and expr.op == '%':
    -            self.add_message('raising-string', node=node)
             elif isinstance(expr, (Instance, astroid.Class)):
                 if isinstance(expr, Instance):
    +                # pylint: disable=protected-access
                     expr = expr._proxied
                 if (isinstance(expr, astroid.Class) and
    -                    not inherit_from_std_ex(expr) and
    -                    expr.root().name != BUILTINS_NAME):
    +                    not inherit_from_std_ex(expr)):
                     if expr.newstyle:
                         self.add_message('raising-non-exception', node=node)
                     else:
    -                    self.add_message('nonstandard-exception', node=node)
    +                    if has_known_bases(expr):
    +                        confidence = INFERENCE
    +                    else:
    +                        confidence = INFERENCE_FAILURE
    +                    self.add_message(
    +                        'nonstandard-exception', node=node,
    +                        confidence=confidence)
                 else:
                     value_found = False
             else:
                 value_found = False
             return value_found
     
    -    @check_messages('unpacking-in-except')
    -    def visit_excepthandler(self, node):
    -        """Visit an except handler block and check for exception unpacking."""
    -        if isinstance(node.name, (astroid.Tuple, astroid.List)):
    -            self.add_message('unpacking-in-except', node=node)
    +    def _check_catching_non_exception(self, handler, exc, part):
    +        if isinstance(exc, astroid.Tuple):
    +            # Check if it is a tuple of exceptions.
    +            inferred = [safe_infer(elt) for elt in exc.elts]
    +            if any(node is astroid.YES for node in inferred):
    +                # Don't emit if we don't know every component.
    +                return
    +            if all(node and inherit_from_std_ex(node)
    +                   for node in inferred):
    +                return
     
    -    @check_messages('indexing-exception')
    -    def visit_subscript(self, node):
    -        """ Look for indexing exceptions. """
    -        try:
    -            for infered in node.value.infer():
    -                if not isinstance(infered, astroid.Instance):
    -                    continue
    -                if inherit_from_std_ex(infered):
    -                    self.add_message('indexing-exception', node=node)
    -        except astroid.InferenceError:
    +        if not isinstance(exc, astroid.Class):
    +            # Don't emit the warning if the infered stmt
    +            # is None, but the exception handler is something else,
    +            # maybe it was redefined.
    +            if (isinstance(exc, astroid.Const) and
    +                    exc.value is None):
    +                if ((isinstance(handler.type, astroid.Const) and
    +                     handler.type.value is None) or
    +                        handler.type.parent_of(exc)):
    +                    # If the exception handler catches None or
    +                    # the exception component, which is None, is
    +                    # defined by the entire exception handler, then
    +                    # emit a warning.
    +                    self.add_message('catching-non-exception',
    +                                     node=handler.type,
    +                                     args=(part.as_string(), ))
    +            else:
    +                self.add_message('catching-non-exception',
    +                                 node=handler.type,
    +                                 args=(part.as_string(), ))
                 return
    +        if (not inherit_from_std_ex(exc) and
    +                exc.root().name != BUILTINS_NAME):
    +            if has_known_bases(exc):
    +                self.add_message('catching-non-exception',
    +                                 node=handler.type,
    +                                 args=(exc.name, ))
     
         @check_messages('bare-except', 'broad-except', 'pointless-except',
                         'binary-op-exception', 'bad-except-order',
    @@ -237,70 +275,58 @@ def visit_tryexcept(self, node):
             """check for empty except"""
             exceptions_classes = []
             nb_handlers = len(node.handlers)
    -        for index, handler  in enumerate(node.handlers):
    +        for index, handler in enumerate(node.handlers):
                 # single except doing nothing but "pass" without else clause
    -            if nb_handlers == 1 and is_empty(handler.body) and not node.orelse:
    -                self.add_message('pointless-except', node=handler.type or handler.body[0])
    +            if is_empty(handler.body) and not node.orelse:
    +                self.add_message('pointless-except',
    +                                 node=handler.type or handler.body[0])
                 if handler.type is None:
    -                if nb_handlers == 1 and not is_raising(handler.body):
    +                if not is_raising(handler.body):
                         self.add_message('bare-except', node=handler)
                     # check if a "except:" is followed by some other
                     # except
    -                elif index < (nb_handlers - 1):
    +                if index < (nb_handlers - 1):
                         msg = 'empty except clause should always appear last'
                         self.add_message('bad-except-order', node=node, args=msg)
     
                 elif isinstance(handler.type, astroid.BoolOp):
    -                self.add_message('binary-op-exception', node=handler, args=handler.type.op)
    +                self.add_message('binary-op-exception',
    +                                 node=handler, args=handler.type.op)
                 else:
                     try:
    -                    excs = list(unpack_infer(handler.type))
    +                    excs = list(_annotated_unpack_infer(handler.type))
                     except astroid.InferenceError:
                         continue
    -                for exc in excs:
    -                    # XXX skip other non class nodes
    -                    if exc is YES or not isinstance(exc, astroid.Class):
    +                for part, exc in excs:
    +                    if exc is YES:
    +                        continue
    +                    if (isinstance(exc, astroid.Instance)
    +                            and inherit_from_std_ex(exc)):
    +                        # pylint: disable=protected-access
    +                        exc = exc._proxied
    +
    +                    self._check_catching_non_exception(handler, exc, part)
    +
    +                    if not isinstance(exc, astroid.Class):
                             continue
    +
                         exc_ancestors = [anc for anc in exc.ancestors()
                                          if isinstance(anc, astroid.Class)]
                         for previous_exc in exceptions_classes:
                             if previous_exc in exc_ancestors:
                                 msg = '%s is an ancestor class of %s' % (
                                     previous_exc.name, exc.name)
    -                            self.add_message('bad-except-order', node=handler.type, args=msg)
    +                            self.add_message('bad-except-order',
    +                                             node=handler.type, args=msg)
                         if (exc.name in self.config.overgeneral_exceptions
    -                        and exc.root().name == EXCEPTIONS_MODULE
    -                        and nb_handlers == 1 and not is_raising(handler.body)):
    -                        self.add_message('broad-except', args=exc.name, node=handler.type)
    -
    -                    if (not inherit_from_std_ex(exc) and
    -                        exc.root().name != BUILTINS_NAME):
    -                        # try to see if the exception is based on a C based
    -                        # exception, by infering all the base classes and
    -                        # looking for inference errors
    -                        bases = infer_bases(exc)
    -                        fully_infered = all(inferit is not YES
    -                                            for inferit in bases)
    -                        if fully_infered:
    -                            self.add_message('catching-non-exception',
    -                                             node=handler.type,
    -                                             args=(exc.name, ))
    +                            and exc.root().name == EXCEPTIONS_MODULE
    +                            and not is_raising(handler.body)):
    +                        self.add_message('broad-except',
    +                                         args=exc.name, node=handler.type)
     
    -                exceptions_classes += excs
    +                exceptions_classes += [exc for _, exc in excs]
     
     
    -def inherit_from_std_ex(node):
    -    """return true if the given class node is subclass of
    -    exceptions.Exception
    -    """
    -    if node.name in ('Exception', 'BaseException') \
    -            and node.root().name == EXCEPTIONS_MODULE:
    -        return True
    -    for parent in node.ancestors(recurs=False):
    -        if inherit_from_std_ex(parent):
    -            return True
    -    return False
    -
     def register(linter):
         """required method to auto register this checker"""
         linter.register_checker(ExceptionsChecker(linter))
    diff --git a/pymode/libs/pylama/lint/pylama_pylint/pylint/checkers/format.py b/pymode/libs/pylint/checkers/format.py
    similarity index 84%
    rename from pymode/libs/pylama/lint/pylama_pylint/pylint/checkers/format.py
    rename to pymode/libs/pylint/checkers/format.py
    index 8b73049c..8c496ac1 100644
    --- a/pymode/libs/pylama/lint/pylama_pylint/pylint/checkers/format.py
    +++ b/pymode/libs/pylint/checkers/format.py
    @@ -24,9 +24,10 @@
     import keyword
     import sys
     import tokenize
    +from functools import reduce # pylint: disable=redefined-builtin
     
    -if not hasattr(tokenize, 'NL'):
    -    raise ValueError("tokenize.NL doesn't exist -- tokenize module too old")
    +import six
    +from six.moves import zip, map, filter # pylint: disable=redefined-builtin
     
     from astroid import nodes
     
    @@ -65,10 +66,10 @@
         'C0301': ('Line too long (%s/%s)',
                   'line-too-long',
                   'Used when a line is longer than a given number of characters.'),
    -    'C0302': ('Too many lines in module (%s)', # was W0302
    +    'C0302': ('Too many lines in module (%s/%s)', # was W0302
                   'too-many-lines',
                   'Used when a module has too much lines, reducing its readability.'
    -              ),
    +             ),
         'C0303': ('Trailing whitespace',
                   'trailing-whitespace',
                   'Used when there is whitespace between the end of a line and the '
    @@ -104,28 +105,20 @@
                    'bracket or block opener.'),
                   {'old_names': [('C0323', 'no-space-after-operator'),
                                  ('C0324', 'no-space-after-comma'),
    -                             ('C0322', 'no-space-before-operator')]})
    -    }
    -
    -
    -if sys.version_info < (3, 0):
    -
    -    MSGS.update({
    -    'W0331': ('Use of the <> operator',
    -              'old-ne-operator',
    -              'Used when the deprecated "<>" operator is used instead \
    -              of "!=".'),
    +                             ('C0322', 'no-space-before-operator')]}),
         'W0332': ('Use of "l" as long integer identifier',
                   'lowercase-l-suffix',
                   'Used when a lower case "l" is used to mark a long integer. You '
                   'should use a upper case "L" since the letter "l" looks too much '
    -              'like the digit "1"'),
    -    'W0333': ('Use of the `` operator',
    -              'backtick',
    -              'Used when the deprecated "``" (backtick) operator is used '
    -              'instead  of the str() function.',
    -              {'scope': WarningScope.NODE}),
    -    })
    +              'like the digit "1"',
    +              {'maxversion': (3, 0)}),
    +    'C0327': ('Mixed line endings LF and CRLF',
    +              'mixed-line-endings',
    +              'Used when there are mixed (LF and CRLF) newline signs in a file.'),
    +    'C0328': ('Unexpected line ending format. There is \'%s\' while it should be \'%s\'.',
    +              'unexpected-line-ending-format',
    +              'Used when there is different newline than expected.'),
    +    }
     
     
     def _underline_token(token):
    @@ -145,29 +138,28 @@ def _column_distance(token1, token2):
     
     
     def _last_token_on_line_is(tokens, line_end, token):
    -    return (
    -        line_end > 0 and tokens.token(line_end-1) == token or
    -        line_end > 1 and tokens.token(line_end-2) == token 
    -        and tokens.type(line_end-1) == tokenize.COMMENT)
    +    return (line_end > 0 and tokens.token(line_end-1) == token or
    +            line_end > 1 and tokens.token(line_end-2) == token
    +            and tokens.type(line_end-1) == tokenize.COMMENT)
     
     
     def _token_followed_by_eol(tokens, position):
    -  return (tokens.type(position+1) == tokenize.NL or
    -          tokens.type(position+1) == tokenize.COMMENT and
    -          tokens.type(position+2) == tokenize.NL)
    +    return (tokens.type(position+1) == tokenize.NL or
    +            tokens.type(position+1) == tokenize.COMMENT and
    +            tokens.type(position+2) == tokenize.NL)
     
     
     def _get_indent_length(line):
    -  """Return the length of the indentation on the given token's line."""
    -  result = 0
    -  for char in line:
    -    if char == ' ':
    -      result += 1
    -    elif char == '\t':
    -      result += _TAB_LENGTH
    -    else:
    -      break
    -  return result
    +    """Return the length of the indentation on the given token's line."""
    +    result = 0
    +    for char in line:
    +        if char == ' ':
    +            result += 1
    +        elif char == '\t':
    +            result += _TAB_LENGTH
    +        else:
    +            break
    +    return result
     
     
     def _get_indent_hint_line(bar_positions, bad_position):
    @@ -311,7 +303,7 @@ def add_block_warning(self, token_position, state, valid_offsets):
             self.retained_warnings.append((token_position, state, valid_offsets))
     
         def get_valid_offsets(self, idx):
    -        """"Returns the valid offsets for the token at the given position."""
    +        """Returns the valid offsets for the token at the given position."""
             # The closing brace on a dict or the 'for' in a dict comprehension may
             # reset two indent levels because the dict value is ended implicitly
             stack_top = -1
    @@ -336,16 +328,19 @@ def _hanging_indent_after_bracket(self, bracket, position):
                     _BeforeBlockOffsets(indentation + self._continuation_size,
                                         indentation + self._continuation_size * 2))
             elif bracket == ':':
    -            if self._cont_stack[-1].context_type == CONTINUED:
    -                # If the dict key was on the same line as the open brace, the new
    -                # correct indent should be relative to the key instead of the
    -                # current indent level
    -                paren_align = self._cont_stack[-1].valid_outdent_offsets
    -                next_align = self._cont_stack[-1].valid_continuation_offsets.copy()
    -                next_align[next_align.keys()[0] + self._continuation_size] = True
    -            else:
    -                next_align = _Offsets(indentation + self._continuation_size, indentation)
    -                paren_align = _Offsets(indentation + self._continuation_size, indentation)
    +            # If the dict key was on the same line as the open brace, the new
    +            # correct indent should be relative to the key instead of the
    +            # current indent level
    +            paren_align = self._cont_stack[-1].valid_outdent_offsets
    +            next_align = self._cont_stack[-1].valid_continuation_offsets.copy()
    +            next_align_keys = list(next_align.keys())
    +            next_align[next_align_keys[0] + self._continuation_size] = True
    +            # Note that the continuation of
    +            # d = {
    +            #       'a': 'b'
    +            #            'c'
    +            # }
    +            # is handled by the special-casing for hanging continued string indents.
                 return _ContinuedIndent(HANGING_DICT_VALUE, bracket, position, paren_align, next_align)
             else:
                 return _ContinuedIndent(
    @@ -358,21 +353,22 @@ def _hanging_indent_after_bracket(self, bracket, position):
         def _continuation_inside_bracket(self, bracket, pos):
             """Extracts indentation information for a continued indent."""
             indentation = _get_indent_length(self._tokens.line(pos))
    -        if self._is_block_opener and self._tokens.start_col(pos+1) - indentation == self._block_indent_size:
    +        token_start = self._tokens.start_col(pos)
    +        next_token_start = self._tokens.start_col(pos + 1)
    +        if self._is_block_opener and next_token_start - indentation == self._block_indent_size:
                 return _ContinuedIndent(
                     CONTINUED_BLOCK,
                     bracket,
                     pos,
    -                _Offsets(self._tokens.start_col(pos)),
    -                _BeforeBlockOffsets(self._tokens.start_col(pos+1),
    -                                    self._tokens.start_col(pos+1) + self._continuation_size))
    +                _Offsets(token_start),
    +                _BeforeBlockOffsets(next_token_start, next_token_start + self._continuation_size))
             else:
                 return _ContinuedIndent(
                     CONTINUED,
                     bracket,
                     pos,
    -                _Offsets(self._tokens.start_col(pos)),
    -                _Offsets(self._tokens.start_col(pos+1)))
    +                _Offsets(token_start),
    +                _Offsets(next_token_start))
     
         def pop_token(self):
             self._cont_stack.pop()
    @@ -404,7 +400,6 @@ class FormatChecker(BaseTokenChecker):
         * unauthorized constructions
         * strict indentation
         * line length
    -    * use of <> instead of !=
         """
     
         __implements__ = (ITokenChecker, IAstroidChecker, IRawChecker)
    @@ -416,7 +411,7 @@ class FormatChecker(BaseTokenChecker):
         # configuration options
         # for available dict keys/values see the optik parser 'add_option' method
         options = (('max-line-length',
    -                {'default' : 80, 'type' : "int", 'metavar' : '',
    +                {'default' : 100, 'type' : "int", 'metavar' : '',
                      'help' : 'Maximum number of characters on a single line.'}),
                    ('ignore-long-lines',
                     {'type': 'regexp', 'metavar': '',
    @@ -424,9 +419,9 @@ class FormatChecker(BaseTokenChecker):
                      'help': ('Regexp for a line that is allowed to be longer than '
                               'the limit.')}),
                    ('single-line-if-stmt',
    -                 {'default': False, 'type' : 'yn', 'metavar' : '',
    -                  'help' : ('Allow the body of an if to be on the same '
    -                            'line as the test if there is no else.')}),
    +                {'default': False, 'type' : 'yn', 'metavar' : '',
    +                 'help' : ('Allow the body of an if to be on the same '
    +                           'line as the test if there is no else.')}),
                    ('no-space-check',
                     {'default': ','.join(_NO_SPACE_CHECK_CHOICES),
                      'type': 'multiple_choice',
    @@ -436,16 +431,21 @@ class FormatChecker(BaseTokenChecker):
                    ('max-module-lines',
                     {'default' : 1000, 'type' : 'int', 'metavar' : '',
                      'help': 'Maximum number of lines in a module'}
    -                ),
    +               ),
                    ('indent-string',
                     {'default' : '    ', 'type' : "string", 'metavar' : '',
    -                 'help' : 'String used as indentation unit. This is usually \
    -"    " (4 spaces) or "\\t" (1 tab).'}),
    +                 'help' : 'String used as indentation unit. This is usually '
    +                          '"    " (4 spaces) or "\\t" (1 tab).'}),
                    ('indent-after-paren',
                     {'type': 'int', 'metavar': '', 'default': 4,
                      'help': 'Number of spaces of indent required inside a hanging '
                              ' or continued line.'}),
    -               )
    +               ('expected-line-ending-format',
    +                {'type': 'choice', 'metavar': '', 'default': '',
    +                 'choices': ['', 'LF', 'CRLF'],
    +                 'help': ('Expected format of line ending, '
    +                          'e.g. empty (any line ending), LF or CRLF.')}),
    +              )
     
         def __init__(self, linter=None):
             BaseTokenChecker.__init__(self, linter)
    @@ -499,7 +499,7 @@ def _check_keyword_parentheses(self, tokens, start):
             keyword_token = tokens[start][1]
             line_num = tokens[start][2][0]
     
    -        for i in xrange(start, len(tokens) - 1):
    +        for i in range(start, len(tokens) - 1):
                 token = tokens[i]
     
                 # If we hit a newline, then assume any parens were for continuation.
    @@ -513,8 +513,9 @@ def _check_keyword_parentheses(self, tokens, start):
                     if not depth:
                         # ')' can't happen after if (foo), since it would be a syntax error.
                         if (tokens[i+1][1] in (':', ')', ']', '}', 'in') or
    -                        tokens[i+1][0] in (tokenize.NEWLINE, tokenize.ENDMARKER,
    -                                             tokenize.COMMENT)):
    +                            tokens[i+1][0] in (tokenize.NEWLINE,
    +                                               tokenize.ENDMARKER,
    +                                               tokenize.COMMENT)):
                             # The empty tuple () is always accepted.
                             if i == start + 2:
                                 return
    @@ -591,7 +592,7 @@ def _handle_colon(self, tokens, i):
             if self._inside_brackets('['):
                 return
             if (self._inside_brackets('{') and
    -            _DICT_SEPARATOR in self.config.no_space_check):
    +                _DICT_SEPARATOR in self.config.no_space_check):
                 policy = (_IGNORE, _IGNORE)
             else:
                 policy = (_MUST_NOT, _MUST)
    @@ -624,13 +625,13 @@ def _policy_string(policy):
                     return 'No', 'allowed'
     
             def _name_construct(token):
    -            if tokens[i][1] == ',':
    +            if token[1] == ',':
                     return 'comma'
    -            elif tokens[i][1] == ':':
    +            elif token[1] == ':':
                     return ':'
    -            elif tokens[i][1] in '()[]{}':
    +            elif token[1] in '()[]{}':
                     return 'bracket'
    -            elif tokens[i][1] in ('<', '>', '<=', '>=', '!=', '=='):
    +            elif token[1] in ('<', '>', '<=', '>=', '!=', '=='):
                     return 'comparison'
                 else:
                     if self._inside_brackets('('):
    @@ -639,7 +640,8 @@ def _name_construct(token):
                         return 'assignment'
     
             good_space = [True, True]
    -        pairs = [(tokens[i-1], tokens[i]), (tokens[i], tokens[i+1])]
    +        token = tokens[i]
    +        pairs = [(tokens[i-1], token), (token, tokens[i+1])]
     
             for other_idx, (policy, token_pair) in enumerate(zip(policies, pairs)):
                 if token_pair[other_idx][0] in _EOL or policy == _IGNORE:
    @@ -660,19 +662,15 @@ def _name_construct(token):
                     if not ok:
                         warnings.append((policy, position))
             for policy, position in warnings:
    -            construct = _name_construct(tokens[i])
    +            construct = _name_construct(token)
                 count, state = _policy_string(policy)
    -            self.add_message('bad-whitespace', line=tokens[i][2][0],
    +            self.add_message('bad-whitespace', line=token[2][0],
                                  args=(count, state, position, construct,
    -                                   _underline_token(tokens[i])))
    +                                   _underline_token(token)))
     
         def _inside_brackets(self, left):
             return self._bracket_stack[-1] == left
     
    -    def _handle_old_ne_operator(self, tokens, i):
    -        if tokens[i][1] == '<>':
    -            self.add_message('old-ne-operator', line=tokens[i][2][0])
    -
         def _prepare_token_dispatcher(self):
             raw = [
                 (_KEYWORD_TOKENS,
    @@ -692,7 +690,6 @@ def _prepare_token_dispatcher(self):
     
                 (['lambda'], self._open_lambda),
     
    -            (['<>'], self._handle_old_ne_operator),
                 ]
     
             dispatch = {}
    @@ -717,6 +714,7 @@ def process_tokens(self, tokens):
             self._lines = {}
             self._visited_lines = {}
             token_handlers = self._prepare_token_dispatcher()
    +        self._last_line_ending = None
     
             self._current_line = ContinuedLineState(tokens, self.config)
             for idx, (tok_type, token, start, _, line) in enumerate(tokens):
    @@ -729,7 +727,7 @@ def process_tokens(self, tokens):
                         self.new_line(TokenWrapper(tokens), idx-1, idx+1)
                     else:
                         self.new_line(TokenWrapper(tokens), idx-1, idx)
    -            
    +
                 if tok_type == tokenize.NEWLINE:
                     # a program statement, or ENDMARKER, will eventually follow,
                     # after some (possibly empty) run of tokens of the form
    @@ -739,6 +737,7 @@ def process_tokens(self, tokens):
                     check_equal = True
                     self._process_retained_warnings(TokenWrapper(tokens), idx)
                     self._current_line.next_logical_line()
    +                self._check_line_ending(token, line_num)
                 elif tok_type == tokenize.INDENT:
                     check_equal = False
                     self.check_indent_level(token, indents[-1]+1, line_num)
    @@ -778,14 +777,41 @@ def process_tokens(self, tokens):
     
             line_num -= 1 # to be ok with "wc -l"
             if line_num > self.config.max_module_lines:
    -            self.add_message('too-many-lines', args=line_num, line=1)
    +            # Get the line where the too-many-lines (or its message id)
    +            # was disabled or default to 1.
    +            symbol = self.linter.msgs_store.check_message_id('too-many-lines')
    +            names = (symbol.msgid, 'too-many-lines')
    +            line = next(filter(None,
    +                               map(self.linter._pragma_lineno.get, names)), 1)
    +            self.add_message('too-many-lines',
    +                             args=(line_num, self.config.max_module_lines),
    +                             line=line)
    +
    +    def _check_line_ending(self, line_ending, line_num):
    +        # check if line endings are mixed
    +        if self._last_line_ending is not None:
    +            if line_ending != self._last_line_ending:
    +                self.add_message('mixed-line-endings', line=line_num)
    +
    +        self._last_line_ending = line_ending
    +
    +        # check if line ending is as expected
    +        expected = self.config.expected_line_ending_format
    +        if expected:
    +            # reduce multiple \n\n\n\n to one \n
    +            line_ending = reduce(lambda x, y: x + y if x != y else x, line_ending, "")
    +            line_ending = 'LF' if line_ending == '\n' else 'CRLF'
    +            if line_ending != expected:
    +                self.add_message('unexpected-line-ending-format', args=(line_ending, expected),
    +                                 line=line_num)
    +
     
         def _process_retained_warnings(self, tokens, current_pos):
             single_line_block_stmt = not _last_token_on_line_is(tokens, current_pos, ':')
     
             for indent_pos, state, offsets in self._current_line.retained_warnings:
                 block_type = offsets[tokens.start_col(indent_pos)]
    -            hints = dict((k, v) for k, v in offsets.iteritems()
    +            hints = dict((k, v) for k, v in six.iteritems(offsets)
                              if v != block_type)
                 if single_line_block_stmt and block_type == WITH_BODY:
                     self._add_continuation_message(state, hints, tokens, indent_pos)
    @@ -793,16 +819,19 @@ def _process_retained_warnings(self, tokens, current_pos):
                     self._add_continuation_message(state, hints, tokens, indent_pos)
     
         def _check_continued_indentation(self, tokens, next_idx):
    +        def same_token_around_nl(token_type):
    +            return (tokens.type(next_idx) == token_type and
    +                    tokens.type(next_idx-2) == token_type)
    +
             # Do not issue any warnings if the next line is empty.
             if not self._current_line.has_content or tokens.type(next_idx) == tokenize.NL:
                 return
     
             state, valid_offsets = self._current_line.get_valid_offsets(next_idx)
    -        # Special handling for hanging comments. If the last line ended with a
    -        # comment and the new line contains only a comment, the line may also be
    -        # indented to the start of the previous comment.
    -        if (tokens.type(next_idx) == tokenize.COMMENT and
    -                tokens.type(next_idx-2) == tokenize.COMMENT):
    +        # Special handling for hanging comments and strings. If the last line ended
    +        # with a comment (string) and the new line contains only a comment, the line
    +        # may also be indented to the start of the previous token.
    +        if same_token_around_nl(tokenize.COMMENT) or same_token_around_nl(tokenize.STRING):
                 valid_offsets[tokens.start_col(next_idx-2)] = True
     
             # We can only decide if the indentation of a continued line before opening
    @@ -839,7 +868,7 @@ def visit_default(self, node):
                 # by taking the last line of the body and adding 1, which
                 # should be the line of finally:
                 if (isinstance(node.parent, nodes.TryFinally)
    -                and node in node.parent.finalbody):
    +                    and node in node.parent.finalbody):
                     prev_line = node.parent.body[0].tolineno + 1
                 else:
                     prev_line = node.parent.statement().fromlineno
    @@ -856,7 +885,7 @@ def visit_default(self, node):
                 tolineno = node.tolineno
             assert tolineno, node
             lines = []
    -        for line in xrange(line, tolineno + 1):
    +        for line in range(line, tolineno + 1):
                 self._visited_lines[line] = 1
                 try:
                     lines.append(self._lines[line].rstrip())
    @@ -872,18 +901,14 @@ def _check_multi_statement_line(self, node, line):
             # For try... except... finally..., the two nodes
             # appear to be on the same line due to how the AST is built.
             if (isinstance(node, nodes.TryExcept) and
    -            isinstance(node.parent, nodes.TryFinally)):
    +                isinstance(node.parent, nodes.TryFinally)):
                 return
             if (isinstance(node.parent, nodes.If) and not node.parent.orelse
    -            and self.config.single_line_if_stmt):
    +                and self.config.single_line_if_stmt):
                 return
             self.add_message('multiple-statements', node=node)
             self._visited_lines[line] = 2
     
    -    @check_messages('backtick')
    -    def visit_backquote(self, node):
    -        self.add_message('backtick', node=node)
    -
         def check_lines(self, lines, i):
             """check lines have less than a maximum number of characters
             """
    diff --git a/pymode/libs/pylama/lint/pylama_pylint/pylint/checkers/imports.py b/pymode/libs/pylint/checkers/imports.py
    similarity index 86%
    rename from pymode/libs/pylama/lint/pylama_pylint/pylint/checkers/imports.py
    rename to pymode/libs/pylint/checkers/imports.py
    index 8b73c6f6..1969eeb1 100644
    --- a/pymode/libs/pylama/lint/pylama_pylint/pylint/checkers/imports.py
    +++ b/pymode/libs/pylint/checkers/imports.py
    @@ -16,19 +16,31 @@
     """imports checkers for Python code"""
     
     import sys
    +from collections import defaultdict
    +
    +import six
    +from six.moves import map # pylint: disable=redefined-builtin
     
     from logilab.common.graph import get_cycles, DotBackend
    -from logilab.common.modutils import get_module_part, is_standard_module
     from logilab.common.ureports import VerbatimText, Paragraph
     
     import astroid
     from astroid import are_exclusive
    +from astroid.modutils import get_module_part, is_standard_module
     
     from pylint.interfaces import IAstroidChecker
     from pylint.utils import EmptyReport
     from pylint.checkers import BaseChecker
    -from pylint.checkers.utils import check_messages
    +from pylint.checkers.utils import check_messages, is_import_error
     
    +def _except_import_error(node):
    +    """
    +    Check if the try-except node has an ImportError handler.
    +    Return True if an ImportError handler was inferred, False otherwise.
    +    """
    +    if not isinstance(node, astroid.TryExcept):
    +        return
    +    return any(map(is_import_error, node.handlers))
     
     def get_first_import(node, context, name, base, level):
         """return the node where [base.] is imported or None if not found
    @@ -48,7 +60,8 @@ def get_first_import(node, context, name, base, level):
                     break
             elif isinstance(first, astroid.From):
                 if level == first.level and any(
    -                fullname == '%s.%s' % (first.modname, iname[0]) for iname in first.names):
    +                    fullname == '%s.%s' % (first.modname, iname[0])
    +                    for iname in first.names):
                     found = True
                     break
         if found and not are_exclusive(first, node):
    @@ -97,14 +110,14 @@ def dependencies_graph(filename, dep_info):
         done = {}
         printer = DotBackend(filename[:-4], rankdir='LR')
         printer.emit('URL="." node[shape="box"]')
    -    for modname, dependencies in sorted(dep_info.iteritems()):
    +    for modname, dependencies in sorted(six.iteritems(dep_info)):
             done[modname] = 1
             printer.emit_node(modname)
             for modname in dependencies:
                 if modname not in done:
                     done[modname] = 1
                     printer.emit_node(modname)
    -    for depmodname, dependencies in sorted(dep_info.iteritems()):
    +    for depmodname, dependencies in sorted(six.iteritems(dep_info)):
             for modname in dependencies:
                 printer.emit_edge(modname, depmodname)
         printer.generate(filename)
    @@ -138,8 +151,9 @@ def make_graph(filename, dep_info, sect, gtype):
                   'Used a module marked as deprecated is imported.'),
         'W0403': ('Relative import %r, should be %r',
                   'relative-import',
    -              'Used when an import relative to the package directory is \
    -              detected.'),
    +              'Used when an import relative to the package directory is '
    +              'detected.',
    +              {'maxversion': (3, 0)}),
         'W0404': ('Reimport %r (imported line %s)',
                   'reimported',
                   'Used when a module is reimported multiple times.'),
    @@ -178,30 +192,29 @@ class ImportsChecker(BaseChecker):
                      'metavar' : '',
                      'help' : 'Deprecated modules which should not be used, \
     separated by a comma'}
    -                ),
    +               ),
                    ('import-graph',
                     {'default' : '',
                      'type' : 'string',
                      'metavar' : '',
                      'help' : 'Create a graph of every (i.e. internal and \
     external) dependencies in the given file (report RP0402 must not be disabled)'}
    -                ),
    +               ),
                    ('ext-import-graph',
                     {'default' : '',
                      'type' : 'string',
                      'metavar' : '',
                      'help' : 'Create a graph of external dependencies in the \
     given file (report RP0402 must not be disabled)'}
    -                ),
    +               ),
                    ('int-import-graph',
                     {'default' : '',
                      'type' : 'string',
                      'metavar' : '',
                      'help' : 'Create a graph of internal dependencies in the \
     given file (report RP0402 must not be disabled)'}
    -                ),
    -
    -               )
    +               ),
    +              )
     
         def __init__(self, linter=None):
             BaseChecker.__init__(self, linter)
    @@ -212,27 +225,28 @@ def __init__(self, linter=None):
                              self.report_external_dependencies),
                             ('RP0402', 'Modules dependencies graph',
                              self.report_dependencies_graph),
    -                        )
    +                       )
     
         def open(self):
             """called before visiting project (i.e set of modules)"""
             self.linter.add_stats(dependencies={})
             self.linter.add_stats(cycles=[])
             self.stats = self.linter.stats
    -        self.import_graph = {}
    +        self.import_graph = defaultdict(set)
     
         def close(self):
             """called before visiting project (i.e set of modules)"""
             # don't try to compute cycles if the associated message is disabled
             if self.linter.is_message_enabled('cyclic-import'):
    -            for cycle in get_cycles(self.import_graph):
    +            vertices = list(self.import_graph)
    +            for cycle in get_cycles(self.import_graph, vertices=vertices):
                     self.add_message('cyclic-import', args=' -> '.join(cycle))
     
         def visit_import(self, node):
             """triggered when an import statement is seen"""
             modnode = node.root()
             for name, _ in node.names:
    -            importedmodnode = self.get_imported_module(modnode, node, name)
    +            importedmodnode = self.get_imported_module(node, name)
                 if importedmodnode is None:
                     continue
                 self._check_relative_import(modnode, node, importedmodnode, name)
    @@ -252,14 +266,14 @@ def visit_from(self, node):
                 if prev:
                     # consecutive future statements are possible
                     if not (isinstance(prev, astroid.From)
    -                       and prev.modname == '__future__'):
    +                        and prev.modname == '__future__'):
                         self.add_message('misplaced-future', node=node)
                 return
             for name, _ in node.names:
                 if name == '*':
                     self.add_message('wildcard-import', args=basename, node=node)
             modnode = node.root()
    -        importedmodnode = self.get_imported_module(modnode, node, basename)
    +        importedmodnode = self.get_imported_module(node, basename)
             if importedmodnode is None:
                 return
             self._check_relative_import(modnode, node, importedmodnode, basename)
    @@ -269,15 +283,16 @@ def visit_from(self, node):
                     self._add_imported_module(node, '%s.%s' % (importedmodnode.name, name))
                     self._check_reimport(node, name, basename, node.level)
     
    -    def get_imported_module(self, modnode, importnode, modname):
    +    def get_imported_module(self, importnode, modname):
             try:
                 return importnode.do_import_module(modname)
    -        except astroid.InferenceError, ex:
    +        except astroid.InferenceError as ex:
                 if str(ex) != modname:
                     args = '%r (%s)' % (modname, ex)
                 else:
                     args = repr(modname)
    -            self.add_message("import-error", args=args, node=importnode)
    +            if not _except_import_error(importnode.parent):
    +                self.add_message("import-error", args=args, node=importnode)
     
         def _check_relative_import(self, modnode, importnode, importedmodnode,
                                    importedasname):
    @@ -294,12 +309,16 @@ def _check_relative_import(self, modnode, importnode, importedmodnode,
                 return False
             if importedmodnode.name != importedasname:
                 # this must be a relative import...
    -            self.add_message('relative-import', args=(importedasname, importedmodnode.name),
    +            self.add_message('relative-import',
    +                             args=(importedasname, importedmodnode.name),
                                  node=importnode)
     
         def _add_imported_module(self, node, importedmodname):
             """notify an imported module, used to analyze dependencies"""
    -        importedmodname = get_module_part(importedmodname)
    +        try:
    +            importedmodname = get_module_part(importedmodname)
    +        except ImportError:
    +            pass
             context_name = node.root().name
             if context_name == importedmodname:
                 # module importing itself !
    @@ -311,8 +330,8 @@ def _add_imported_module(self, node, importedmodname):
                 if not context_name in importedmodnames:
                     importedmodnames.add(context_name)
                 # update import graph
    -            mgraph = self.import_graph.setdefault(context_name, set())
    -            if not importedmodname in mgraph:
    +            mgraph = self.import_graph[context_name]
    +            if importedmodname not in mgraph:
                     mgraph.add(importedmodname)
     
         def _check_deprecated_module(self, node, mod_path):
    @@ -339,7 +358,7 @@ def _check_reimport(self, node, name, basename=None, level=None):
     
         def report_external_dependencies(self, sect, _, dummy):
             """return a verbatim layout for displaying dependencies"""
    -        dep_info = make_tree_defs(self._external_dependencies_info().iteritems())
    +        dep_info = make_tree_defs(six.iteritems(self._external_dependencies_info()))
             if not dep_info:
                 raise EmptyReport()
             tree_str = repr_tree_defs(dep_info)
    @@ -369,9 +388,9 @@ def _external_dependencies_info(self):
             cache them
             """
             if self.__ext_dep_info is None:
    -            package = self.linter.base_name
    +            package = self.linter.current_name
                 self.__ext_dep_info = result = {}
    -            for importee, importers in self.stats['dependencies'].iteritems():
    +            for importee, importers in six.iteritems(self.stats['dependencies']):
                     if not importee.startswith(package):
                         result[importee] = importers
             return self.__ext_dep_info
    @@ -381,9 +400,9 @@ def _internal_dependencies_info(self):
             cache them
             """
             if self.__int_dep_info is None:
    -            package = self.linter.base_name
    +            package = self.linter.current_name
                 self.__int_dep_info = result = {}
    -            for importee, importers in self.stats['dependencies'].iteritems():
    +            for importee, importers in six.iteritems(self.stats['dependencies']):
                     if importee.startswith(package):
                         result[importee] = importers
             return self.__int_dep_info
    diff --git a/pymode/libs/pylama/lint/pylama_pylint/pylint/checkers/logging.py b/pymode/libs/pylint/checkers/logging.py
    similarity index 69%
    rename from pymode/libs/pylama/lint/pylama_pylint/pylint/checkers/logging.py
    rename to pymode/libs/pylint/checkers/logging.py
    index cbdf0f2a..897c1c7f 100644
    --- a/pymode/libs/pylama/lint/pylama_pylint/pylint/checkers/logging.py
    +++ b/pymode/libs/pylint/checkers/logging.py
    @@ -20,18 +20,29 @@
     from pylint.checkers import utils
     from pylint.checkers.utils import check_messages
     
    +import six
    +
    +
     MSGS = {
         'W1201': ('Specify string format arguments as logging function parameters',
    -             'logging-not-lazy',
    -             'Used when a logging statement has a call form of '
    -             '"logging.(format_string % (format_args...))". '
    -             'Such calls should leave string interpolation to the logging '
    -             'method itself and be written '
    -             '"logging.(format_string, format_args...)" '
    -             'so that the program may avoid incurring the cost of the '
    -             'interpolation in those cases in which no message will be '
    -             'logged. For more, see '
    -             'http://www.python.org/dev/peps/pep-0282/.'),
    +              'logging-not-lazy',
    +              'Used when a logging statement has a call form of '
    +              '"logging.(format_string % (format_args...))". '
    +              'Such calls should leave string interpolation to the logging '
    +              'method itself and be written '
    +              '"logging.(format_string, format_args...)" '
    +              'so that the program may avoid incurring the cost of the '
    +              'interpolation in those cases in which no message will be '
    +              'logged. For more, see '
    +              'http://www.python.org/dev/peps/pep-0282/.'),
    +    'W1202': ('Use % formatting in logging functions but pass the % '
    +              'parameters as arguments',
    +              'logging-format-interpolation',
    +              'Used when a logging statement has a call form of '
    +              '"logging.(format_string.format(format_args...))"'
    +              '. Such calls should use % formatting instead, but leave '
    +              'interpolation to the logging function by passing the parameters '
    +              'as arguments.'),
         'E1200': ('Unsupported logging format character %r (%#02x) at index %d',
                   'logging-unsupported-format',
                   'Used when an unsupported format character is used in a logging\
    @@ -53,6 +64,27 @@
         'critical', 'debug', 'error', 'exception', 'fatal', 'info', 'warn',
         'warning'])
     
    +def is_method_call(callfunc_node, types=(), methods=()):
    +    """Determines if a CallFunc node represents a method call.
    +
    +    Args:
    +      callfunc_node: The CallFunc AST node to check.
    +      types: Optional sequence of caller type names to restrict check.
    +      methods: Optional sequence of method names to restrict check.
    +
    +    Returns:
    +      True, if the node represents a method call for the given type and
    +      method names, False otherwise.
    +    """
    +    if not isinstance(callfunc_node, astroid.CallFunc):
    +        return False
    +    func = utils.safe_infer(callfunc_node.func)
    +    return (isinstance(func, astroid.BoundMethod)
    +            and isinstance(func.bound, astroid.Instance)
    +            and (func.bound.name in types if types else True)
    +            and (func.name in methods if methods else True))
    +
    +
     
     class LoggingChecker(checkers.BaseChecker):
         """Checks use of the logging module."""
    @@ -62,15 +94,15 @@ class LoggingChecker(checkers.BaseChecker):
         msgs = MSGS
     
         options = (('logging-modules',
    -                {'default' : ('logging',),
    -                 'type' : 'csv',
    -                 'metavar' : '',
    -                 'help' : ('Logging modules to check that the string format '
    -                           'arguments are in logging function parameter format')}
    -                ),
    -               )
    -
    -    def visit_module(self, unused_node):
    +                {'default': ('logging',),
    +                 'type': 'csv',
    +                 'metavar': '',
    +                 'help': 'Logging modules to check that the string format '
    +                         'arguments are in logging function parameter format'}
    +               ),
    +              )
    +
    +    def visit_module(self, node): # pylint: disable=unused-argument
             """Clears any state left in this checker from last module checked."""
             # The code being checked can just as easily "import logging as foo",
             # so it is necessary to process the imports and store in this field
    @@ -105,19 +137,19 @@ def visit_import(self, node):
         def visit_callfunc(self, node):
             """Checks calls to logging methods."""
             def is_logging_name():
    -           return (isinstance(node.func, astroid.Getattr) and
    -                   isinstance(node.func.expr, astroid.Name) and 
    -                   node.func.expr.name in self._logging_names)
    +            return (isinstance(node.func, astroid.Getattr) and
    +                    isinstance(node.func.expr, astroid.Name) and
    +                    node.func.expr.name in self._logging_names)
     
             def is_logger_class():
                 try:
                     for inferred in node.func.infer():
                         if isinstance(inferred, astroid.BoundMethod):
                             parent = inferred._proxied.parent
    -                        if (isinstance(parent, astroid.Class) and 
    -                            (parent.qname() == 'logging.Logger' or 
    -                             any(ancestor.qname() == 'logging.Logger' 
    -                                 for ancestor in parent.ancestors()))):
    +                        if (isinstance(parent, astroid.Class) and
    +                                (parent.qname() == 'logging.Logger' or
    +                                 any(ancestor.qname() == 'logging.Logger'
    +                                     for ancestor in parent.ancestors()))):
                                 return True, inferred._proxied.name
                 except astroid.exceptions.InferenceError:
                     pass
    @@ -150,9 +182,20 @@ def _check_log_method(self, node, name):
     
             if isinstance(node.args[format_pos], astroid.BinOp) and node.args[format_pos].op == '%':
                 self.add_message('logging-not-lazy', node=node)
    +        elif isinstance(node.args[format_pos], astroid.CallFunc):
    +            self._check_call_func(node.args[format_pos])
             elif isinstance(node.args[format_pos], astroid.Const):
                 self._check_format_string(node, format_pos)
     
    +    def _check_call_func(self, callfunc_node):
    +        """Checks that function call is not format_string.format().
    +
    +        Args:
    +          callfunc_node: CallFunc AST node to be checked.
    +        """
    +        if is_method_call(callfunc_node, ('str', 'unicode'), ('format',)):
    +            self.add_message('logging-format-interpolation', node=callfunc_node)
    +
         def _check_format_string(self, node, format_arg):
             """Checks that format string tokens match the supplied arguments.
     
    @@ -166,7 +209,7 @@ def _check_format_string(self, node, format_arg):
                 # don't check any further.
                 return
             format_string = node.args[format_arg].value
    -        if not isinstance(format_string, basestring):
    +        if not isinstance(format_string, six.string_types):
                 # If the log format is constant non-string (e.g. logging.debug(5)),
                 # ensure there are no arguments.
                 required_num_args = 0
    @@ -178,7 +221,7 @@ def _check_format_string(self, node, format_arg):
                         # Keyword checking on logging strings is complicated by
                         # special keywords - out of scope.
                         return
    -            except utils.UnsupportedFormatCharacter, ex:
    +            except utils.UnsupportedFormatCharacter as ex:
                     char = format_string[ex.index]
                     self.add_message('logging-unsupported-format', node=node,
                                      args=(char, ord(char), ex.index))
    diff --git a/pymode/libs/pylama/lint/pylama_pylint/pylint/checkers/misc.py b/pymode/libs/pylint/checkers/misc.py
    similarity index 64%
    rename from pymode/libs/pylama/lint/pylama_pylint/pylint/checkers/misc.py
    rename to pymode/libs/pylint/checkers/misc.py
    index d1b7c216..7fbe70bf 100644
    --- a/pymode/libs/pylama/lint/pylama_pylint/pylint/checkers/misc.py
    +++ b/pymode/libs/pylint/checkers/misc.py
    @@ -21,6 +21,7 @@
     
     from pylint.interfaces import IRawChecker
     from pylint.checkers import BaseChecker
    +import six
     
     
     MSGS = {
    @@ -32,10 +33,11 @@
                   'Used when a source line cannot be decoded using the specified '
                   'source file encoding.',
                   {'maxversion': (3, 0)}),
    -    }
    +}
     
     
     class EncodingChecker(BaseChecker):
    +
         """checks for:
         * warning notes in the code like FIXME, XXX
         * encoding issues.
    @@ -47,22 +49,32 @@ class EncodingChecker(BaseChecker):
         msgs = MSGS
     
         options = (('notes',
    -                {'type' : 'csv', 'metavar' : '',
    -                 'default' : ('FIXME', 'XXX', 'TODO'),
    -                 'help' : 'List of note tags to take in consideration, \
    -separated by a comma.'
    -                 }),
    -               )
    +                {'type': 'csv', 'metavar': '',
    +                 'default': ('FIXME', 'XXX', 'TODO'),
    +                 'help': ('List of note tags to take in consideration, '
    +                          'separated by a comma.')}),)
     
         def _check_note(self, notes, lineno, line):
    +        # First, simply check if the notes are in the line at all. This is an
    +        # optimisation to prevent using the regular expression on every line,
    +        # but rather only on lines which may actually contain one of the notes.
    +        # This prevents a pathological problem with lines that are hundreds
    +        # of thousands of characters long.
    +        for note in self.config.notes:
    +            if note in line:
    +                break
    +        else:
    +            return
    +
             match = notes.search(line)
    -        if match:
    -            self.add_message('fixme', args=line[match.start():-1], line=lineno)
    +        if not match:
    +            return
    +        self.add_message('fixme', args=line[match.start(1):-1], line=lineno)
     
         def _check_encoding(self, lineno, line, file_encoding):
             try:
    -            return unicode(line, file_encoding)
    -        except UnicodeDecodeError, ex:
    +            return six.text_type(line, file_encoding)
    +        except UnicodeDecodeError as ex:
                 self.add_message('invalid-encoded-data', line=lineno,
                                  args=(file_encoding, ex.args[2]))
     
    @@ -70,20 +82,22 @@ def process_module(self, module):
             """inspect the source file to find encoding problem or fixmes like
             notes
             """
    -        stream = module.file_stream
    -        stream.seek(0) # XXX may be removed with astroid > 0.23
             if self.config.notes:
    -            notes = re.compile('|'.join(self.config.notes))
    +            notes = re.compile(
    +                r'.*?#\s*(%s)(:*\s*.+)' % "|".join(self.config.notes))
             else:
                 notes = None
             if module.file_encoding:
                 encoding = module.file_encoding
             else:
                 encoding = 'ascii'
    -        for lineno, line in enumerate(stream):
    -            line = self._check_encoding(lineno+1, line, encoding)
    -            if line is not None and notes:
    -                self._check_note(notes, lineno+1, line)
    +
    +        with module.stream() as stream:
    +            for lineno, line in enumerate(stream):
    +                line = self._check_encoding(lineno + 1, line, encoding)
    +                if line is not None and notes:
    +                    self._check_note(notes, lineno + 1, line)
    +
     
     def register(linter):
         """required method to auto register this checker"""
    diff --git a/pymode/libs/pylama/lint/pylama_pylint/pylint/checkers/newstyle.py b/pymode/libs/pylint/checkers/newstyle.py
    similarity index 76%
    rename from pymode/libs/pylama/lint/pylama_pylint/pylint/checkers/newstyle.py
    rename to pymode/libs/pylint/checkers/newstyle.py
    index f801c443..f74e7f15 100644
    --- a/pymode/libs/pylama/lint/pylama_pylint/pylint/checkers/newstyle.py
    +++ b/pymode/libs/pylint/checkers/newstyle.py
    @@ -19,9 +19,13 @@
     
     import astroid
     
    -from pylint.interfaces import IAstroidChecker
    +from pylint.interfaces import IAstroidChecker, INFERENCE, INFERENCE_FAILURE, HIGH
     from pylint.checkers import BaseChecker
    -from pylint.checkers.utils import check_messages
    +from pylint.checkers.utils import (
    +    check_messages,
    +    has_known_bases,
    +    node_frame_class,
    +)
     
     MSGS = {
         'E1001': ('Use of __slots__ on an old style class',
    @@ -43,7 +47,7 @@
                   {'maxversion': (3, 0)}),
         'W1001': ('Use of "property" on an old style class',
                   'property-on-old-class',
    -              'Used when PyLint detect the use of the builtin "property" \
    +              'Used when Pylint detect the use of the builtin "property" \
                   on an old style class while this is relying on new style \
                   classes features.',
                   {'maxversion': (3, 0)}),
    @@ -74,26 +78,35 @@ class NewStyleConflictChecker(BaseChecker):
     
         @check_messages('slots-on-old-class', 'old-style-class')
         def visit_class(self, node):
    -        """check __slots__ usage
    +        """ Check __slots__ in old style classes and old
    +        style class definition.
             """
             if '__slots__' in node and not node.newstyle:
    -            self.add_message('slots-on-old-class', node=node)
    +            confidence = (INFERENCE if has_known_bases(node)
    +                          else INFERENCE_FAILURE)
    +            self.add_message('slots-on-old-class', node=node,
    +                             confidence=confidence)
             # The node type could be class, exception, metaclass, or
             # interface.  Presumably, the non-class-type nodes would always
             # have an explicit base class anyway.
    -        if not node.bases and node.type == 'class':
    -            self.add_message('old-style-class', node=node)
    +        if not node.bases and node.type == 'class' and not node.metaclass():
    +            # We use confidence HIGH here because this message should only ever
    +            # be emitted for classes at the root of the inheritance hierarchyself.
    +            self.add_message('old-style-class', node=node, confidence=HIGH)
     
         @check_messages('property-on-old-class')
         def visit_callfunc(self, node):
             """check property usage"""
             parent = node.parent.frame()
             if (isinstance(parent, astroid.Class) and
    -            not parent.newstyle and
    -            isinstance(node.func, astroid.Name)):
    +                not parent.newstyle and
    +                isinstance(node.func, astroid.Name)):
    +            confidence = (INFERENCE if has_known_bases(parent)
    +                          else INFERENCE_FAILURE)
                 name = node.func.name
                 if name == 'property':
    -                self.add_message('property-on-old-class', node=node)
    +                self.add_message('property-on-old-class', node=node,
    +                                 confidence=confidence)
     
         @check_messages('super-on-old-class', 'bad-super-call', 'missing-super-argument')
         def visit_function(self, node):
    @@ -103,6 +116,9 @@ def visit_function(self, node):
                 return
             klass = node.parent.frame()
             for stmt in node.nodes_of_class(astroid.CallFunc):
    +            if node_frame_class(stmt) != node_frame_class(node):
    +                # Don't look down in other scopes.
    +                continue
                 expr = stmt.func
                 if not isinstance(expr, astroid.Getattr):
                     continue
    @@ -111,9 +127,12 @@ def visit_function(self, node):
                 if isinstance(call, astroid.CallFunc) and \
                    isinstance(call.func, astroid.Name) and \
                    call.func.name == 'super':
    +                confidence = (INFERENCE if has_known_bases(klass)
    +                              else INFERENCE_FAILURE)
                     if not klass.newstyle:
                         # super should not be used on an old style class
    -                    self.add_message('super-on-old-class', node=node)
    +                    self.add_message('super-on-old-class', node=node,
    +                                     confidence=confidence)
                     else:
                         # super first arg should be the class
                         if not call.args and sys.version_info[0] == 3:
    @@ -121,13 +140,14 @@ def visit_function(self, node):
                             continue
     
                         try:
    -                        supcls = (call.args and call.args[0].infer().next()
    +                        supcls = (call.args and next(call.args[0].infer())
                                       or None)
                         except astroid.InferenceError:
                             continue
     
                         if supcls is None:
    -                        self.add_message('missing-super-argument', node=call)
    +                        self.add_message('missing-super-argument', node=call,
    +                                         confidence=confidence)
                             continue
     
                         if klass is not supcls:
    @@ -143,7 +163,8 @@ def visit_function(self, node):
                             if name is not None:
                                 self.add_message('bad-super-call',
                                                  node=call,
    -                                             args=(name, ))
    +                                             args=(name, ),
    +                                             confidence=confidence)
     
     
     def register(linter):
    diff --git a/pymode/libs/pylint/checkers/python3.py b/pymode/libs/pylint/checkers/python3.py
    new file mode 100644
    index 00000000..837cbef1
    --- /dev/null
    +++ b/pymode/libs/pylint/checkers/python3.py
    @@ -0,0 +1,581 @@
    +# Copyright 2014 Google Inc.
    +# This program is free software; you can redistribute it and/or modify it under
    +# the terms of the GNU General Public License as published by the Free Software
    +# Foundation; either version 2 of the License, or (at your option) any later
    +# version.
    +#
    +# This program is distributed in the hope that it will be useful, but WITHOUT
    +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
    +# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
    +#
    +# You should have received a copy of the GNU General Public License along with
    +# this program; if not, write to the Free Software Foundation, Inc.,
    +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
    +"""Check Python 2 code for Python 2/3 source-compatible issues."""
    +from __future__ import absolute_import, print_function
    +
    +import re
    +import tokenize
    +
    +import astroid
    +from astroid import bases
    +from pylint import checkers, interfaces
    +from pylint.utils import WarningScope
    +from pylint.checkers import utils
    +
    +
    +_ZERO = re.compile("^0+$")
    +
    +def _is_old_octal(literal):
    +    if _ZERO.match(literal):
    +        return False
    +    if re.match('0\d+', literal):
    +        try:
    +            int(literal, 8)
    +        except ValueError:
    +            return False
    +        return True
    +
    +def _check_dict_node(node):
    +    inferred_types = set()
    +    try:
    +        inferred = node.infer()
    +        for inferred_node in inferred:
    +            inferred_types.add(inferred_node)
    +    except (astroid.InferenceError, astroid.UnresolvableName):
    +        pass
    +    return (not inferred_types
    +            or any(isinstance(x, astroid.Dict) for x in inferred_types))
    +
    +def _is_builtin(node):
    +    return getattr(node, 'name', None) in ('__builtin__', 'builtins')
    +
    +_accepts_iterator = {'iter', 'list', 'tuple', 'sorted', 'set', 'sum', 'any',
    +                     'all', 'enumerate', 'dict'}
    +
    +def _in_iterating_context(node):
    +    """Check if the node is being used as an iterator.
    +
    +    Definition is taken from lib2to3.fixer_util.in_special_context().
    +    """
    +    parent = node.parent
    +    # Since a call can't be the loop variant we only need to know if the node's
    +    # parent is a 'for' loop to know it's being used as the iterator for the
    +    # loop.
    +    if isinstance(parent, astroid.For):
    +        return True
    +    # Need to make sure the use of the node is in the iterator part of the
    +    # comprehension.
    +    elif isinstance(parent, astroid.Comprehension):
    +        if parent.iter == node:
    +            return True
    +    # Various built-ins can take in an iterable or list and lead to the same
    +    # value.
    +    elif isinstance(parent, astroid.CallFunc):
    +        if isinstance(parent.func, astroid.Name):
    +            parent_scope = parent.func.lookup(parent.func.name)[0]
    +            if _is_builtin(parent_scope) and parent.func.name in _accepts_iterator:
    +                return True
    +        elif isinstance(parent.func, astroid.Getattr):
    +            if parent.func.attrname == 'join':
    +                return True
    +    # If the call is in an unpacking, there's no need to warn,
    +    # since it can be considered iterating.
    +    elif (isinstance(parent, astroid.Assign) and
    +          isinstance(parent.targets[0], (astroid.List, astroid.Tuple))):
    +        if len(parent.targets[0].elts) > 1:
    +            return True
    +    return False
    +
    +
    +class Python3Checker(checkers.BaseChecker):
    +
    +    __implements__ = interfaces.IAstroidChecker
    +    enabled = False
    +    name = 'python3'
    +
    +    msgs = {
    +        # Errors for what will syntactically break in Python 3, warnings for
    +        # everything else.
    +        'E1601': ('print statement used',
    +                  'print-statement',
    +                  'Used when a print statement is used '
    +                  '(`print` is a function in Python 3)',
    +                  {'maxversion': (3, 0)}),
    +        'E1602': ('Parameter unpacking specified',
    +                  'parameter-unpacking',
    +                  'Used when parameter unpacking is specified for a function'
    +                  "(Python 3 doesn't allow it)",
    +                  {'maxversion': (3, 0)}),
    +        'E1603': ('Implicit unpacking of exceptions is not supported '
    +                  'in Python 3',
    +                  'unpacking-in-except',
    +                  'Python3 will not allow implicit unpacking of '
    +                  'exceptions in except clauses. '
    +                  'See http://www.python.org/dev/peps/pep-3110/',
    +                  {'maxversion': (3, 0),
    +                   'old_names': [('W0712', 'unpacking-in-except')]}),
    +        'E1604': ('Use raise ErrorClass(args) instead of '
    +                  'raise ErrorClass, args.',
    +                  'old-raise-syntax',
    +                  "Used when the alternate raise syntax "
    +                  "'raise foo, bar' is used "
    +                  "instead of 'raise foo(bar)'.",
    +                  {'maxversion': (3, 0),
    +                   'old_names': [('W0121', 'old-raise-syntax')]}),
    +        'E1605': ('Use of the `` operator',
    +                  'backtick',
    +                  'Used when the deprecated "``" (backtick) operator is used '
    +                  'instead of the str() function.',
    +                  {'scope': WarningScope.NODE,
    +                   'maxversion': (3, 0),
    +                   'old_names': [('W0333', 'backtick')]}),
    +        'W1601': ('apply built-in referenced',
    +                  'apply-builtin',
    +                  'Used when the apply built-in function is referenced '
    +                  '(missing from Python 3)',
    +                  {'maxversion': (3, 0)}),
    +        'W1602': ('basestring built-in referenced',
    +                  'basestring-builtin',
    +                  'Used when the basestring built-in function is referenced '
    +                  '(missing from Python 3)',
    +                  {'maxversion': (3, 0)}),
    +        'W1603': ('buffer built-in referenced',
    +                  'buffer-builtin',
    +                  'Used when the buffer built-in function is referenced '
    +                  '(missing from Python 3)',
    +                  {'maxversion': (3, 0)}),
    +        'W1604': ('cmp built-in referenced',
    +                  'cmp-builtin',
    +                  'Used when the cmp built-in function is referenced '
    +                  '(missing from Python 3)',
    +                  {'maxversion': (3, 0)}),
    +        'W1605': ('coerce built-in referenced',
    +                  'coerce-builtin',
    +                  'Used when the coerce built-in function is referenced '
    +                  '(missing from Python 3)',
    +                  {'maxversion': (3, 0)}),
    +        'W1606': ('execfile built-in referenced',
    +                  'execfile-builtin',
    +                  'Used when the execfile built-in function is referenced '
    +                  '(missing from Python 3)',
    +                  {'maxversion': (3, 0)}),
    +        'W1607': ('file built-in referenced',
    +                  'file-builtin',
    +                  'Used when the file built-in function is referenced '
    +                  '(missing from Python 3)',
    +                  {'maxversion': (3, 0)}),
    +        'W1608': ('long built-in referenced',
    +                  'long-builtin',
    +                  'Used when the long built-in function is referenced '
    +                  '(missing from Python 3)',
    +                  {'maxversion': (3, 0)}),
    +        'W1609': ('raw_input built-in referenced',
    +                  'raw_input-builtin',
    +                  'Used when the raw_input built-in function is referenced '
    +                  '(missing from Python 3)',
    +                  {'maxversion': (3, 0)}),
    +        'W1610': ('reduce built-in referenced',
    +                  'reduce-builtin',
    +                  'Used when the reduce built-in function is referenced '
    +                  '(missing from Python 3)',
    +                  {'maxversion': (3, 0)}),
    +        'W1611': ('StandardError built-in referenced',
    +                  'standarderror-builtin',
    +                  'Used when the StandardError built-in function is referenced '
    +                  '(missing from Python 3)',
    +                  {'maxversion': (3, 0)}),
    +        'W1612': ('unicode built-in referenced',
    +                  'unicode-builtin',
    +                  'Used when the unicode built-in function is referenced '
    +                  '(missing from Python 3)',
    +                  {'maxversion': (3, 0)}),
    +        'W1613': ('xrange built-in referenced',
    +                  'xrange-builtin',
    +                  'Used when the xrange built-in function is referenced '
    +                  '(missing from Python 3)',
    +                  {'maxversion': (3, 0)}),
    +        'W1614': ('__coerce__ method defined',
    +                  'coerce-method',
    +                  'Used when a __coerce__ method is defined '
    +                  '(method is not used by Python 3)',
    +                  {'maxversion': (3, 0)}),
    +        'W1615': ('__delslice__ method defined',
    +                  'delslice-method',
    +                  'Used when a __delslice__ method is defined '
    +                  '(method is not used by Python 3)',
    +                  {'maxversion': (3, 0)}),
    +        'W1616': ('__getslice__ method defined',
    +                  'getslice-method',
    +                  'Used when a __getslice__ method is defined '
    +                  '(method is not used by Python 3)',
    +                  {'maxversion': (3, 0)}),
    +        'W1617': ('__setslice__ method defined',
    +                  'setslice-method',
    +                  'Used when a __setslice__ method is defined '
    +                  '(method is not used by Python 3)',
    +                  {'maxversion': (3, 0)}),
    +        'W1618': ('import missing `from __future__ import absolute_import`',
    +                  'no-absolute-import',
    +                  'Used when an import is not accompanied by '
    +                  '``from __future__ import absolute_import`` '
    +                  '(default behaviour in Python 3)',
    +                  {'maxversion': (3, 0)}),
    +        'W1619': ('division w/o __future__ statement',
    +                  'old-division',
    +                  'Used for non-floor division w/o a float literal or '
    +                  '``from __future__ import division`` '
    +                  '(Python 3 returns a float for int division unconditionally)',
    +                  {'maxversion': (3, 0)}),
    +        'W1620': ('Calling a dict.iter*() method',
    +                  'dict-iter-method',
    +                  'Used for calls to dict.iterkeys(), itervalues() or iteritems() '
    +                  '(Python 3 lacks these methods)',
    +                  {'maxversion': (3, 0)}),
    +        'W1621': ('Calling a dict.view*() method',
    +                  'dict-view-method',
    +                  'Used for calls to dict.viewkeys(), viewvalues() or viewitems() '
    +                  '(Python 3 lacks these methods)',
    +                  {'maxversion': (3, 0)}),
    +        'W1622': ('Called a next() method on an object',
    +                  'next-method-called',
    +                  "Used when an object's next() method is called "
    +                  '(Python 3 uses the next() built-in function)',
    +                  {'maxversion': (3, 0)}),
    +        'W1623': ("Assigning to a class' __metaclass__ attribute",
    +                  'metaclass-assignment',
    +                  "Used when a metaclass is specified by assigning to __metaclass__ "
    +                  '(Python 3 specifies the metaclass as a class statement argument)',
    +                  {'maxversion': (3, 0)}),
    +        'W1624': ('Indexing exceptions will not work on Python 3',
    +                  'indexing-exception',
    +                  'Indexing exceptions will not work on Python 3. Use '
    +                  '`exception.args[index]` instead.',
    +                  {'maxversion': (3, 0),
    +                   'old_names': [('W0713', 'indexing-exception')]}),
    +        'W1625': ('Raising a string exception',
    +                  'raising-string',
    +                  'Used when a string exception is raised. This will not '
    +                  'work on Python 3.',
    +                  {'maxversion': (3, 0),
    +                   'old_names': [('W0701', 'raising-string')]}),
    +        'W1626': ('reload built-in referenced',
    +                  'reload-builtin',
    +                  'Used when the reload built-in function is referenced '
    +                  '(missing from Python 3). You can use instead imp.reload '
    +                  'or importlib.reload.',
    +                  {'maxversion': (3, 0)}),
    +        'W1627': ('__oct__ method defined',
    +                  'oct-method',
    +                  'Used when a __oct__ method is defined '
    +                  '(method is not used by Python 3)',
    +                  {'maxversion': (3, 0)}),
    +        'W1628': ('__hex__ method defined',
    +                  'hex-method',
    +                  'Used when a __hex__ method is defined '
    +                  '(method is not used by Python 3)',
    +                  {'maxversion': (3, 0)}),
    +        'W1629': ('__nonzero__ method defined',
    +                  'nonzero-method',
    +                  'Used when a __nonzero__ method is defined '
    +                  '(method is not used by Python 3)',
    +                  {'maxversion': (3, 0)}),
    +        'W1630': ('__cmp__ method defined',
    +                  'cmp-method',
    +                  'Used when a __cmp__ method is defined '
    +                  '(method is not used by Python 3)',
    +                  {'maxversion': (3, 0)}),
    +        # 'W1631': replaced by W1636
    +        'W1632': ('input built-in referenced',
    +                  'input-builtin',
    +                  'Used when the input built-in is referenced '
    +                  '(backwards-incompatible semantics in Python 3)',
    +                  {'maxversion': (3, 0)}),
    +        'W1633': ('round built-in referenced',
    +                  'round-builtin',
    +                  'Used when the round built-in is referenced '
    +                  '(backwards-incompatible semantics in Python 3)',
    +                  {'maxversion': (3, 0)}),
    +        'W1634': ('intern built-in referenced',
    +                  'intern-builtin',
    +                  'Used when the intern built-in is referenced '
    +                  '(Moved to sys.intern in Python 3)',
    +                  {'maxversion': (3, 0)}),
    +        'W1635': ('unichr built-in referenced',
    +                  'unichr-builtin',
    +                  'Used when the unichr built-in is referenced '
    +                  '(Use chr in Python 3)',
    +                  {'maxversion': (3, 0)}),
    +        'W1636': ('map built-in referenced when not iterating',
    +                  'map-builtin-not-iterating',
    +                  'Used when the map built-in is referenced in a non-iterating '
    +                  'context (returns an iterator in Python 3)',
    +                  {'maxversion': (3, 0),
    +                   'old_names': [('W1631', 'implicit-map-evaluation')]}),
    +        'W1637': ('zip built-in referenced when not iterating',
    +                  'zip-builtin-not-iterating',
    +                  'Used when the zip built-in is referenced in a non-iterating '
    +                  'context (returns an iterator in Python 3)',
    +                  {'maxversion': (3, 0)}),
    +        'W1638': ('range built-in referenced when not iterating',
    +                  'range-builtin-not-iterating',
    +                  'Used when the range built-in is referenced in a non-iterating '
    +                  'context (returns an iterator in Python 3)',
    +                  {'maxversion': (3, 0)}),
    +        'W1639': ('filter built-in referenced when not iterating',
    +                  'filter-builtin-not-iterating',
    +                  'Used when the filter built-in is referenced in a non-iterating '
    +                  'context (returns an iterator in Python 3)',
    +                  {'maxversion': (3, 0)}),
    +        'W1640': ('Using the cmp argument for list.sort / sorted',
    +                  'using-cmp-argument',
    +                  'Using the cmp argument for list.sort or the sorted '
    +                  'builtin should be avoided, since it was removed in '
    +                  'Python 3. Using either `key` or `functools.cmp_to_key` '
    +                  'should be preferred.',
    +                  {'maxversion': (3, 0)}),
    +    }
    +
    +    _bad_builtins = frozenset([
    +        'apply',
    +        'basestring',
    +        'buffer',
    +        'cmp',
    +        'coerce',
    +        'execfile',
    +        'file',
    +        'input',  # Not missing, but incompatible semantics
    +        'intern',
    +        'long',
    +        'raw_input',
    +        'reduce',
    +        'round',  # Not missing, but incompatible semantics
    +        'StandardError',
    +        'unichr',
    +        'unicode',
    +        'xrange',
    +        'reload',
    +    ])
    +
    +    _unused_magic_methods = frozenset([
    +        '__coerce__',
    +        '__delslice__',
    +        '__getslice__',
    +        '__setslice__',
    +        '__oct__',
    +        '__hex__',
    +        '__nonzero__',
    +        '__cmp__',
    +    ])
    +
    +    def __init__(self, *args, **kwargs):
    +        self._future_division = False
    +        self._future_absolute_import = False
    +        super(Python3Checker, self).__init__(*args, **kwargs)
    +
    +    def visit_module(self, node): # pylint: disable=unused-argument
    +        """Clear checker state after previous module."""
    +        self._future_division = False
    +        self._future_absolute_import = False
    +
    +    def visit_function(self, node):
    +        if node.is_method() and node.name in self._unused_magic_methods:
    +            method_name = node.name
    +            if node.name.startswith('__'):
    +                method_name = node.name[2:-2]
    +            self.add_message(method_name + '-method', node=node)
    +
    +    @utils.check_messages('parameter-unpacking')
    +    def visit_arguments(self, node):
    +        for arg in node.args:
    +            if isinstance(arg, astroid.Tuple):
    +                self.add_message('parameter-unpacking', node=arg)
    +
    +    def visit_name(self, node):
    +        """Detect when a "bad" built-in is referenced."""
    +        found_node = node.lookup(node.name)[0]
    +        if _is_builtin(found_node):
    +            if node.name in self._bad_builtins:
    +                message = node.name.lower() + '-builtin'
    +                self.add_message(message, node=node)
    +
    +    @utils.check_messages('print-statement')
    +    def visit_print(self, node):
    +        self.add_message('print-statement', node=node)
    +
    +    @utils.check_messages('no-absolute-import')
    +    def visit_from(self, node):
    +        if node.modname == '__future__':
    +            for name, _ in node.names:
    +                if name == 'division':
    +                    self._future_division = True
    +                elif name == 'absolute_import':
    +                    self._future_absolute_import = True
    +        elif not self._future_absolute_import:
    +            self.add_message('no-absolute-import', node=node)
    +
    +    @utils.check_messages('no-absolute-import')
    +    def visit_import(self, node):
    +        if not self._future_absolute_import:
    +            self.add_message('no-absolute-import', node=node)
    +
    +    @utils.check_messages('metaclass-assignment')
    +    def visit_class(self, node):
    +        if '__metaclass__' in node.locals:
    +            self.add_message('metaclass-assignment', node=node)
    +
    +    @utils.check_messages('old-division')
    +    def visit_binop(self, node):
    +        if not self._future_division and node.op == '/':
    +            for arg in (node.left, node.right):
    +                if isinstance(arg, astroid.Const) and isinstance(arg.value, float):
    +                    break
    +            else:
    +                self.add_message('old-division', node=node)
    +
    +    def _check_cmp_argument(self, node):
    +        # Check that the `cmp` argument is used
    +        args = []
    +        if (isinstance(node.func, astroid.Getattr)
    +                and node.func.attrname == 'sort'):
    +            inferred = utils.safe_infer(node.func.expr)
    +            if not inferred:
    +                return
    +
    +            builtins_list = "{}.list".format(bases.BUILTINS)
    +            if (isinstance(inferred, astroid.List)
    +                    or inferred.qname() == builtins_list):
    +                args = node.args
    +
    +        elif (isinstance(node.func, astroid.Name)
    +                and node.func.name == 'sorted'):
    +            inferred = utils.safe_infer(node.func)
    +            if not inferred:
    +                return
    +
    +            builtins_sorted = "{}.sorted".format(bases.BUILTINS)
    +            if inferred.qname() == builtins_sorted:
    +                args = node.args
    +
    +        for arg in args:
    +            if isinstance(arg, astroid.Keyword) and arg.arg == 'cmp':
    +                self.add_message('using-cmp-argument', node=node)
    +                return
    +
    +    def visit_callfunc(self, node):
    +        self._check_cmp_argument(node)
    +
    +        if isinstance(node.func, astroid.Getattr):
    +            if any([node.args, node.starargs, node.kwargs]):
    +                return
    +            if node.func.attrname == 'next':
    +                self.add_message('next-method-called', node=node)
    +            else:
    +                if _check_dict_node(node.func.expr):
    +                    if node.func.attrname in ('iterkeys', 'itervalues', 'iteritems'):
    +                        self.add_message('dict-iter-method', node=node)
    +                    elif node.func.attrname in ('viewkeys', 'viewvalues', 'viewitems'):
    +                        self.add_message('dict-view-method', node=node)
    +        elif isinstance(node.func, astroid.Name):
    +            found_node = node.func.lookup(node.func.name)[0]
    +            if _is_builtin(found_node):
    +                if node.func.name in ('filter', 'map', 'range', 'zip'):
    +                    if not _in_iterating_context(node):
    +                        checker = '{}-builtin-not-iterating'.format(node.func.name)
    +                        self.add_message(checker, node=node)
    +
    +
    +    @utils.check_messages('indexing-exception')
    +    def visit_subscript(self, node):
    +        """ Look for indexing exceptions. """
    +        try:
    +            for infered in node.value.infer():
    +                if not isinstance(infered, astroid.Instance):
    +                    continue
    +                if utils.inherit_from_std_ex(infered):
    +                    self.add_message('indexing-exception', node=node)
    +        except astroid.InferenceError:
    +            return
    +
    +    @utils.check_messages('unpacking-in-except')
    +    def visit_excepthandler(self, node):
    +        """Visit an except handler block and check for exception unpacking."""
    +        if isinstance(node.name, (astroid.Tuple, astroid.List)):
    +            self.add_message('unpacking-in-except', node=node)
    +
    +    @utils.check_messages('backtick')
    +    def visit_backquote(self, node):
    +        self.add_message('backtick', node=node)
    +
    +    @utils.check_messages('raising-string', 'old-raise-syntax')
    +    def visit_raise(self, node):
    +        """Visit a raise statement and check for raising
    +        strings or old-raise-syntax.
    +        """
    +        if (node.exc is not None and
    +                node.inst is not None and
    +                node.tback is None):
    +            self.add_message('old-raise-syntax', node=node)
    +
    +        # Ignore empty raise.
    +        if node.exc is None:
    +            return
    +        expr = node.exc
    +        if self._check_raise_value(node, expr):
    +            return
    +        else:
    +            try:
    +                value = next(astroid.unpack_infer(expr))
    +            except astroid.InferenceError:
    +                return
    +            self._check_raise_value(node, value)
    +
    +    def _check_raise_value(self, node, expr):
    +        if isinstance(expr, astroid.Const):
    +            value = expr.value
    +            if isinstance(value, str):
    +                self.add_message('raising-string', node=node)
    +                return True
    +
    +
    +class Python3TokenChecker(checkers.BaseTokenChecker):
    +    __implements__ = interfaces.ITokenChecker
    +    name = 'python3'
    +    enabled = False
    +
    +    msgs = {
    +        'E1606': ('Use of long suffix',
    +                  'long-suffix',
    +                  'Used when "l" or "L" is used to mark a long integer. '
    +                  'This will not work in Python 3, since `int` and `long` '
    +                  'types have merged.',
    +                  {'maxversion': (3, 0)}),
    +        'E1607': ('Use of the <> operator',
    +                  'old-ne-operator',
    +                  'Used when the deprecated "<>" operator is used instead '
    +                  'of "!=". This is removed in Python 3.',
    +                  {'maxversion': (3, 0),
    +                   'old_names': [('W0331', 'old-ne-operator')]}),
    +        'E1608': ('Use of old octal literal',
    +                  'old-octal-literal',
    +                  'Used when encountering the old octal syntax, '
    +                  'removed in Python 3. To use the new syntax, '
    +                  'prepend 0o on the number.',
    +                  {'maxversion': (3, 0)}),
    +    }
    +
    +    def process_tokens(self, tokens):
    +        for idx, (tok_type, token, start, _, _) in enumerate(tokens):
    +            if tok_type == tokenize.NUMBER:
    +                if token.lower().endswith('l'):
    +                    # This has a different semantic than lowercase-l-suffix.
    +                    self.add_message('long-suffix', line=start[0])
    +                elif _is_old_octal(token):
    +                    self.add_message('old-octal-literal', line=start[0])
    +            if tokens[idx][1] == '<>':
    +                self.add_message('old-ne-operator', line=tokens[idx][2][0])
    +
    +
    +def register(linter):
    +    linter.register_checker(Python3Checker(linter))
    +    linter.register_checker(Python3TokenChecker(linter))
    diff --git a/pymode/libs/pylama/lint/pylama_pylint/pylint/checkers/raw_metrics.py b/pymode/libs/pylint/checkers/raw_metrics.py
    similarity index 100%
    rename from pymode/libs/pylama/lint/pylama_pylint/pylint/checkers/raw_metrics.py
    rename to pymode/libs/pylint/checkers/raw_metrics.py
    diff --git a/pymode/libs/pylama/lint/pylama_pylint/pylint/checkers/similar.py b/pymode/libs/pylint/checkers/similar.py
    similarity index 91%
    rename from pymode/libs/pylama/lint/pylama_pylint/pylint/checkers/similar.py
    rename to pymode/libs/pylint/checkers/similar.py
    index cf671bf6..95420776 100644
    --- a/pymode/libs/pylama/lint/pylama_pylint/pylint/checkers/similar.py
    +++ b/pymode/libs/pylint/checkers/similar.py
    @@ -16,14 +16,18 @@
     # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
     """a similarities / code duplication command line tool and pylint checker
     """
    +from __future__ import print_function
     import sys
    -from itertools import izip
    +from collections import defaultdict
     
     from logilab.common.ureports import Table
     
     from pylint.interfaces import IRawChecker
     from pylint.checkers import BaseChecker, table_lines_from_stats
     
    +import six
    +from six.moves import zip
    +
     
     class Similar(object):
         """finds copy-pasted lines of code in a project"""
    @@ -38,7 +42,6 @@ def __init__(self, min_lines=4, ignore_comments=False,
     
         def append_stream(self, streamid, stream, encoding=None):
             """append a file to search for similarities"""
    -        stream.seek(0) # XXX may be removed with astroid > 0.23
             if encoding is None:
                 readlines = stream.readlines
             else:
    @@ -58,9 +61,9 @@ def run(self):
     
         def _compute_sims(self):
             """compute similarities in appended files"""
    -        no_duplicates = {}
    +        no_duplicates = defaultdict(list)
             for num, lineset1, idx1, lineset2, idx2 in self._iter_sims():
    -            duplicate = no_duplicates.setdefault(num, [])
    +            duplicate = no_duplicates[num]
                 for couples in duplicate:
                     if (lineset1, idx1) in couples or (lineset2, idx2) in couples:
                         couples.add((lineset1, idx1))
    @@ -69,7 +72,7 @@ def _compute_sims(self):
                 else:
                     duplicate.append(set([(lineset1, idx1), (lineset2, idx2)]))
             sims = []
    -        for num, ensembles in no_duplicates.iteritems():
    +        for num, ensembles in six.iteritems(no_duplicates):
                 for couples in ensembles:
                     sims.append((num, couples))
             sims.sort()
    @@ -80,19 +83,19 @@ def _display_sims(self, sims):
             """display computed similarities on stdout"""
             nb_lignes_dupliquees = 0
             for num, couples in sims:
    -            print
    -            print num, "similar lines in", len(couples), "files"
    +            print()
    +            print(num, "similar lines in", len(couples), "files")
                 couples = sorted(couples)
                 for lineset, idx in couples:
    -                print "==%s:%s" % (lineset.name, idx)
    +                print("==%s:%s" % (lineset.name, idx))
                 # pylint: disable=W0631
                 for line in lineset._real_lines[idx:idx+num]:
    -                print "  ", line.rstrip()
    +                print("  ", line.rstrip())
                 nb_lignes_dupliquees += num * (len(couples)-1)
             nb_total_lignes = sum([len(lineset) for lineset in self.linesets])
    -        print "TOTAL lines=%s duplicates=%s percent=%.2f" \
    +        print("TOTAL lines=%s duplicates=%s percent=%.2f" \
                 % (nb_total_lignes, nb_lignes_dupliquees,
    -               nb_lignes_dupliquees*100. / nb_total_lignes)
    +               nb_lignes_dupliquees*100. / nb_total_lignes))
     
         def _find_common(self, lineset1, lineset2):
             """find similarities in the two given linesets"""
    @@ -107,7 +110,7 @@ def _find_common(self, lineset1, lineset2):
                 for index2 in find(lineset1[index1]):
                     non_blank = 0
                     for num, ((_, line1), (_, line2)) in enumerate(
    -                    izip(lines1(index1), lines2(index2))):
    +                        zip(lines1(index1), lines2(index2))):
                         if line1 != line2:
                             if non_blank > min_lines:
                                 yield num, lineset1, index1, lineset2, index2
    @@ -207,10 +210,10 @@ def find(self, stripped_line):
     
         def _mk_index(self):
             """create the index for this set"""
    -        index = {}
    +        index = defaultdict(list)
             for line_no, line in enumerate(self._stripped_lines):
                 if line:
    -                index.setdefault(line, []).append(line_no)
    +                index[line].append(line_no)
             return index
     
     
    @@ -249,16 +252,16 @@ class SimilarChecker(BaseChecker, Similar):
                    ('ignore-comments',
                     {'default' : True, 'type' : 'yn', 'metavar' : '',
                      'help': 'Ignore comments when computing similarities.'}
    -                ),
    +               ),
                    ('ignore-docstrings',
                     {'default' : True, 'type' : 'yn', 'metavar' : '',
                      'help': 'Ignore docstrings when computing similarities.'}
    -                ),
    +               ),
                    ('ignore-imports',
                     {'default' : False, 'type' : 'yn', 'metavar' : '',
                      'help': 'Ignore imports when computing similarities.'}
    -                ),
    -               )
    +               ),
    +              )
         # reports
         reports = (('RP0801', 'Duplication', report_similarities),)
     
    @@ -296,7 +299,10 @@ def process_module(self, node):
     
             stream must implement the readlines method
             """
    -        self.append_stream(self.linter.current_name, node.file_stream, node.file_encoding)
    +        with node.stream() as stream:
    +            self.append_stream(self.linter.current_name,
    +                               stream,
    +                               node.file_encoding)
     
         def close(self):
             """compute and display similarities on closing (i.e. end of parsing)"""
    @@ -323,10 +329,10 @@ def register(linter):
     
     def usage(status=0):
         """display command line usage information"""
    -    print "finds copy pasted blocks in a set of files"
    -    print
    -    print 'Usage: symilar [-d|--duplicates min_duplicated_lines] \
    -[-i|--ignore-comments] [--ignore-docstrings] [--ignore-imports] file1...'
    +    print("finds copy pasted blocks in a set of files")
    +    print()
    +    print('Usage: symilar [-d|--duplicates min_duplicated_lines] \
    +[-i|--ignore-comments] [--ignore-docstrings] [--ignore-imports] file1...')
         sys.exit(status)
     
     def Run(argv=None):
    @@ -357,7 +363,8 @@ def Run(argv=None):
             usage(1)
         sim = Similar(min_lines, ignore_comments, ignore_docstrings, ignore_imports)
         for filename in args:
    -        sim.append_stream(filename, open(filename))
    +        with open(filename) as stream:
    +            sim.append_stream(filename, stream)
         sim.run()
         sys.exit(0)
     
    diff --git a/pymode/libs/pylint/checkers/spelling.py b/pymode/libs/pylint/checkers/spelling.py
    new file mode 100644
    index 00000000..f6edd5db
    --- /dev/null
    +++ b/pymode/libs/pylint/checkers/spelling.py
    @@ -0,0 +1,250 @@
    +# Copyright 2014 Michal Nowikowski.
    +#
    +# This program is free software; you can redistribute it and/or modify it under
    +# the terms of the GNU General Public License as published by the Free Software
    +# Foundation; either version 2 of the License, or (at your option) any later
    +# version.
    +#
    +# This program is distributed in the hope that it will be useful, but WITHOUT
    +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
    +# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details
    +#
    +# You should have received a copy of the GNU General Public License along with
    +# this program; if not, write to the Free Software Foundation, Inc.,
    +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
    +"""Checker for spelling errors in comments and docstrings.
    +"""
    +
    +import sys
    +import tokenize
    +import string
    +import re
    +
    +if sys.version_info[0] >= 3:
    +    maketrans = str.maketrans
    +else:
    +    maketrans = string.maketrans
    +
    +from pylint.interfaces import ITokenChecker, IAstroidChecker
    +from pylint.checkers import BaseTokenChecker
    +from pylint.checkers.utils import check_messages
    +
    +try:
    +    import enchant
    +except ImportError:
    +    enchant = None
    +
    +if enchant is not None:
    +    br = enchant.Broker()
    +    dicts = br.list_dicts()
    +    dict_choices = [''] + [d[0] for d in dicts]
    +    dicts = ["%s (%s)" % (d[0], d[1].name) for d in dicts]
    +    dicts = ", ".join(dicts)
    +    instr = ""
    +else:
    +    dicts = "none"
    +    dict_choices = ['']
    +    instr = " To make it working install python-enchant package."
    +
    +table = maketrans("", "")
    +
    +class SpellingChecker(BaseTokenChecker):
    +    """Check spelling in comments and docstrings"""
    +    __implements__ = (ITokenChecker, IAstroidChecker)
    +    name = 'spelling'
    +    msgs = {
    +        'C0401': ('Wrong spelling of a word \'%s\' in a comment:\n%s\n'
    +                  '%s\nDid you mean: \'%s\'?',
    +                  'wrong-spelling-in-comment',
    +                  'Used when a word in comment is not spelled correctly.'),
    +        'C0402': ('Wrong spelling of a word \'%s\' in a docstring:\n%s\n'
    +                  '%s\nDid you mean: \'%s\'?',
    +                  'wrong-spelling-in-docstring',
    +                  'Used when a word in docstring is not spelled correctly.'),
    +        'C0403': ('Invalid characters %r in a docstring',
    +                  'invalid-characters-in-docstring',
    +                  'Used when a word in docstring cannot be checked by enchant.'),
    +        }
    +    options = (('spelling-dict',
    +                {'default' : '', 'type' : 'choice', 'metavar' : '',
    +                 'choices': dict_choices,
    +                 'help' : 'Spelling dictionary name. '
    +                          'Available dictionaries: %s.%s' % (dicts, instr)}),
    +               ('spelling-ignore-words',
    +                {'default' : '',
    +                 'type' : 'string',
    +                 'metavar' : '',
    +                 'help' : 'List of comma separated words that '
    +                          'should not be checked.'}),
    +               ('spelling-private-dict-file',
    +                {'default' : '',
    +                 'type' : 'string',
    +                 'metavar' : '',
    +                 'help' : 'A path to a file that contains private '
    +                          'dictionary; one word per line.'}),
    +               ('spelling-store-unknown-words',
    +                {'default' : 'n', 'type' : 'yn', 'metavar' : '',
    +                 'help' : 'Tells whether to store unknown words to '
    +                          'indicated private dictionary in '
    +                          '--spelling-private-dict-file option instead of '
    +                          'raising a message.'}),
    +              )
    +
    +    def open(self):
    +        self.initialized = False
    +        self.private_dict_file = None
    +
    +        if enchant is None:
    +            return
    +        dict_name = self.config.spelling_dict
    +        if not dict_name:
    +            return
    +
    +        self.ignore_list = [w.strip() for w in self.config.spelling_ignore_words.split(",")]
    +        # "param" appears in docstring in param description and
    +        # "pylint" appears in comments in pylint pragmas.
    +        self.ignore_list.extend(["param", "pylint"])
    +
    +        if self.config.spelling_private_dict_file:
    +            self.spelling_dict = enchant.DictWithPWL(
    +                dict_name, self.config.spelling_private_dict_file)
    +            self.private_dict_file = open(
    +                self.config.spelling_private_dict_file, "a")
    +        else:
    +            self.spelling_dict = enchant.Dict(dict_name)
    +
    +        if self.config.spelling_store_unknown_words:
    +            self.unknown_words = set()
    +
    +        # Prepare regex for stripping punctuation signs from text.
    +        # ' and _ are treated in a special way.
    +        puncts = string.punctuation.replace("'", "").replace("_", "")
    +        self.punctuation_regex = re.compile('[%s]' % re.escape(puncts))
    +        self.initialized = True
    +
    +    def close(self):
    +        if self.private_dict_file:
    +            self.private_dict_file.close()
    +
    +    def _check_spelling(self, msgid, line, line_num):
    +        line2 = line.strip()
    +        # Replace ['afadf with afadf (but preserve don't)
    +        line2 = re.sub("'([^a-zA-Z]|$)", " ", line2)
    +        # Replace afadf'] with afadf (but preserve don't)
    +        line2 = re.sub("([^a-zA-Z]|^)'", " ", line2)
    +        # Replace punctuation signs with space e.g. and/or -> and or
    +        line2 = self.punctuation_regex.sub(' ', line2)
    +
    +        words = []
    +        for word in line2.split():
    +            # Skip words with digits.
    +            if len(re.findall(r"\d", word)) > 0:
    +                continue
    +
    +            # Skip words with mixed big and small letters,
    +            # they are probably class names.
    +            if (len(re.findall("[A-Z]", word)) > 0 and
    +                    len(re.findall("[a-z]", word)) > 0 and
    +                    len(word) > 2):
    +                continue
    +
    +            # Skip words with _ - they are probably function parameter names.
    +            if word.count('_') > 0:
    +                continue
    +
    +            words.append(word)
    +
    +        # Go through words and check them.
    +        for word in words:
    +            # Skip words from ignore list.
    +            if word in self.ignore_list:
    +                continue
    +
    +            orig_word = word
    +            word = word.lower()
    +
    +            # Strip starting u' from unicode literals and r' from raw strings.
    +            if (word.startswith("u'") or
    +                    word.startswith('u"') or
    +                    word.startswith("r'") or
    +                    word.startswith('r"')) and len(word) > 2:
    +                word = word[2:]
    +
    +            # If it is a known word, then continue.
    +            try:
    +                if self.spelling_dict.check(word):
    +                    continue
    +            except enchant.errors.Error:
    +                # this can only happen in docstrings, not comments
    +                self.add_message('invalid-characters-in-docstring',
    +                                 line=line_num, args=(word,))
    +                continue
    +
    +            # Store word to private dict or raise a message.
    +            if self.config.spelling_store_unknown_words:
    +                if word not in self.unknown_words:
    +                    self.private_dict_file.write("%s\n" % word)
    +                    self.unknown_words.add(word)
    +            else:
    +                # Present up to 4 suggestions.
    +                # TODO: add support for customising this.
    +                suggestions = self.spelling_dict.suggest(word)[:4]
    +
    +                m = re.search(r"(\W|^)(%s)(\W|$)" % word, line.lower())
    +                if m:
    +                    # Start position of second group in regex.
    +                    col = m.regs[2][0]
    +                else:
    +                    col = line.lower().index(word)
    +                indicator = (" " * col) + ("^" * len(word))
    +
    +                self.add_message(msgid, line=line_num,
    +                                 args=(orig_word, line,
    +                                       indicator,
    +                                       "' or '".join(suggestions)))
    +
    +    def process_tokens(self, tokens):
    +        if not self.initialized:
    +            return
    +
    +        # Process tokens and look for comments.
    +        for (tok_type, token, (start_row, _), _, _) in tokens:
    +            if tok_type == tokenize.COMMENT:
    +                self._check_spelling('wrong-spelling-in-comment',
    +                                     token, start_row)
    +
    +    @check_messages('wrong-spelling-in-docstring')
    +    def visit_module(self, node):
    +        if not self.initialized:
    +            return
    +        self._check_docstring(node)
    +
    +    @check_messages('wrong-spelling-in-docstring')
    +    def visit_class(self, node):
    +        if not self.initialized:
    +            return
    +        self._check_docstring(node)
    +
    +    @check_messages('wrong-spelling-in-docstring')
    +    def visit_function(self, node):
    +        if not self.initialized:
    +            return
    +        self._check_docstring(node)
    +
    +    def _check_docstring(self, node):
    +        """check the node has any spelling errors"""
    +        docstring = node.doc
    +        if not docstring:
    +            return
    +
    +        start_line = node.lineno + 1
    +
    +        # Go through lines of docstring
    +        for idx, line in enumerate(docstring.splitlines()):
    +            self._check_spelling('wrong-spelling-in-docstring',
    +                                 line, start_line + idx)
    +
    +
    +def register(linter):
    +    """required method to auto register this checker """
    +    linter.register_checker(SpellingChecker(linter))
    diff --git a/pymode/libs/pylint/checkers/stdlib.py b/pymode/libs/pylint/checkers/stdlib.py
    new file mode 100644
    index 00000000..a3a61063
    --- /dev/null
    +++ b/pymode/libs/pylint/checkers/stdlib.py
    @@ -0,0 +1,216 @@
    +# Copyright 2012 Google Inc.
    +#
    +# http://www.logilab.fr/ -- mailto:contact@logilab.fr
    +# This program is free software; you can redistribute it and/or modify it under
    +# the terms of the GNU General Public License as published by the Free Software
    +# Foundation; either version 2 of the License, or (at your option) any later
    +# version.
    +#
    +# This program is distributed in the hope that it will be useful, but WITHOUT
    +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
    +# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details
    +#
    +# You should have received a copy of the GNU General Public License along with
    +# this program; if not, write to the Free Software Foundation, Inc.,
    +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
    +"""Checkers for various standard library functions."""
    +
    +import six
    +import sys
    +
    +import astroid
    +from astroid.bases import Instance
    +
    +from pylint.interfaces import IAstroidChecker
    +from pylint.checkers import BaseChecker
    +from pylint.checkers import utils
    +
    +
    +TYPECHECK_COMPARISON_OPERATORS = frozenset(('is', 'is not', '==', '!=', 'in', 'not in'))
    +LITERAL_NODE_TYPES = (astroid.Const, astroid.Dict, astroid.List, astroid.Set)
    +
    +if sys.version_info >= (3, 0):
    +    OPEN_MODULE = '_io'
    +    TYPE_QNAME = 'builtins.type'
    +else:
    +    OPEN_MODULE = '__builtin__'
    +    TYPE_QNAME = '__builtin__.type'
    +
    +
    +def _check_mode_str(mode):
    +    # check type
    +    if not isinstance(mode, six.string_types):
    +        return False
    +    # check syntax
    +    modes = set(mode)
    +    _mode = "rwatb+U"
    +    creating = False
    +    if six.PY3:
    +        _mode += "x"
    +        creating = "x" in modes
    +    if modes - set(_mode) or len(mode) > len(modes):
    +        return False
    +    # check logic
    +    reading = "r" in modes
    +    writing = "w" in modes
    +    appending = "a" in modes
    +    text = "t" in modes
    +    binary = "b" in modes
    +    if "U" in modes:
    +        if writing or appending or creating and six.PY3:
    +            return False
    +        reading = True
    +        if not six.PY3:
    +            binary = True
    +    if text and binary:
    +        return False
    +    total = reading + writing + appending + (creating if six.PY3 else 0)
    +    if total > 1:
    +        return False
    +    if not (reading or writing or appending or creating and six.PY3):
    +        return False
    +    # other 2.x constraints
    +    if not six.PY3:
    +        if "U" in mode:
    +            mode = mode.replace("U", "")
    +            if "r" not in mode:
    +                mode = "r" + mode
    +        return mode[0] in ("r", "w", "a", "U")
    +    return True
    +
    +
    +def _is_one_arg_pos_call(call):
    +    """Is this a call with exactly 1 argument,
    +    where that argument is positional?
    +    """
    +    return (isinstance(call, astroid.CallFunc)
    +            and len(call.args) == 1
    +            and not isinstance(call.args[0], astroid.Keyword))
    +
    +
    +class StdlibChecker(BaseChecker):
    +    __implements__ = (IAstroidChecker,)
    +    name = 'stdlib'
    +
    +    msgs = {
    +        'W1501': ('"%s" is not a valid mode for open.',
    +                  'bad-open-mode',
    +                  'Python supports: r, w, a[, x] modes with b, +, '
    +                  'and U (only with r) options. '
    +                  'See http://docs.python.org/2/library/functions.html#open'),
    +        'W1502': ('Using datetime.time in a boolean context.',
    +                  'boolean-datetime',
    +                  'Using datetime.time in a boolean context can hide '
    +                  'subtle bugs when the time they represent matches '
    +                  'midnight UTC. This behaviour was fixed in Python 3.5. '
    +                  'See http://bugs.python.org/issue13936 for reference.',
    +                  {'maxversion': (3, 5)}),
    +        'W1503': ('Redundant use of %s with constant '
    +                  'value %r',
    +                  'redundant-unittest-assert',
    +                  'The first argument of assertTrue and assertFalse is '
    +                  'a condition. If a constant is passed as parameter, that '
    +                  'condition will be always true. In this case a warning '
    +                  'should be emitted.'),
    +        'W1504': ('Using type() instead of isinstance() for a typecheck.',
    +                  'unidiomatic-typecheck',
    +                  'The idiomatic way to perform an explicit typecheck in '
    +                  'Python is to use isinstance(x, Y) rather than '
    +                  'type(x) == Y, type(x) is Y. Though there are unusual '
    +                  'situations where these give different results.')
    +    }
    +
    +    @utils.check_messages('bad-open-mode', 'redundant-unittest-assert')
    +    def visit_callfunc(self, node):
    +        """Visit a CallFunc node."""
    +        if hasattr(node, 'func'):
    +            infer = utils.safe_infer(node.func)
    +            if infer:
    +                if infer.root().name == OPEN_MODULE:
    +                    if getattr(node.func, 'name', None) in ('open', 'file'):
    +                        self._check_open_mode(node)
    +                if infer.root().name == 'unittest.case':
    +                    self._check_redundant_assert(node, infer)
    +
    +    @utils.check_messages('boolean-datetime')
    +    def visit_unaryop(self, node):
    +        if node.op == 'not':
    +            self._check_datetime(node.operand)
    +
    +    @utils.check_messages('boolean-datetime')
    +    def visit_if(self, node):
    +        self._check_datetime(node.test)
    +
    +    @utils.check_messages('boolean-datetime')
    +    def visit_ifexp(self, node):
    +        self._check_datetime(node.test)
    +
    +    @utils.check_messages('boolean-datetime')
    +    def visit_boolop(self, node):
    +        for value in node.values:
    +            self._check_datetime(value)
    +
    +    @utils.check_messages('unidiomatic-typecheck')
    +    def visit_compare(self, node):
    +        operator, right = node.ops[0]
    +        if operator in TYPECHECK_COMPARISON_OPERATORS:
    +            left = node.left
    +            if _is_one_arg_pos_call(left):
    +                self._check_type_x_is_y(node, left, operator, right)
    +
    +    def _check_redundant_assert(self, node, infer):
    +        if (isinstance(infer, astroid.BoundMethod) and
    +                node.args and isinstance(node.args[0], astroid.Const) and
    +                infer.name in ['assertTrue', 'assertFalse']):
    +            self.add_message('redundant-unittest-assert',
    +                             args=(infer.name, node.args[0].value, ),
    +                             node=node)
    +
    +    def _check_datetime(self, node):
    +        """ Check that a datetime was inferred.
    +        If so, emit boolean-datetime warning.
    +        """
    +        try:
    +            infered = next(node.infer())
    +        except astroid.InferenceError:
    +            return
    +        if (isinstance(infered, Instance) and
    +                infered.qname() == 'datetime.time'):
    +            self.add_message('boolean-datetime', node=node)
    +
    +    def _check_open_mode(self, node):
    +        """Check that the mode argument of an open or file call is valid."""
    +        try:
    +            mode_arg = utils.get_argument_from_call(node, position=1,
    +                                                    keyword='mode')
    +        except utils.NoSuchArgumentError:
    +            return
    +        if mode_arg:
    +            mode_arg = utils.safe_infer(mode_arg)
    +            if (isinstance(mode_arg, astroid.Const)
    +                    and not _check_mode_str(mode_arg.value)):
    +                self.add_message('bad-open-mode', node=node,
    +                                 args=mode_arg.value)
    +
    +    def _check_type_x_is_y(self, node, left, operator, right):
    +        """Check for expressions like type(x) == Y."""
    +        left_func = utils.safe_infer(left.func)
    +        if not (isinstance(left_func, astroid.Class)
    +                and left_func.qname() == TYPE_QNAME):
    +            return
    +
    +        if operator in ('is', 'is not') and _is_one_arg_pos_call(right):
    +            right_func = utils.safe_infer(right.func)
    +            if (isinstance(right_func, astroid.Class)
    +                    and right_func.qname() == TYPE_QNAME):
    +                # type(x) == type(a)
    +                right_arg = utils.safe_infer(right.args[0])
    +                if not isinstance(right_arg, LITERAL_NODE_TYPES):
    +                    # not e.g. type(x) == type([])
    +                    return
    +        self.add_message('unidiomatic-typecheck', node=node)
    +
    +
    +def register(linter):
    +    """required method to auto register this checker """
    +    linter.register_checker(StdlibChecker(linter))
    diff --git a/pymode/libs/pylint/checkers/strings.py b/pymode/libs/pylint/checkers/strings.py
    new file mode 100644
    index 00000000..8892c2cc
    --- /dev/null
    +++ b/pymode/libs/pylint/checkers/strings.py
    @@ -0,0 +1,615 @@
    +# Copyright (c) 2009-2010 Arista Networks, Inc. - James Lingard
    +# Copyright (c) 2004-2013 LOGILAB S.A. (Paris, FRANCE).
    +# Copyright 2012 Google Inc.
    +#
    +# http://www.logilab.fr/ -- mailto:contact@logilab.fr
    +# This program is free software; you can redistribute it and/or modify it under
    +# the terms of the GNU General Public License as published by the Free Software
    +# Foundation; either version 2 of the License, or (at your option) any later
    +# version.
    +#
    +# This program is distributed in the hope that it will be useful, but WITHOUT
    +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
    +# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details
    +#
    +# You should have received a copy of the GNU General Public License along with
    +# this program; if not, write to the Free Software Foundation, Inc.,
    +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
    +"""Checker for string formatting operations.
    +"""
    +
    +import sys
    +import tokenize
    +import string
    +import numbers
    +
    +import astroid
    +
    +from pylint.interfaces import ITokenChecker, IAstroidChecker, IRawChecker
    +from pylint.checkers import BaseChecker, BaseTokenChecker
    +from pylint.checkers import utils
    +from pylint.checkers.utils import check_messages
    +
    +import six
    +
    +
    +_PY3K = sys.version_info[:2] >= (3, 0)
    +_PY27 = sys.version_info[:2] == (2, 7)
    +
    +MSGS = {
    +    'E1300': ("Unsupported format character %r (%#02x) at index %d",
    +              "bad-format-character",
    +              "Used when a unsupported format character is used in a format\
    +              string."),
    +    'E1301': ("Format string ends in middle of conversion specifier",
    +              "truncated-format-string",
    +              "Used when a format string terminates before the end of a \
    +              conversion specifier."),
    +    'E1302': ("Mixing named and unnamed conversion specifiers in format string",
    +              "mixed-format-string",
    +              "Used when a format string contains both named (e.g. '%(foo)d') \
    +              and unnamed (e.g. '%d') conversion specifiers.  This is also \
    +              used when a named conversion specifier contains * for the \
    +              minimum field width and/or precision."),
    +    'E1303': ("Expected mapping for format string, not %s",
    +              "format-needs-mapping",
    +              "Used when a format string that uses named conversion specifiers \
    +              is used with an argument that is not a mapping."),
    +    'W1300': ("Format string dictionary key should be a string, not %s",
    +              "bad-format-string-key",
    +              "Used when a format string that uses named conversion specifiers \
    +              is used with a dictionary whose keys are not all strings."),
    +    'W1301': ("Unused key %r in format string dictionary",
    +              "unused-format-string-key",
    +              "Used when a format string that uses named conversion specifiers \
    +              is used with a dictionary that conWtains keys not required by the \
    +              format string."),
    +    'E1304': ("Missing key %r in format string dictionary",
    +              "missing-format-string-key",
    +              "Used when a format string that uses named conversion specifiers \
    +              is used with a dictionary that doesn't contain all the keys \
    +              required by the format string."),
    +    'E1305': ("Too many arguments for format string",
    +              "too-many-format-args",
    +              "Used when a format string that uses unnamed conversion \
    +              specifiers is given too many arguments."),
    +    'E1306': ("Not enough arguments for format string",
    +              "too-few-format-args",
    +              "Used when a format string that uses unnamed conversion \
    +              specifiers is given too few arguments"),
    +
    +    'W1302': ("Invalid format string",
    +              "bad-format-string",
    +              "Used when a PEP 3101 format string is invalid.",
    +              {'minversion': (2, 7)}),
    +    'W1303': ("Missing keyword argument %r for format string",
    +              "missing-format-argument-key",
    +              "Used when a PEP 3101 format string that uses named fields "
    +              "doesn't receive one or more required keywords.",
    +              {'minversion': (2, 7)}),
    +    'W1304': ("Unused format argument %r",
    +              "unused-format-string-argument",
    +              "Used when a PEP 3101 format string that uses named "
    +              "fields is used with an argument that "
    +              "is not required by the format string.",
    +              {'minversion': (2, 7)}),
    +    'W1305': ("Format string contains both automatic field numbering "
    +              "and manual field specification",
    +              "format-combined-specification",
    +              "Usen when a PEP 3101 format string contains both automatic "
    +              "field numbering (e.g. '{}') and manual field "
    +              "specification (e.g. '{0}').",
    +              {'minversion': (2, 7)}),
    +    'W1306': ("Missing format attribute %r in format specifier %r",
    +              "missing-format-attribute",
    +              "Used when a PEP 3101 format string uses an "
    +              "attribute specifier ({0.length}), but the argument "
    +              "passed for formatting doesn't have that attribute.",
    +              {'minversion': (2, 7)}),
    +    'W1307': ("Using invalid lookup key %r in format specifier %r",
    +              "invalid-format-index",
    +              "Used when a PEP 3101 format string uses a lookup specifier "
    +              "({a[1]}), but the argument passed for formatting "
    +              "doesn't contain or doesn't have that key as an attribute.",
    +              {'minversion': (2, 7)})
    +    }
    +
    +OTHER_NODES = (astroid.Const, astroid.List, astroid.Backquote,
    +               astroid.Lambda, astroid.Function,
    +               astroid.ListComp, astroid.SetComp, astroid.GenExpr)
    +
    +if _PY3K:
    +    import _string
    +
    +    def split_format_field_names(format_string):
    +        return _string.formatter_field_name_split(format_string)
    +else:
    +    def _field_iterator_convertor(iterator):
    +        for is_attr, key in iterator:
    +            if isinstance(key, numbers.Number):
    +                yield is_attr, int(key)
    +            else:
    +                yield is_attr, key
    +
    +    def split_format_field_names(format_string):
    +        keyname, fielditerator = format_string._formatter_field_name_split()
    +        # it will return longs, instead of ints, which will complicate
    +        # the output
    +        return keyname, _field_iterator_convertor(fielditerator)
    +
    +
    +def collect_string_fields(format_string):
    +    """ Given a format string, return an iterator
    +    of all the valid format fields. It handles nested fields
    +    as well.
    +    """
    +
    +    formatter = string.Formatter()
    +    try:
    +        parseiterator = formatter.parse(format_string)
    +        for result in parseiterator:
    +            if all(item is None for item in result[1:]):
    +                # not a replacement format
    +                continue
    +            name = result[1]
    +            nested = result[2]
    +            yield name
    +            if nested:
    +                for field in collect_string_fields(nested):
    +                    yield field
    +    except ValueError:
    +        # probably the format string is invalid
    +        # should we check the argument of the ValueError?
    +        raise utils.IncompleteFormatString(format_string)
    +
    +def parse_format_method_string(format_string):
    +    """
    +    Parses a PEP 3101 format string, returning a tuple of
    +    (keys, num_args, manual_pos_arg),
    +    where keys is the set of mapping keys in the format string, num_args
    +    is the number of arguments required by the format string and
    +    manual_pos_arg is the number of arguments passed with the position.
    +    """
    +    keys = []
    +    num_args = 0
    +    manual_pos_arg = set()
    +    for name in collect_string_fields(format_string):
    +        if name and str(name).isdigit():
    +            manual_pos_arg.add(str(name))
    +        elif name:
    +            keyname, fielditerator = split_format_field_names(name)
    +            if isinstance(keyname, numbers.Number):
    +                # In Python 2 it will return long which will lead
    +                # to different output between 2 and 3
    +                manual_pos_arg.add(str(keyname))
    +                keyname = int(keyname)
    +            keys.append((keyname, list(fielditerator)))
    +        else:
    +            num_args += 1
    +    return keys, num_args, len(manual_pos_arg)
    +
    +def get_args(callfunc):
    +    """ Get the arguments from the given `CallFunc` node.
    +    Return a tuple, where the first element is the
    +    number of positional arguments and the second element
    +    is the keyword arguments in a dict.
    +    """
    +    positional = 0
    +    named = {}
    +
    +    for arg in callfunc.args:
    +        if isinstance(arg, astroid.Keyword):
    +            named[arg.arg] = utils.safe_infer(arg.value)
    +        else:
    +            positional += 1
    +    return positional, named
    +
    +def get_access_path(key, parts):
    +    """ Given a list of format specifiers, returns
    +    the final access path (e.g. a.b.c[0][1]).
    +    """
    +    path = []
    +    for is_attribute, specifier in parts:
    +        if is_attribute:
    +            path.append(".{}".format(specifier))
    +        else:
    +            path.append("[{!r}]".format(specifier))
    +    return str(key) + "".join(path)
    +
    +
    +class StringFormatChecker(BaseChecker):
    +    """Checks string formatting operations to ensure that the format string
    +    is valid and the arguments match the format string.
    +    """
    +
    +    __implements__ = (IAstroidChecker,)
    +    name = 'string'
    +    msgs = MSGS
    +
    +    @check_messages(*(MSGS.keys()))
    +    def visit_binop(self, node):
    +        if node.op != '%':
    +            return
    +        left = node.left
    +        args = node.right
    +
    +        if not (isinstance(left, astroid.Const)
    +                and isinstance(left.value, six.string_types)):
    +            return
    +        format_string = left.value
    +        try:
    +            required_keys, required_num_args = \
    +                utils.parse_format_string(format_string)
    +        except utils.UnsupportedFormatCharacter as e:
    +            c = format_string[e.index]
    +            self.add_message('bad-format-character',
    +                             node=node, args=(c, ord(c), e.index))
    +            return
    +        except utils.IncompleteFormatString:
    +            self.add_message('truncated-format-string', node=node)
    +            return
    +        if required_keys and required_num_args:
    +            # The format string uses both named and unnamed format
    +            # specifiers.
    +            self.add_message('mixed-format-string', node=node)
    +        elif required_keys:
    +            # The format string uses only named format specifiers.
    +            # Check that the RHS of the % operator is a mapping object
    +            # that contains precisely the set of keys required by the
    +            # format string.
    +            if isinstance(args, astroid.Dict):
    +                keys = set()
    +                unknown_keys = False
    +                for k, _ in args.items:
    +                    if isinstance(k, astroid.Const):
    +                        key = k.value
    +                        if isinstance(key, six.string_types):
    +                            keys.add(key)
    +                        else:
    +                            self.add_message('bad-format-string-key',
    +                                             node=node, args=key)
    +                    else:
    +                        # One of the keys was something other than a
    +                        # constant.  Since we can't tell what it is,
    +                        # supress checks for missing keys in the
    +                        # dictionary.
    +                        unknown_keys = True
    +                if not unknown_keys:
    +                    for key in required_keys:
    +                        if key not in keys:
    +                            self.add_message('missing-format-string-key',
    +                                             node=node, args=key)
    +                for key in keys:
    +                    if key not in required_keys:
    +                        self.add_message('unused-format-string-key',
    +                                         node=node, args=key)
    +            elif isinstance(args, OTHER_NODES + (astroid.Tuple,)):
    +                type_name = type(args).__name__
    +                self.add_message('format-needs-mapping',
    +                                 node=node, args=type_name)
    +            # else:
    +                # The RHS of the format specifier is a name or
    +                # expression.  It may be a mapping object, so
    +                # there's nothing we can check.
    +        else:
    +            # The format string uses only unnamed format specifiers.
    +            # Check that the number of arguments passed to the RHS of
    +            # the % operator matches the number required by the format
    +            # string.
    +            if isinstance(args, astroid.Tuple):
    +                num_args = len(args.elts)
    +            elif isinstance(args, OTHER_NODES + (astroid.Dict, astroid.DictComp)):
    +                num_args = 1
    +            else:
    +                # The RHS of the format specifier is a name or
    +                # expression.  It could be a tuple of unknown size, so
    +                # there's nothing we can check.
    +                num_args = None
    +            if num_args is not None:
    +                if num_args > required_num_args:
    +                    self.add_message('too-many-format-args', node=node)
    +                elif num_args < required_num_args:
    +                    self.add_message('too-few-format-args', node=node)
    +
    +
    +class StringMethodsChecker(BaseChecker):
    +    __implements__ = (IAstroidChecker,)
    +    name = 'string'
    +    msgs = {
    +        'E1310': ("Suspicious argument in %s.%s call",
    +                  "bad-str-strip-call",
    +                  "The argument to a str.{l,r,}strip call contains a"
    +                  " duplicate character, "),
    +        }
    +
    +    @check_messages(*(MSGS.keys()))
    +    def visit_callfunc(self, node):
    +        func = utils.safe_infer(node.func)
    +        if (isinstance(func, astroid.BoundMethod)
    +                and isinstance(func.bound, astroid.Instance)
    +                and func.bound.name in ('str', 'unicode', 'bytes')):
    +            if func.name in ('strip', 'lstrip', 'rstrip') and node.args:
    +                arg = utils.safe_infer(node.args[0])
    +                if not isinstance(arg, astroid.Const):
    +                    return
    +                if len(arg.value) != len(set(arg.value)):
    +                    self.add_message('bad-str-strip-call', node=node,
    +                                     args=(func.bound.name, func.name))
    +            elif func.name == 'format':
    +                if _PY27 or _PY3K:
    +                    self._check_new_format(node, func)
    +
    +    def _check_new_format(self, node, func):
    +        """ Check the new string formatting. """
    +        # TODO: skip (for now) format nodes which don't have
    +        #       an explicit string on the left side of the format operation.
    +        #       We do this because our inference engine can't properly handle
    +        #       redefinitions of the original string.
    +        #       For more details, see issue 287.
    +        #
    +        # Note that there may not be any left side at all, if the format method
    +        # has been assigned to another variable. See issue 351. For example:
    +        #
    +        #    fmt = 'some string {}'.format
    +        #    fmt('arg')
    +        if (isinstance(node.func, astroid.Getattr)
    +                and not isinstance(node.func.expr, astroid.Const)):
    +            return
    +        try:
    +            strnode = next(func.bound.infer())
    +        except astroid.InferenceError:
    +            return
    +        if not isinstance(strnode, astroid.Const):
    +            return
    +        if node.starargs or node.kwargs:
    +            # TODO: Don't complicate the logic, skip these for now.
    +            return
    +        try:
    +            positional, named = get_args(node)
    +        except astroid.InferenceError:
    +            return
    +        try:
    +            fields, num_args, manual_pos = parse_format_method_string(strnode.value)
    +        except utils.IncompleteFormatString:
    +            self.add_message('bad-format-string', node=node)
    +            return
    +
    +        named_fields = set(field[0] for field in fields
    +                           if isinstance(field[0], six.string_types))
    +        if num_args and manual_pos:
    +            self.add_message('format-combined-specification',
    +                             node=node)
    +            return
    +
    +        check_args = False
    +        # Consider "{[0]} {[1]}" as num_args.
    +        num_args += sum(1 for field in named_fields
    +                        if field == '')
    +        if named_fields:
    +            for field in named_fields:
    +                if field not in named and field:
    +                    self.add_message('missing-format-argument-key',
    +                                     node=node,
    +                                     args=(field, ))
    +            for field in named:
    +                if field not in named_fields:
    +                    self.add_message('unused-format-string-argument',
    +                                     node=node,
    +                                     args=(field, ))
    +            # num_args can be 0 if manual_pos is not.
    +            num_args = num_args or manual_pos
    +            if positional or num_args:
    +                empty = any(True for field in named_fields
    +                            if field == '')
    +                if named or empty:
    +                    # Verify the required number of positional arguments
    +                    # only if the .format got at least one keyword argument.
    +                    # This means that the format strings accepts both
    +                    # positional and named fields and we should warn
    +                    # when one of the them is missing or is extra.
    +                    check_args = True
    +        else:
    +            check_args = True
    +        if check_args:
    +            # num_args can be 0 if manual_pos is not.
    +            num_args = num_args or manual_pos
    +            if positional > num_args:
    +                self.add_message('too-many-format-args', node=node)
    +            elif positional < num_args:
    +                self.add_message('too-few-format-args', node=node)
    +
    +        self._check_new_format_specifiers(node, fields, named)
    +
    +    def _check_new_format_specifiers(self, node, fields, named):
    +        """
    +        Check attribute and index access in the format
    +        string ("{0.a}" and "{0[a]}").
    +        """
    +        for key, specifiers in fields:
    +            # Obtain the argument. If it can't be obtained
    +            # or infered, skip this check.
    +            if key == '':
    +                # {[0]} will have an unnamed argument, defaulting
    +                # to 0. It will not be present in `named`, so use the value
    +                # 0 for it.
    +                key = 0
    +            if isinstance(key, numbers.Number):
    +                try:
    +                    argname = utils.get_argument_from_call(node, key)
    +                except utils.NoSuchArgumentError:
    +                    continue
    +            else:
    +                if key not in named:
    +                    continue
    +                argname = named[key]
    +            if argname in (astroid.YES, None):
    +                continue
    +            try:
    +                argument = next(argname.infer())
    +            except astroid.InferenceError:
    +                continue
    +            if not specifiers or argument is astroid.YES:
    +                # No need to check this key if it doesn't
    +                # use attribute / item access
    +                continue
    +            if argument.parent and isinstance(argument.parent, astroid.Arguments):
    +                # Ignore any object coming from an argument,
    +                # because we can't infer its value properly.
    +                continue
    +            previous = argument
    +            parsed = []
    +            for is_attribute, specifier in specifiers:
    +                if previous is astroid.YES:
    +                    break
    +                parsed.append((is_attribute, specifier))
    +                if is_attribute:
    +                    try:
    +                        previous = previous.getattr(specifier)[0]
    +                    except astroid.NotFoundError:
    +                        if (hasattr(previous, 'has_dynamic_getattr') and
    +                                previous.has_dynamic_getattr()):
    +                            # Don't warn if the object has a custom __getattr__
    +                            break
    +                        path = get_access_path(key, parsed)
    +                        self.add_message('missing-format-attribute',
    +                                         args=(specifier, path),
    +                                         node=node)
    +                        break
    +                else:
    +                    warn_error = False
    +                    if hasattr(previous, 'getitem'):
    +                        try:
    +                            previous = previous.getitem(specifier)
    +                        except (IndexError, TypeError):
    +                            warn_error = True
    +                    else:
    +                        try:
    +                            # Lookup __getitem__ in the current node,
    +                            # but skip further checks, because we can't
    +                            # retrieve the looked object
    +                            previous.getattr('__getitem__')
    +                            break
    +                        except astroid.NotFoundError:
    +                            warn_error = True
    +                    if warn_error:
    +                        path = get_access_path(key, parsed)
    +                        self.add_message('invalid-format-index',
    +                                         args=(specifier, path),
    +                                         node=node)
    +                        break
    +
    +                try:
    +                    previous = next(previous.infer())
    +                except astroid.InferenceError:
    +                    # can't check further if we can't infer it
    +                    break
    +
    +
    +
    +class StringConstantChecker(BaseTokenChecker):
    +    """Check string literals"""
    +    __implements__ = (ITokenChecker, IRawChecker)
    +    name = 'string_constant'
    +    msgs = {
    +        'W1401': ('Anomalous backslash in string: \'%s\'. '
    +                  'String constant might be missing an r prefix.',
    +                  'anomalous-backslash-in-string',
    +                  'Used when a backslash is in a literal string but not as an '
    +                  'escape.'),
    +        'W1402': ('Anomalous Unicode escape in byte string: \'%s\'. '
    +                  'String constant might be missing an r or u prefix.',
    +                  'anomalous-unicode-escape-in-string',
    +                  'Used when an escape like \\u is encountered in a byte '
    +                  'string where it has no effect.'),
    +        }
    +
    +    # Characters that have a special meaning after a backslash in either
    +    # Unicode or byte strings.
    +    ESCAPE_CHARACTERS = 'abfnrtvx\n\r\t\\\'\"01234567'
    +
    +    # TODO(mbp): Octal characters are quite an edge case today; people may
    +    # prefer a separate warning where they occur.  \0 should be allowed.
    +
    +    # Characters that have a special meaning after a backslash but only in
    +    # Unicode strings.
    +    UNICODE_ESCAPE_CHARACTERS = 'uUN'
    +
    +    def process_module(self, module):
    +        self._unicode_literals = 'unicode_literals' in module.future_imports
    +
    +    def process_tokens(self, tokens):
    +        for (tok_type, token, (start_row, _), _, _) in tokens:
    +            if tok_type == tokenize.STRING:
    +                # 'token' is the whole un-parsed token; we can look at the start
    +                # of it to see whether it's a raw or unicode string etc.
    +                self.process_string_token(token, start_row)
    +
    +    def process_string_token(self, token, start_row):
    +        for i, c in enumerate(token):
    +            if c in '\'\"':
    +                quote_char = c
    +                break
    +        # pylint: disable=undefined-loop-variable
    +        prefix = token[:i].lower() #  markers like u, b, r.
    +        after_prefix = token[i:]
    +        if after_prefix[:3] == after_prefix[-3:] == 3 * quote_char:
    +            string_body = after_prefix[3:-3]
    +        else:
    +            string_body = after_prefix[1:-1]  # Chop off quotes
    +        # No special checks on raw strings at the moment.
    +        if 'r' not in prefix:
    +            self.process_non_raw_string_token(prefix, string_body, start_row)
    +
    +    def process_non_raw_string_token(self, prefix, string_body, start_row):
    +        """check for bad escapes in a non-raw string.
    +
    +        prefix: lowercase string of eg 'ur' string prefix markers.
    +        string_body: the un-parsed body of the string, not including the quote
    +        marks.
    +        start_row: integer line number in the source.
    +        """
    +        # Walk through the string; if we see a backslash then escape the next
    +        # character, and skip over it.  If we see a non-escaped character,
    +        # alert, and continue.
    +        #
    +        # Accept a backslash when it escapes a backslash, or a quote, or
    +        # end-of-line, or one of the letters that introduce a special escape
    +        # sequence 
    +        #
    +        # TODO(mbp): Maybe give a separate warning about the rarely-used
    +        # \a \b \v \f?
    +        #
    +        # TODO(mbp): We could give the column of the problem character, but
    +        # add_message doesn't seem to have a way to pass it through at present.
    +        i = 0
    +        while True:
    +            i = string_body.find('\\', i)
    +            if i == -1:
    +                break
    +            # There must be a next character; having a backslash at the end
    +            # of the string would be a SyntaxError.
    +            next_char = string_body[i+1]
    +            match = string_body[i:i+2]
    +            if next_char in self.UNICODE_ESCAPE_CHARACTERS:
    +                if 'u' in prefix:
    +                    pass
    +                elif (_PY3K or self._unicode_literals) and 'b' not in prefix:
    +                    pass  # unicode by default
    +                else:
    +                    self.add_message('anomalous-unicode-escape-in-string',
    +                                     line=start_row, args=(match, ))
    +            elif next_char not in self.ESCAPE_CHARACTERS:
    +                self.add_message('anomalous-backslash-in-string',
    +                                 line=start_row, args=(match, ))
    +            # Whether it was a valid escape or not, backslash followed by
    +            # another character can always be consumed whole: the second
    +            # character can never be the start of a new backslash escape.
    +            i += 2
    +
    +
    +
    +def register(linter):
    +    """required method to auto register this checker """
    +    linter.register_checker(StringFormatChecker(linter))
    +    linter.register_checker(StringMethodsChecker(linter))
    +    linter.register_checker(StringConstantChecker(linter))
    diff --git a/pymode/libs/pylama/lint/pylama_pylint/pylint/checkers/typecheck.py b/pymode/libs/pylint/checkers/typecheck.py
    similarity index 65%
    rename from pymode/libs/pylama/lint/pylama_pylint/pylint/checkers/typecheck.py
    rename to pymode/libs/pylint/checkers/typecheck.py
    index 25f7612e..9f074ae0 100644
    --- a/pymode/libs/pylama/lint/pylama_pylint/pylint/checkers/typecheck.py
    +++ b/pymode/libs/pylint/checkers/typecheck.py
    @@ -21,24 +21,23 @@
     
     import astroid
     from astroid import InferenceError, NotFoundError, YES, Instance
    +from astroid.bases import BUILTINS
     
    -from pylint.interfaces import IAstroidChecker
    +from pylint.interfaces import IAstroidChecker, INFERENCE, INFERENCE_FAILURE
     from pylint.checkers import BaseChecker
    -from pylint.checkers.utils import safe_infer, is_super, check_messages
    +from pylint.checkers.utils import (
    +    safe_infer, is_super,
    +    check_messages, decorated_with_property)
     
     MSGS = {
         'E1101': ('%s %r has no %r member',
                   'no-member',
    -              'Used when a variable is accessed for an unexistent member.'),
    +              'Used when a variable is accessed for an unexistent member.',
    +              {'old_names': [('E1103', 'maybe-no-member')]}),
         'E1102': ('%s is not callable',
                   'not-callable',
                   'Used when an object being called has been inferred to a non \
                   callable object'),
    -    'E1103': ('%s %r has no %r member (but some types could not be inferred)',
    -              'maybe-no-member',
    -              'Used when a variable is accessed for an unexistent member, but \
    -              astroid was not able to interpret all possible types of this \
    -              variable.'),
         'E1111': ('Assigning to function call which doesn\'t return',
                   'assignment-from-no-return',
                   'Used when an assignment is done on a function call but the \
    @@ -55,11 +54,6 @@
                   'too-many-function-args',
                   'Used when a function call passes too many positional \
                   arguments.'),
    -    'E1122': ('Duplicate keyword argument %r in %s call',
    -              'duplicate-keyword-arg',
    -              'Used when a function call passes the same keyword argument \
    -              multiple times.',
    -              {'maxversion': (2, 6)}),
         'E1123': ('Unexpected keyword argument %r in %s call',
                   'unexpected-keyword-arg',
                   'Used when a function call passes a keyword argument that \
    @@ -72,10 +66,23 @@
         'E1125': ('Missing mandatory keyword argument %r in %s call',
                   'missing-kwoa',
                   ('Used when a function call does not pass a mandatory'
    -              ' keyword-only argument.'),
    +               ' keyword-only argument.'),
                   {'minversion': (3, 0)}),
    +    'E1126': ('Sequence index is not an int, slice, or instance with __index__',
    +              'invalid-sequence-index',
    +              'Used when a sequence type is indexed with an invalid type. '
    +              'Valid types are ints, slices, and objects with an __index__ '
    +              'method.'),
    +    'E1127': ('Slice index is not an int, None, or instance with __index__',
    +              'invalid-slice-index',
    +              'Used when a slice index is not an integer, None, or an object \
    +               with an __index__ method.'),
         }
     
    +# builtin sequence types in Python 2 and 3.
    +SEQUENCE_TYPES = set(['str', 'unicode', 'list', 'tuple', 'bytearray',
    +                      'xrange', 'range', 'bytes', 'memoryview'])
    +
     def _determine_callable(callable_obj):
         # Ordering is important, since BoundMethod is a subclass of UnboundMethod,
         # and Function inherits Lambda.
    @@ -132,39 +139,38 @@ class TypeChecker(BaseChecker):
                      'help' : 'Tells whether missing members accessed in mixin \
     class should be ignored. A mixin class is detected if its name ends with \
     "mixin" (case insensitive).'}
    -                ),
    -                ('ignored-modules',
    -                 {'default': (),
    -                  'type': 'csv',
    -                  'metavar': '',
    -                  'help': 'List of module names for which member attributes \
    +               ),
    +               ('ignored-modules',
    +                {'default': (),
    +                 'type': 'csv',
    +                 'metavar': '',
    +                 'help': 'List of module names for which member attributes \
     should not be checked (useful for modules/projects where namespaces are \
     manipulated during runtime and thus existing member attributes cannot be \
     deduced by static analysis'},
    -                 ),
    +               ),
                    ('ignored-classes',
                     {'default' : ('SQLObject',),
                      'type' : 'csv',
                      'metavar' : '',
                      'help' : 'List of classes names for which member attributes \
     should not be checked (useful for classes with attributes dynamically set).'}
    -                 ),
    +               ),
     
                    ('zope',
                     {'default' : False, 'type' : 'yn', 'metavar': '',
                      'help' : 'When zope mode is activated, add a predefined set \
     of Zope acquired attributes to generated-members.'}
    -                ),
    +               ),
                    ('generated-members',
    -                {'default' : (
    -        'REQUEST', 'acl_users', 'aq_parent'),
    +                {'default' : ('REQUEST', 'acl_users', 'aq_parent'),
                      'type' : 'string',
                      'metavar' : '',
                      'help' : 'List of members which are set dynamically and \
     missed by pylint inference system, and so shouldn\'t trigger E0201 when \
     accessed. Python regular expressions are accepted.'}
    -                ),
    -        )
    +               ),
    +              )
     
         def open(self):
             # do this in open since config not fully initialized in __init__
    @@ -179,7 +185,7 @@ def visit_assattr(self, node):
         def visit_delattr(self, node):
             self.visit_getattr(node)
     
    -    @check_messages('no-member', 'maybe-no-member')
    +    @check_messages('no-member')
         def visit_getattr(self, node):
             """check that the accessed attribute exists
     
    @@ -241,6 +247,20 @@ def visit_getattr(self, node):
                     # explicit skipping of module member access
                     if owner.root().name in self.config.ignored_modules:
                         continue
    +                if isinstance(owner, astroid.Class):
    +                    # Look up in the metaclass only if the owner is itself
    +                    # a class.
    +                    # TODO: getattr doesn't return by default members
    +                    # from the metaclass, because handling various cases
    +                    # of methods accessible from the metaclass itself
    +                    # and/or subclasses only is too complicated for little to
    +                    # no benefit.
    +                    metaclass = owner.metaclass()
    +                    try:
    +                        if metaclass and metaclass.getattr(node.attrname):
    +                            continue
    +                    except NotFoundError:
    +                        pass
                     missingattr.add((owner, name))
                     continue
                 # stop on the first found
    @@ -257,13 +277,11 @@ def visit_getattr(self, node):
                     if actual in done:
                         continue
                     done.add(actual)
    -                if inference_failure:
    -                    msgid = 'maybe-no-member'
    -                else:
    -                    msgid = 'no-member'
    -                self.add_message(msgid, node=node,
    +                confidence = INFERENCE if not inference_failure else INFERENCE_FAILURE
    +                self.add_message('no-member', node=node,
                                      args=(owner.display_type(), name,
    -                                       node.attrname))
    +                                       node.attrname),
    +                                 confidence=confidence)
     
         @check_messages('assignment-from-no-return', 'assignment-from-none')
         def visit_assign(self, node):
    @@ -293,7 +311,51 @@ def visit_assign(self, node):
                 else:
                     self.add_message('assignment-from-none', node=node)
     
    -    @check_messages(*(MSGS.keys()))
    +    def _check_uninferable_callfunc(self, node):
    +        """
    +        Check that the given uninferable CallFunc node does not
    +        call an actual function.
    +        """
    +        if not isinstance(node.func, astroid.Getattr):
    +            return
    +
    +        # Look for properties. First, obtain
    +        # the lhs of the Getattr node and search the attribute
    +        # there. If that attribute is a property or a subclass of properties,
    +        # then most likely it's not callable.
    +
    +        # TODO: since astroid doesn't understand descriptors very well
    +        # we will not handle them here, right now.
    +
    +        expr = node.func.expr
    +        klass = safe_infer(expr)
    +        if (klass is None or klass is astroid.YES or
    +                not isinstance(klass, astroid.Instance)):
    +            return
    +
    +        try:
    +            attrs = klass._proxied.getattr(node.func.attrname)
    +        except astroid.NotFoundError:
    +            return
    +
    +        for attr in attrs:
    +            if attr is astroid.YES:
    +                continue
    +            if not isinstance(attr, astroid.Function):
    +                continue
    +
    +            # Decorated, see if it is decorated with a property.
    +            # Also, check the returns and see if they are callable.
    +            if decorated_with_property(attr):
    +                if all(return_node.callable()
    +                       for return_node in attr.infer_call_result(node)):
    +                    continue
    +                else:
    +                    self.add_message('not-callable', node=node,
    +                                     args=node.func.as_string())
    +                    break
    +
    +    @check_messages(*(list(MSGS.keys())))
         def visit_callfunc(self, node):
             """check that called functions/methods are inferred to callable objects,
             and that the arguments passed to the function match the parameters in
    @@ -305,22 +367,22 @@ def visit_callfunc(self, node):
             num_positional_args = 0
             for arg in node.args:
                 if isinstance(arg, astroid.Keyword):
    -                keyword = arg.arg
    -                if keyword in keyword_args:
    -                    self.add_message('duplicate-keyword-arg', node=node, args=keyword)
    -                keyword_args.add(keyword)
    +                keyword_args.add(arg.arg)
                 else:
                     num_positional_args += 1
     
             called = safe_infer(node.func)
             # only function, generator and object defining __call__ are allowed
             if called is not None and not called.callable():
    -            self.add_message('not-callable', node=node, args=node.func.as_string())
    +            self.add_message('not-callable', node=node,
    +                             args=node.func.as_string())
    +
    +        self._check_uninferable_callfunc(node)
     
             try:
                 called, implicit_args, callable_name = _determine_callable(called)
             except ValueError:
    -            # Any error occurred during determining the function type, most of 
    +            # Any error occurred during determining the function type, most of
                 # those errors are handled by different warnings.
                 return
             num_positional_args += implicit_args
    @@ -378,7 +440,8 @@ def visit_callfunc(self, node):
                     break
                 else:
                     # Too many positional arguments.
    -                self.add_message('too-many-function-args', node=node, args=(callable_name,))
    +                self.add_message('too-many-function-args',
    +                                 node=node, args=(callable_name,))
                     break
     
             # 2. Match the keyword arguments.
    @@ -387,13 +450,15 @@ def visit_callfunc(self, node):
                     i = parameter_name_to_index[keyword]
                     if parameters[i][1]:
                         # Duplicate definition of function parameter.
    -                    self.add_message('redundant-keyword-arg', node=node, args=(keyword, callable_name))
    +                    self.add_message('redundant-keyword-arg',
    +                                     node=node, args=(keyword, callable_name))
                     else:
                         parameters[i][1] = True
                 elif keyword in kwparams:
                     if kwparams[keyword][1]:  # XXX is that even possible?
                         # Duplicate definition of function parameter.
    -                    self.add_message('redundant-keyword-arg', node=node, args=(keyword, callable_name))
    +                    self.add_message('redundant-keyword-arg', node=node,
    +                                     args=(keyword, callable_name))
                     else:
                         kwparams[keyword][1] = True
                 elif called.args.kwarg is not None:
    @@ -401,7 +466,8 @@ def visit_callfunc(self, node):
                     pass
                 else:
                     # Unexpected keyword argument.
    -                self.add_message('unexpected-keyword-arg', node=node, args=(keyword, callable_name))
    +                self.add_message('unexpected-keyword-arg', node=node,
    +                                 args=(keyword, callable_name))
     
             # 3. Match the *args, if any.  Note that Python actually processes
             #    *args _before_ any keyword arguments, but we wait until after
    @@ -438,13 +504,123 @@ def visit_callfunc(self, node):
                         display_name = ''
                     else:
                         display_name = repr(name)
    -                self.add_message('no-value-for-parameter', node=node, args=(display_name, callable_name))
    +                self.add_message('no-value-for-parameter', node=node,
    +                                 args=(display_name, callable_name))
     
             for name in kwparams:
                 defval, assigned = kwparams[name]
                 if defval is None and not assigned:
    -                self.add_message('missing-kwoa', node=node, args=(name, callable_name))
    +                self.add_message('missing-kwoa', node=node,
    +                                 args=(name, callable_name))
    +
    +    @check_messages('invalid-sequence-index')
    +    def visit_extslice(self, node):
    +        # Check extended slice objects as if they were used as a sequence
    +        # index to check if the object being sliced can support them
    +        return self.visit_index(node)
    +
    +    @check_messages('invalid-sequence-index')
    +    def visit_index(self, node):
    +        if not node.parent or not hasattr(node.parent, "value"):
    +            return
    +
    +        # Look for index operations where the parent is a sequence type.
    +        # If the types can be determined, only allow indices to be int,
    +        # slice or instances with __index__.
    +
    +        parent_type = safe_infer(node.parent.value)
    +        if not isinstance(parent_type, (astroid.Class, astroid.Instance)):
    +            return
    +
    +        # Determine what method on the parent this index will use
    +        # The parent of this node will be a Subscript, and the parent of that
    +        # node determines if the Subscript is a get, set, or delete operation.
    +        operation = node.parent.parent
    +        if isinstance(operation, astroid.Assign):
    +            methodname = '__setitem__'
    +        elif isinstance(operation, astroid.Delete):
    +            methodname = '__delitem__'
    +        else:
    +            methodname = '__getitem__'
    +
    +        # Check if this instance's __getitem__, __setitem__, or __delitem__, as
    +        # appropriate to the statement, is implemented in a builtin sequence
    +        # type. This way we catch subclasses of sequence types but skip classes
    +        # that override __getitem__ and which may allow non-integer indices.
    +        try:
    +            methods = parent_type.getattr(methodname)
    +            if methods is astroid.YES:
    +                return
    +            itemmethod = methods[0]
    +        except (astroid.NotFoundError, IndexError):
    +            return
    +
    +        if not isinstance(itemmethod, astroid.Function):
    +            return
    +        if itemmethod.root().name != BUILTINS:
    +            return
    +        if not itemmethod.parent:
    +            return
    +        if itemmethod.parent.name not in SEQUENCE_TYPES:
    +            return
    +
    +        # For ExtSlice objects coming from visit_extslice, no further
    +        # inference is necessary, since if we got this far the ExtSlice
    +        # is an error.
    +        if isinstance(node, astroid.ExtSlice):
    +            index_type = node
    +        else:
    +            index_type = safe_infer(node)
    +        if index_type is None or index_type is astroid.YES:
    +            return
    +
    +        # Constants must be of type int
    +        if isinstance(index_type, astroid.Const):
    +            if isinstance(index_type.value, int):
    +                return
    +        # Instance values must be int, slice, or have an __index__ method
    +        elif isinstance(index_type, astroid.Instance):
    +            if index_type.pytype() in (BUILTINS + '.int', BUILTINS + '.slice'):
    +                return
    +            try:
    +                index_type.getattr('__index__')
    +                return
    +            except astroid.NotFoundError:
    +                pass
    +
    +        # Anything else is an error
    +        self.add_message('invalid-sequence-index', node=node)
    +
    +    @check_messages('invalid-slice-index')
    +    def visit_slice(self, node):
    +        # Check the type of each part of the slice
    +        for index in (node.lower, node.upper, node.step):
    +            if index is None:
    +                continue
    +
    +            index_type = safe_infer(index)
    +            if index_type is None or index_type is astroid.YES:
    +                continue
    +
    +            # Constants must of type int or None
    +            if isinstance(index_type, astroid.Const):
    +                if isinstance(index_type.value, (int, type(None))):
    +                    continue
    +            # Instance values must be of type int, None or an object
    +            # with __index__
    +            elif isinstance(index_type, astroid.Instance):
    +                if index_type.pytype() in (BUILTINS + '.int',
    +                                           BUILTINS + '.NoneType'):
    +                    continue
    +
    +                try:
    +                    index_type.getattr('__index__')
    +                    return
    +                except astroid.NotFoundError:
    +                    pass
     
    +            # Anything else is an error
    +            self.add_message('invalid-slice-index', node=node)
     
     def register(linter):
         """required method to auto register this checker """
    diff --git a/pymode/libs/pylama/lint/pylama_pylint/pylint/checkers/utils.py b/pymode/libs/pylint/checkers/utils.py
    similarity index 69%
    rename from pymode/libs/pylama/lint/pylama_pylint/pylint/checkers/utils.py
    rename to pymode/libs/pylint/checkers/utils.py
    index e7d85d41..2cb01d55 100644
    --- a/pymode/libs/pylama/lint/pylama_pylint/pylint/checkers/utils.py
    +++ b/pymode/libs/pylint/checkers/utils.py
    @@ -19,6 +19,7 @@
     """
     
     import re
    +import sys
     import string
     
     import astroid
    @@ -26,8 +27,15 @@
     from logilab.common.compat import builtins
     
     BUILTINS_NAME = builtins.__name__
    -
     COMP_NODE_TYPES = astroid.ListComp, astroid.SetComp, astroid.DictComp, astroid.GenExpr
    +PY3K = sys.version_info[0] == 3
    +
    +if not PY3K:
    +    EXCEPTIONS_MODULE = "exceptions"
    +else:
    +    EXCEPTIONS_MODULE = "builtins"
    +ABC_METHODS = set(('abc.abstractproperty', 'abc.abstractmethod',
    +                   'abc.abstractclassmethod', 'abc.abstractstaticmethod'))
     
     
     class NoSuchArgumentError(Exception):
    @@ -66,11 +74,10 @@ def clobber_in_except(node):
             if is_builtin(name):
                 return (True, (name, 'builtins'))
             else:
    -            scope, stmts = node.lookup(name)
    -            if (stmts and
    -                not isinstance(stmts[0].ass_type(),
    -                               (astroid.Assign, astroid.AugAssign,
    -                                astroid.ExceptHandler))):
    +            stmts = node.lookup(name)[1]
    +            if (stmts and not isinstance(stmts[0].ass_type(),
    +                                         (astroid.Assign, astroid.AugAssign,
    +                                          astroid.ExceptHandler))):
                     return (True, (name, 'outer scope (line %s)' % stmts[0].fromlineno))
         return (False, None)
     
    @@ -82,11 +89,11 @@ def safe_infer(node):
         """
         try:
             inferit = node.infer()
    -        value = inferit.next()
    +        value = next(inferit)
         except astroid.InferenceError:
             return
         try:
    -        inferit.next()
    +        next(inferit)
             return # None if there is ambiguity on the inferred node
         except astroid.InferenceError:
             return # there is some kind of ambiguity
    @@ -152,12 +159,12 @@ def is_defined_before(var_node):
                     if ass_node.name == varname:
                         return True
             elif isinstance(_node, astroid.With):
    -            for expr, vars in _node.items:
    +            for expr, ids in _node.items:
                     if expr.parent_of(var_node):
    -                    break                
    -                if (vars and
    -                    isinstance(vars, astroid.AssName) and
    -                    vars.name == varname):
    +                    break
    +                if (ids and
    +                        isinstance(ids, astroid.AssName) and
    +                        ids.name == varname):
                         return True
             elif isinstance(_node, (astroid.Lambda, astroid.Function)):
                 if _node.args.is_argument(varname):
    @@ -204,9 +211,9 @@ def is_func_decorator(node):
             if isinstance(parent, astroid.Decorators):
                 return True
             if (parent.is_statement or
    -            isinstance(parent, astroid.Lambda) or
    -            isinstance(parent, (scoped_nodes.ComprehensionScope,
    -                                scoped_nodes.ListComp))):
    +                isinstance(parent, astroid.Lambda) or
    +                isinstance(parent, (scoped_nodes.ComprehensionScope,
    +                                    scoped_nodes.ListComp))):
                 break
             parent = parent.parent
         return False
    @@ -268,7 +275,7 @@ def overrides_a_method(class_node, name):
                      '__or__', '__ior__', '__ror__',
                      '__xor__', '__ixor__', '__rxor__',
                      # XXX To be continued
    -                 ))
    +                ))
     
     def check_messages(*messages):
         """decorator to store messages that are handled by a checker method"""
    @@ -345,7 +352,11 @@ def next_char(i):
                 if char in 'hlL':
                     i, char = next_char(i)
                 # Parse the conversion type (mandatory).
    -            if char not in 'diouxXeEfFgGcrs%':
    +            if PY3K:
    +                flags = 'diouxXeEfFgGcrs%a'
    +            else:
    +                flags = 'diouxXeEfFgGcrs%'
    +            if char not in flags:
                     raise UnsupportedFormatCharacter(i)
                 if key:
                     keys.add(key)
    @@ -354,12 +365,13 @@ def next_char(i):
             i += 1
         return keys, num_args
     
    +
     def is_attr_protected(attrname):
         """return True if attribute name is protected (start with _ and some other
         details), False otherwise.
         """
         return attrname[0] == '_' and not attrname == '_' and not (
    -             attrname.startswith('__') and attrname.endswith('__'))
    +        attrname.startswith('__') and attrname.endswith('__'))
     
     def node_frame_class(node):
         """return klass node for a method node (or a staticmethod or a
    @@ -380,8 +392,8 @@ def is_super_call(expr):
         is super. Check before that you're in a method.
         """
         return (isinstance(expr, astroid.CallFunc) and
    -        isinstance(expr.func, astroid.Name) and
    -        expr.func.name == 'super')
    +            isinstance(expr.func, astroid.Name) and
    +            expr.func.name == 'super')
     
     def is_attr_private(attrname):
         """Check that attribute name is private (at least two leading underscores,
    @@ -407,10 +419,146 @@ def get_argument_from_call(callfunc_node, position=None, keyword=None):
         try:
             if position is not None and not isinstance(callfunc_node.args[position], astroid.Keyword):
                 return callfunc_node.args[position]
    -    except IndexError, error:
    +    except IndexError as error:
             raise NoSuchArgumentError(error)
         if keyword:
             for arg in callfunc_node.args:
                 if isinstance(arg, astroid.Keyword) and arg.arg == keyword:
                     return arg.value
         raise NoSuchArgumentError
    +
    +def inherit_from_std_ex(node):
    +    """
    +    Return true if the given class node is subclass of
    +    exceptions.Exception.
    +    """
    +    if node.name in ('Exception', 'BaseException') \
    +            and node.root().name == EXCEPTIONS_MODULE:
    +        return True
    +    return any(inherit_from_std_ex(parent)
    +               for parent in node.ancestors(recurs=False))
    +
    +def is_import_error(handler):
    +    """
    +    Check if the given exception handler catches
    +    ImportError.
    +
    +    :param handler: A node, representing an ExceptHandler node.
    +    :returns: True if the handler catches ImportError, False otherwise.
    +    """
    +    names = None
    +    if isinstance(handler.type, astroid.Tuple):
    +        names = [name for name in handler.type.elts
    +                 if isinstance(name, astroid.Name)]
    +    elif isinstance(handler.type, astroid.Name):
    +        names = [handler.type]
    +    else:
    +        # Don't try to infer that.
    +        return
    +    for name in names:
    +        try:
    +            for infered in name.infer():
    +                if (isinstance(infered, astroid.Class) and
    +                        inherit_from_std_ex(infered) and
    +                        infered.name == 'ImportError'):
    +                    return True
    +        except astroid.InferenceError:
    +            continue
    +
    +def has_known_bases(klass):
    +    """Returns true if all base classes of a class could be inferred."""
    +    try:
    +        return klass._all_bases_known
    +    except AttributeError:
    +        pass
    +    for base in klass.bases:
    +        result = safe_infer(base)
    +        # TODO: check for A->B->A->B pattern in class structure too?
    +        if (not isinstance(result, astroid.Class) or
    +                result is klass or
    +                not has_known_bases(result)):
    +            klass._all_bases_known = False
    +            return False
    +    klass._all_bases_known = True
    +    return True
    +
    +def decorated_with_property(node):
    +    """ Detect if the given function node is decorated with a property. """
    +    if not node.decorators:
    +        return False
    +    for decorator in node.decorators.nodes:
    +        if not isinstance(decorator, astroid.Name):
    +            continue
    +        try:
    +            for infered in decorator.infer():
    +                if isinstance(infered, astroid.Class):
    +                    if (infered.root().name == BUILTINS_NAME and
    +                            infered.name == 'property'):
    +                        return True
    +                    for ancestor in infered.ancestors():
    +                        if (ancestor.name == 'property' and
    +                                ancestor.root().name == BUILTINS_NAME):
    +                            return True
    +        except astroid.InferenceError:
    +            pass
    +
    +
    +def decorated_with_abc(func):
    +    """Determine if the `func` node is decorated with `abc` decorators."""
    +    if func.decorators:
    +        for node in func.decorators.nodes:
    +            try:
    +                infered = next(node.infer())
    +            except astroid.InferenceError:
    +                continue
    +            if infered and infered.qname() in ABC_METHODS:
    +                return True
    +
    +
    +def unimplemented_abstract_methods(node, is_abstract_cb=decorated_with_abc):
    +    """
    +    Get the unimplemented abstract methods for the given *node*.
    +
    +    A method can be considered abstract if the callback *is_abstract_cb*
    +    returns a ``True`` value. The check defaults to verifying that
    +    a method is decorated with abstract methods.
    +    The function will work only for new-style classes. For old-style
    +    classes, it will simply return an empty dictionary.
    +    For the rest of them, it will return a dictionary of abstract method
    +    names and their inferred objects.
    +    """
    +    visited = {}
    +    try:
    +        mro = reversed(node.mro())
    +    except NotImplementedError:
    +        # Old style class, it will not have a mro.
    +        return {}
    +    except astroid.ResolveError:
    +        # Probably inconsistent hierarchy, don'try
    +        # to figure this out here.
    +        return {}
    +    for ancestor in mro:
    +        for obj in ancestor.values():
    +            infered = obj
    +            if isinstance(obj, astroid.AssName):
    +                infered = safe_infer(obj)
    +                if not infered:
    +                    continue
    +                if not isinstance(infered, astroid.Function):
    +                    if obj.name in visited:
    +                        del visited[obj.name]
    +            if isinstance(infered, astroid.Function):
    +                # It's critical to use the original name,
    +                # since after inferring, an object can be something
    +                # else than expected, as in the case of the
    +                # following assignment.
    +                #
    +                # class A:
    +                #     def keys(self): pass
    +                #     __iter__ = keys
    +                abstract = is_abstract_cb(infered)
    +                if abstract:
    +                    visited[obj.name] = infered
    +                elif not abstract and obj.name in visited:
    +                    del visited[obj.name]
    +    return visited
    diff --git a/pymode/libs/pylama/lint/pylama_pylint/pylint/checkers/variables.py b/pymode/libs/pylint/checkers/variables.py
    similarity index 60%
    rename from pymode/libs/pylama/lint/pylama_pylint/pylint/checkers/variables.py
    rename to pymode/libs/pylint/checkers/variables.py
    index dc8d1115..8f6f9574 100644
    --- a/pymode/libs/pylama/lint/pylama_pylint/pylint/checkers/variables.py
    +++ b/pymode/libs/pylint/checkers/variables.py
    @@ -17,20 +17,26 @@
     """
     import os
     import sys
    +import re
     from copy import copy
     
     import astroid
    -from astroid import are_exclusive, builtin_lookup, AstroidBuildingException
    +from astroid import are_exclusive, builtin_lookup
    +from astroid import modutils
     
    -from logilab.common.modutils import file_from_modpath
    -
    -from pylint.interfaces import IAstroidChecker
    +from pylint.interfaces import IAstroidChecker, INFERENCE, INFERENCE_FAILURE, HIGH
    +from pylint.utils import get_global_option
     from pylint.checkers import BaseChecker
    -from pylint.checkers.utils import (PYMETHODS, is_ancestor_name, is_builtin,
    -     is_defined_before, is_error, is_func_default, is_func_decorator,
    -     assign_parent, check_messages, is_inside_except, clobber_in_except,
    -     get_all_elements)
    +from pylint.checkers.utils import (
    +    PYMETHODS, is_ancestor_name, is_builtin,
    +    is_defined_before, is_error, is_func_default, is_func_decorator,
    +    assign_parent, check_messages, is_inside_except, clobber_in_except,
    +    get_all_elements, has_known_bases)
    +import six
    +
    +SPECIAL_OBJ = re.compile("^_{2}[a-z]+_{2}$")
     
    +PY3K = sys.version_info >= (3, 0)
     
     def in_for_else_branch(parent, stmt):
         """Returns True if stmt in inside the else branch for a parent For stmt."""
    @@ -40,7 +46,7 @@ def in_for_else_branch(parent, stmt):
     def overridden_method(klass, name):
         """get overridden method if any"""
         try:
    -        parent = klass.local_attr_ancestors(name).next()
    +        parent = next(klass.local_attr_ancestors(name))
         except (StopIteration, KeyError):
             return None
         try:
    @@ -68,6 +74,120 @@ def _get_unpacking_extra_info(node, infered):
             more = ' defined at line %s of %s' % (infered.lineno, infered_module)
         return more
     
    +def _detect_global_scope(node, frame, defframe):
    +    """ Detect that the given frames shares a global
    +    scope.
    +
    +    Two frames shares a global scope when neither
    +    of them are hidden under a function scope, as well
    +    as any of parent scope of them, until the root scope.
    +    In this case, depending from something defined later on
    +    will not work, because it is still undefined.
    +
    +    Example:
    +        class A:
    +            # B has the same global scope as `C`, leading to a NameError.
    +            class B(C): ...
    +        class C: ...
    +
    +    """
    +    def_scope = scope = None
    +    if frame and frame.parent:
    +        scope = frame.parent.scope()
    +    if defframe and defframe.parent:
    +        def_scope = defframe.parent.scope()
    +    if isinstance(frame, astroid.Function):
    +        # If the parent of the current node is a
    +        # function, then it can be under its scope
    +        # (defined in, which doesn't concern us) or
    +        # the `->` part of annotations. The same goes
    +        # for annotations of function arguments, they'll have
    +        # their parent the Arguments node.
    +        if not isinstance(node.parent,
    +                          (astroid.Function, astroid.Arguments)):
    +            return False
    +    elif any(not isinstance(f, (astroid.Class, astroid.Module))
    +             for f in (frame, defframe)):
    +        # Not interested in other frames, since they are already
    +        # not in a global scope.
    +        return False
    +
    +    break_scopes = []
    +    for s in (scope, def_scope):
    +        # Look for parent scopes. If there is anything different
    +        # than a module or a class scope, then they frames don't
    +        # share a global scope.
    +        parent_scope = s
    +        while parent_scope:
    +            if not isinstance(parent_scope, (astroid.Class, astroid.Module)):
    +                break_scopes.append(parent_scope)
    +                break
    +            if parent_scope.parent:
    +                parent_scope = parent_scope.parent.scope()
    +            else:
    +                break
    +    if break_scopes and len(set(break_scopes)) != 1:
    +        # Store different scopes than expected.
    +        # If the stored scopes are, in fact, the very same, then it means
    +        # that the two frames (frame and defframe) shares the same scope,
    +        # and we could apply our lineno analysis over them.
    +        # For instance, this works when they are inside a function, the node
    +        # that uses a definition and the definition itself.
    +        return False
    +    # At this point, we are certain that frame and defframe shares a scope
    +    # and the definition of the first depends on the second.
    +    return frame.lineno < defframe.lineno
    +
    +def _fix_dot_imports(not_consumed):
    +    """ Try to fix imports with multiple dots, by returning a dictionary
    +    with the import names expanded. The function unflattens root imports,
    +    like 'xml' (when we have both 'xml.etree' and 'xml.sax'), to 'xml.etree'
    +    and 'xml.sax' respectively.
    +    """
    +    # TODO: this should be improved in issue astroid #46
    +    names = {}
    +    for name, stmts in six.iteritems(not_consumed):
    +        if any(isinstance(stmt, astroid.AssName)
    +               and isinstance(stmt.ass_type(), astroid.AugAssign)
    +               for stmt in stmts):
    +            continue
    +        for stmt in stmts:
    +            if not isinstance(stmt, (astroid.From, astroid.Import)):
    +                continue
    +            for imports in stmt.names:
    +                second_name = None
    +                if imports[0] == "*":
    +                    # In case of wildcard imports,
    +                    # pick the name from inside the imported module.
    +                    second_name = name
    +                else:
    +                    if imports[0].find(".") > -1 or name in imports:
    +                        # Most likely something like 'xml.etree',
    +                        # which will appear in the .locals as 'xml'.
    +                        # Only pick the name if it wasn't consumed.
    +                        second_name = imports[0]
    +                if second_name and second_name not in names:
    +                    names[second_name] = stmt
    +    return sorted(names.items(), key=lambda a: a[1].fromlineno)
    +
    +def _find_frame_imports(name, frame):
    +    """
    +    Detect imports in the frame, with the required
    +    *name*. Such imports can be considered assignments.
    +    Returns True if an import for the given name was found.
    +    """
    +    imports = frame.nodes_of_class((astroid.Import, astroid.From))
    +    for import_node in imports:
    +        for import_name, import_alias in import_node.names:
    +            # If the import uses an alias, check only that.
    +            # Otherwise, check only the import name.
    +            if import_alias:
    +                if import_alias == name:
    +                    return True
    +            elif import_name and import_name == name:
    +                return True
    +
    +
     MSGS = {
         'E0601': ('Using variable %r before assignment',
                   'used-before-assignment',
    @@ -97,13 +217,13 @@ def _get_unpacking_extra_info(node, infered):
         'W0603': ('Using the global statement', # W0121
                   'global-statement',
                   'Used when you use the "global" statement to update a global \
    -              variable. PyLint just try to discourage this \
    +              variable. Pylint just try to discourage this \
                   usage. That doesn\'t mean you can not use it !'),
         'W0604': ('Using the global statement at the module level', # W0103
                   'global-at-module-level',
                   'Used when you use the "global" statement at the module level \
                   since it has no effect'),
    -    'W0611': ('Unused import %s',
    +    'W0611': ('Unused %s',
                   'unused-import',
                   'Used when an imported module or variable is not used.'),
         'W0612': ('Unused variable %r',
    @@ -147,7 +267,7 @@ def _get_unpacking_extra_info(node, infered):
                   'a sequence is used in an unpack assignment'),
     
         'W0640': ('Cell variable %s defined in loop',
    -              'cell-var-from-loop', 
    +              'cell-var-from-loop',
                   'A variable used in a closure is defined in a loop. '
                   'This will result in all closures using the same value for '
                   'the closed-over variable.'),
    @@ -168,8 +288,7 @@ class VariablesChecker(BaseChecker):
         name = 'variables'
         msgs = MSGS
         priority = -1
    -    options = (
    -               ("init-import",
    +    options = (("init-import",
                     {'default': 0, 'type' : 'yn', 'metavar' : '',
                      'help' : 'Tells whether we should check for unused import in \
     __init__ files.'}),
    @@ -183,8 +302,15 @@ class VariablesChecker(BaseChecker):
                      'metavar' : '',
                      'help' : 'List of additional names supposed to be defined in \
     builtins. Remember that you should avoid to define new builtins when possible.'
    -                 }),
    +                }),
    +               ("callbacks",
    +                {'default' : ('cb_', '_cb'), 'type' : 'csv',
    +                 'metavar' : '',
    +                 'help' : 'List of strings which can identify a callback '
    +                          'function by name. A callback name must start or '
    +                          'end with one of those strings.'}
                    )
    +              )
         def __init__(self, linter=None):
             BaseChecker.__init__(self, linter)
             self._to_consume = None
    @@ -195,12 +321,14 @@ def visit_module(self, node):
             checks globals doesn't overrides builtins
             """
             self._to_consume = [(copy(node.locals), {}, 'module')]
    -        for name, stmts in node.locals.iteritems():
    +        for name, stmts in six.iteritems(node.locals):
                 if is_builtin(name) and not is_inside_except(stmts[0]):
                     # do not print Redefining builtin for additional builtins
                     self.add_message('redefined-builtin', args=name, node=stmts[0])
     
    -    @check_messages('unused-import', 'unused-wildcard-import', 'redefined-builtin', 'undefined-all-variable', 'invalid-all-object')
    +    @check_messages('unused-import', 'unused-wildcard-import',
    +                    'redefined-builtin', 'undefined-all-variable',
    +                    'invalid-all-object')
         def leave_module(self, node):
             """leave module: check globals
             """
    @@ -208,17 +336,18 @@ def leave_module(self, node):
             not_consumed = self._to_consume.pop()[0]
             # attempt to check for __all__ if defined
             if '__all__' in node.locals:
    -            assigned = node.igetattr('__all__').next()
    +            assigned = next(node.igetattr('__all__'))
                 if assigned is not astroid.YES:
                     for elt in getattr(assigned, 'elts', ()):
                         try:
    -                        elt_name = elt.infer().next()
    +                        elt_name = next(elt.infer())
                         except astroid.InferenceError:
                             continue
     
                         if not isinstance(elt_name, astroid.Const) \
    -                             or not isinstance(elt_name.value, basestring):
    -                        self.add_message('invalid-all-object', args=elt.as_string(), node=elt)
    +                             or not isinstance(elt_name.value, six.string_types):
    +                        self.add_message('invalid-all-object',
    +                                         args=elt.as_string(), node=elt)
                             continue
                         elt_name = elt_name.value
                         # If elt is in not_consumed, remove it from not_consumed
    @@ -235,12 +364,12 @@ def leave_module(self, node):
                                 if os.path.basename(basename) == '__init__':
                                     name = node.name + "." + elt_name
                                     try:
    -                                    file_from_modpath(name.split("."))
    +                                    modutils.file_from_modpath(name.split("."))
                                     except ImportError:
                                         self.add_message('undefined-all-variable',
                                                          args=elt_name,
                                                          node=elt)
    -                                except SyntaxError, exc:
    +                                except SyntaxError:
                                         # don't yield an syntax-error warning,
                                         # because it will be later yielded
                                         # when the file will be checked
    @@ -248,19 +377,52 @@ def leave_module(self, node):
             # don't check unused imports in __init__ files
             if not self.config.init_import and node.package:
                 return
    -        for name, stmts in not_consumed.iteritems():
    -            if any(isinstance(stmt, astroid.AssName)
    -                   and isinstance(stmt.ass_type(), astroid.AugAssign)
    -                   for stmt in stmts):
    -                continue
    -            stmt = stmts[0]
    -            if isinstance(stmt, astroid.Import):
    -                self.add_message('unused-import', args=name, node=stmt)
    -            elif isinstance(stmt, astroid.From) and stmt.modname != '__future__':
    -                if stmt.names[0][0] == '*':
    -                    self.add_message('unused-wildcard-import', args=name, node=stmt)
    -                else:
    -                    self.add_message('unused-import', args=name, node=stmt)
    +
    +        self._check_imports(not_consumed)
    +
    +    def _check_imports(self, not_consumed):
    +        local_names = _fix_dot_imports(not_consumed)
    +        checked = set()
    +        for name, stmt in local_names:
    +            for imports in stmt.names:
    +                real_name = imported_name = imports[0]
    +                if imported_name == "*":
    +                    real_name = name
    +                as_name = imports[1]
    +                if real_name in checked:
    +                    continue
    +                if name not in (real_name, as_name):
    +                    continue
    +                checked.add(real_name)
    +
    +                if (isinstance(stmt, astroid.Import) or
    +                        (isinstance(stmt, astroid.From) and
    +                         not stmt.modname)):
    +                    if (isinstance(stmt, astroid.From) and
    +                            SPECIAL_OBJ.search(imported_name)):
    +                        # Filter special objects (__doc__, __all__) etc.,
    +                        # because they can be imported for exporting.
    +                        continue
    +                    if as_name is None:
    +                        msg = "import %s" % imported_name
    +                    else:
    +                        msg = "%s imported as %s" % (imported_name, as_name)
    +                    self.add_message('unused-import', args=msg, node=stmt)
    +                elif isinstance(stmt, astroid.From) and stmt.modname != '__future__':
    +                    if SPECIAL_OBJ.search(imported_name):
    +                        # Filter special objects (__doc__, __all__) etc.,
    +                        # because they can be imported for exporting.
    +                        continue
    +                    if imported_name == '*':
    +                        self.add_message('unused-wildcard-import',
    +                                         args=name, node=stmt)
    +                    else:
    +                        if as_name is None:
    +                            msg = "%s imported from %s" % (imported_name, stmt.modname)
    +                        else:
    +                            fields = (imported_name, stmt.modname, as_name)
    +                            msg = "%s imported from %s as %s" % fields
    +                        self.add_message('unused-import', args=msg, node=stmt)
             del self._to_consume
     
         def visit_class(self, node):
    @@ -352,10 +514,21 @@ def leave_function(self, node):
             klass = node.parent.frame()
             if is_method and (klass.type == 'interface' or node.is_abstract()):
                 return
    +        if is_method and isinstance(klass, astroid.Class):
    +            confidence = INFERENCE if has_known_bases(klass) else INFERENCE_FAILURE
    +        else:
    +            confidence = HIGH
             authorized_rgx = self.config.dummy_variables_rgx
             called_overridden = False
             argnames = node.argnames()
    -        for name, stmts in not_consumed.iteritems():
    +        global_names = set()
    +        nonlocal_names = set()
    +        for global_stmt in node.nodes_of_class(astroid.Global):
    +            global_names.update(set(global_stmt.names))
    +        for nonlocal_stmt in node.nodes_of_class(astroid.Nonlocal):
    +            nonlocal_names.update(set(nonlocal_stmt.names))
    +
    +        for name, stmts in six.iteritems(not_consumed):
                 # ignore some special names specified by user configuration
                 if authorized_rgx.match(name):
                     continue
    @@ -364,6 +537,23 @@ def leave_function(self, node):
                 stmt = stmts[0]
                 if isinstance(stmt, astroid.Global):
                     continue
    +            if isinstance(stmt, (astroid.Import, astroid.From)):
    +                # Detect imports, assigned to global statements.
    +                if global_names:
    +                    skip = False
    +                    for import_name, import_alias in stmt.names:
    +                        # If the import uses an alias, check only that.
    +                        # Otherwise, check only the import name.
    +                        if import_alias:
    +                            if import_alias in global_names:
    +                                skip = True
    +                                break
    +                        elif import_name in global_names:
    +                            skip = True
    +                            break
    +                    if skip:
    +                        continue
    +
                 # care about functions with unknown argument (builtins)
                 if name in argnames:
                     if is_method:
    @@ -378,11 +568,16 @@ def leave_function(self, node):
                             continue
                         if node.name in PYMETHODS and node.name not in ('__init__', '__new__'):
                             continue
    -                # don't check callback arguments XXX should be configurable
    -                if node.name.startswith('cb_') or node.name.endswith('_cb'):
    +                # don't check callback arguments
    +                if any(node.name.startswith(cb) or node.name.endswith(cb)
    +                       for cb in self.config.callbacks):
                         continue
    -                self.add_message('unused-argument', args=name, node=stmt)
    +                self.add_message('unused-argument', args=name, node=stmt,
    +                                 confidence=confidence)
                 else:
    +                if stmt.parent and isinstance(stmt.parent, astroid.Assign):
    +                    if name in nonlocal_names:
    +                        continue
                     self.add_message('unused-variable', args=name, node=stmt)
     
         @check_messages('global-variable-undefined', 'global-variable-not-assigned', 'global-statement',
    @@ -410,8 +605,9 @@ def visit_global(self, node):
                         # same scope level assignment
                         break
                 else:
    -                # global but no assignment
    -                self.add_message('global-variable-not-assigned', args=name, node=node)
    +                if not _find_frame_imports(name, frame):
    +                    self.add_message('global-variable-not-assigned',
    +                                     args=name, node=node)
                     default_message = False
                 if not assign_nodes:
                     continue
    @@ -429,10 +625,16 @@ def visit_global(self, node):
             if default_message:
                 self.add_message('global-statement', node=node)
     
    -    def _check_late_binding_closure(self, node, assignment_node, scope_type):
    +    def _check_late_binding_closure(self, node, assignment_node):
    +        def _is_direct_lambda_call():
    +            return (isinstance(node_scope.parent, astroid.CallFunc)
    +                    and node_scope.parent.func is node_scope)
    +
             node_scope = node.scope()
             if not isinstance(node_scope, (astroid.Lambda, astroid.Function)):
                 return
    +        if isinstance(node.parent, astroid.Arguments):
    +            return
     
             if isinstance(assignment_node, astroid.Comprehension):
                 if assignment_node.parent.parent_of(node.scope()):
    @@ -445,9 +647,11 @@ def _check_late_binding_closure(self, node, assignment_node, scope_type):
                         break
                     maybe_for = maybe_for.parent
                 else:
    -                if maybe_for.parent_of(node_scope) and not isinstance(node_scope.statement(), astroid.Return):
    +                if (maybe_for.parent_of(node_scope)
    +                        and not _is_direct_lambda_call()
    +                        and not isinstance(node_scope.statement(), astroid.Return)):
                         self.add_message('cell-var-from-loop', node=node, args=node.name)
    -        
    +
         def _loopvar_name(self, node, name):
             # filter variables according to node's scope
             # XXX used to filter parents but don't remember why, and removing this
    @@ -474,7 +678,7 @@ def _loopvar_name(self, node, name):
                 _astmts = astmts[:1]
             for i, stmt in enumerate(astmts[1:]):
                 if (astmts[i].statement().parent_of(stmt)
    -                and not in_for_else_branch(astmts[i].statement(), stmt)):
    +                    and not in_for_else_branch(astmts[i].statement(), stmt)):
                     continue
                 _astmts.append(stmt)
             astmts = _astmts
    @@ -514,7 +718,7 @@ def visit_name(self, node):
             # a decorator, then start from the parent frame of the function instead
             # of the function frame - and thus open an inner class scope
             if (is_func_default(node) or is_func_decorator(node)
    -            or is_ancestor_name(frame, node)):
    +                or is_ancestor_name(frame, node)):
                 start_index = len(self._to_consume) - 2
             else:
                 start_index = len(self._to_consume) - 1
    @@ -528,14 +732,37 @@ def visit_name(self, node):
                 # names. The only exception is when the starting scope is a
                 # comprehension and its direct outer scope is a class
                 if scope_type == 'class' and i != start_index and not (
    -                base_scope_type == 'comprehension' and i == start_index-1):
    -                # XXX find a way to handle class scope in a smoother way
    -                continue
    +                    base_scope_type == 'comprehension' and i == start_index-1):
    +                # Detect if we are in a local class scope, as an assignment.
    +                # For example, the following is fair game.
    +                #
    +                # class A:
    +                #    b = 1
    +                #    c = lambda b=b: b * b
    +                #
    +                # class B:
    +                #    tp = 1
    +                #    def func(self, arg: tp):
    +                #        ...
    +
    +                in_annotation = (
    +                    PY3K and isinstance(frame, astroid.Function)
    +                    and node.statement() is frame and
    +                    (node in frame.args.annotations
    +                     or node is frame.args.varargannotation
    +                     or node is frame.args.kwargannotation))
    +                if in_annotation:
    +                    frame_locals = frame.parent.scope().locals
    +                else:
    +                    frame_locals = frame.locals
    +                if not ((isinstance(frame, astroid.Class) or in_annotation)
    +                        and name in frame_locals):
    +                    continue
                 # the name has already been consumed, only check it's not a loop
                 # variable used outside the loop
                 if name in consumed:
                     defnode = assign_parent(consumed[name][0])
    -                self._check_late_binding_closure(node, defnode, scope_type)
    +                self._check_late_binding_closure(node, defnode)
                     self._loopvar_name(node, name)
                     break
                 # mark the name as consumed if it's defined in this scope
    @@ -547,12 +774,12 @@ def visit_name(self, node):
                 # checks for use before assignment
                 defnode = assign_parent(to_consume[name][0])
                 if defnode is not None:
    -                self._check_late_binding_closure(node, defnode, scope_type)
    +                self._check_late_binding_closure(node, defnode)
                     defstmt = defnode.statement()
                     defframe = defstmt.frame()
                     maybee0601 = True
                     if not frame is defframe:
    -                    maybee0601 = False
    +                    maybee0601 = _detect_global_scope(node, frame, defframe)
                     elif defframe.parent is None:
                         # we are at the module level, check the name is not
                         # defined in builtins
    @@ -569,16 +796,71 @@ def visit_name(self, node):
                                 maybee0601 = not any(isinstance(child, astroid.Nonlocal)
                                                      and name in child.names
                                                      for child in defframe.get_children())
    +
    +                # Handle a couple of class scoping issues.
    +                annotation_return = False
    +                # The class reuses itself in the class scope.
    +                recursive_klass = (frame is defframe and
    +                                   defframe.parent_of(node) and
    +                                   isinstance(defframe, astroid.Class) and
    +                                   node.name == defframe.name)
    +                if (self._to_consume[-1][-1] == 'lambda' and
    +                        isinstance(frame, astroid.Class)
    +                        and name in frame.locals):
    +                    maybee0601 = True
    +                elif (isinstance(defframe, astroid.Class) and
    +                      isinstance(frame, astroid.Function)):
    +                    # Special rule for function return annotations,
    +                    # which uses the same name as the class where
    +                    # the function lives.
    +                    if (PY3K and node is frame.returns and
    +                            defframe.parent_of(frame.returns)):
    +                        maybee0601 = annotation_return = True
    +
    +                    if (maybee0601 and defframe.name in defframe.locals and
    +                            defframe.locals[name][0].lineno < frame.lineno):
    +                        # Detect class assignments with the same
    +                        # name as the class. In this case, no warning
    +                        # should be raised.
    +                        maybee0601 = False
    +                elif recursive_klass:
    +                    maybee0601 = True
    +                else:
    +                    maybee0601 = maybee0601 and stmt.fromlineno <= defstmt.fromlineno
    +
                     if (maybee0601
    -                    and stmt.fromlineno <= defstmt.fromlineno
    -                    and not is_defined_before(node)
    -                    and not are_exclusive(stmt, defstmt, ('NameError', 'Exception', 'BaseException'))):
    -                    if defstmt is stmt and isinstance(node, (astroid.DelName,
    -                                                             astroid.AssName)):
    +                        and not is_defined_before(node)
    +                        and not are_exclusive(stmt, defstmt, ('NameError',
    +                                                              'Exception',
    +                                                              'BaseException'))):
    +                    if recursive_klass or (defstmt is stmt and
    +                                           isinstance(node, (astroid.DelName,
    +                                                             astroid.AssName))):
    +                        self.add_message('undefined-variable', args=name, node=node)
    +                    elif annotation_return:
                             self.add_message('undefined-variable', args=name, node=node)
                         elif self._to_consume[-1][-1] != 'lambda':
    -                        # E0601 may *not* occurs in lambda scope
    +                        # E0601 may *not* occurs in lambda scope.
                             self.add_message('used-before-assignment', args=name, node=node)
    +                    elif self._to_consume[-1][-1] == 'lambda':
    +                        # E0601 can occur in class-level scope in lambdas, as in
    +                        # the following example:
    +                        #   class A:
    +                        #      x = lambda attr: f + attr
    +                        #      f = 42
    +                        if isinstance(frame, astroid.Class) and name in frame.locals:
    +                            if isinstance(node.parent, astroid.Arguments):
    +                                # Doing the following is fine:
    +                                #   class A:
    +                                #      x = 42
    +                                #      y = lambda attr=x: attr
    +                                if stmt.fromlineno <= defstmt.fromlineno:
    +                                    self.add_message('used-before-assignment',
    +                                                     args=name, node=node)
    +                            else:
    +                                self.add_message('undefined-variable',
    +                                                 args=name, node=node)
    +
                 if isinstance(node, astroid.AssName): # Aug AssName
                     del consumed[name]
                 else:
    @@ -599,7 +881,7 @@ def visit_import(self, node):
             for name, _ in node.names:
                 parts = name.split('.')
                 try:
    -                module = node.infer_name_module(parts[0]).next()
    +                module = next(node.infer_name_module(parts[0]))
                 except astroid.ResolveError:
                     continue
                 self._check_module_attrs(node, module, parts[1:])
    @@ -611,10 +893,7 @@ def visit_from(self, node):
             level = getattr(node, 'level', None)
             try:
                 module = node.root().import_module(name_parts[0], level=level)
    -        except AstroidBuildingException:
    -            return
    -        except Exception, exc:
    -            print 'Unhandled exception in VariablesChecker:', exc
    +        except Exception: # pylint: disable=broad-except
                 return
             module = self._check_module_attrs(node, module, name_parts[1:])
             if not module:
    @@ -645,10 +924,19 @@ def _check_unpacking(self, infered, node, targets):
             """
             if infered is astroid.YES:
                 return
    +        if (isinstance(infered.parent, astroid.Arguments) and
    +                isinstance(node.value, astroid.Name) and
    +                node.value.name == infered.parent.vararg):
    +            # Variable-length argument, we can't determine the length.
    +            return
             if isinstance(infered, (astroid.Tuple, astroid.List)):
                 # attempt to check unpacking is properly balanced
                 values = infered.itered()
                 if len(targets) != len(values):
    +                # Check if we have starred nodes.
    +                if any(isinstance(target, astroid.Starred)
    +                       for target in targets):
    +                    return
                     self.add_message('unbalanced-tuple-unpacking', node=node,
                                      args=(_get_unpacking_extra_info(node, infered),
                                            len(targets),
    @@ -675,17 +963,22 @@ def _check_module_attrs(self, node, module, module_names):
             if the latest access name corresponds to a module, return it
             """
             assert isinstance(module, astroid.Module), module
    +        ignored_modules = get_global_option(self, 'ignored-modules',
    +                                            default=[])
             while module_names:
                 name = module_names.pop(0)
                 if name == '__dict__':
                     module = None
                     break
                 try:
    -                module = module.getattr(name)[0].infer().next()
    +                module = next(module.getattr(name)[0].infer())
                     if module is astroid.YES:
                         return None
                 except astroid.NotFoundError:
    -                self.add_message('no-name-in-module', args=(name, module.name), node=node)
    +                if module.name in ignored_modules:
    +                    return None
    +                self.add_message('no-name-in-module',
    +                                 args=(name, module.name), node=node)
                     return None
                 except astroid.InferenceError:
                     return None
    @@ -720,16 +1013,51 @@ def leave_module(self, node):
             """ Update consumption analysis variable
             for metaclasses.
             """
    +        module_locals = self._to_consume[0][0]
    +        module_imports = self._to_consume[0][1]
    +        consumed = {}
    +
             for klass in node.nodes_of_class(astroid.Class):
    -            if klass._metaclass:
    -                metaclass = klass.metaclass()
    -                module_locals = self._to_consume[0][0]
    +            found = metaclass = name = None
    +            if not klass._metaclass:
    +                # Skip if this class doesn't use
    +                # explictly a metaclass, but inherits it from ancestors
    +                continue
    +
    +            metaclass = klass.metaclass()
     
    +            # Look the name in the already found locals.
    +            # If it's not found there, look in the module locals
    +            # and in the imported modules.
    +            if isinstance(klass._metaclass, astroid.Name):
    +                name = klass._metaclass.name
    +            elif metaclass:
    +                # if it uses a `metaclass=module.Class`
    +                name = metaclass.root().name
    +
    +            if name:
    +                found = consumed.setdefault(
    +                    name, module_locals.get(name, module_imports.get(name)))
    +
    +            if found is None and not metaclass:
    +                name = None
                     if isinstance(klass._metaclass, astroid.Name):
    -                    module_locals.pop(klass._metaclass.name, None)
    -                if metaclass:                
    -                    # if it uses a `metaclass=module.Class`                            
    -                    module_locals.pop(metaclass.root().name, None)
    +                    name = klass._metaclass.name
    +                elif isinstance(klass._metaclass, astroid.Getattr):
    +                    name = klass._metaclass.as_string()
    +
    +                if name is not None:
    +                    if not (name in astroid.Module.scope_attrs or
    +                            is_builtin(name) or
    +                            name in self.config.additional_builtins or
    +                            name in node.locals):
    +                        self.add_message('undefined-variable',
    +                                         node=klass,
    +                                         args=(name, ))
    +        # Pop the consumed items, in order to
    +        # avoid having unused-import false positives
    +        for name in consumed:
    +            module_locals.pop(name, None)
             super(VariablesChecker3k, self).leave_module(node)
     
     if sys.version_info >= (3, 0):
    diff --git a/pymode/libs/pylama/lint/pylama_pylint/pylint/config.py b/pymode/libs/pylint/config.py
    similarity index 91%
    rename from pymode/libs/pylama/lint/pylama_pylint/pylint/config.py
    rename to pymode/libs/pylint/config.py
    index 992c2934..ebfe5789 100644
    --- a/pymode/libs/pylama/lint/pylama_pylint/pylint/config.py
    +++ b/pymode/libs/pylint/config.py
    @@ -17,6 +17,7 @@
     * pylint.d (PYLINTHOME)
     """
     from __future__ import with_statement
    +from __future__ import print_function
     
     import pickle
     import os
    @@ -50,15 +51,15 @@ def load_results(base):
         """
         data_file = get_pdata_path(base, 1)
         try:
    -        with open(data_file) as stream:
    +        with open(data_file, _PICK_LOAD) as stream:
                 return pickle.load(stream)
    -    except:
    +    except Exception: # pylint: disable=broad-except
             return {}
     
     if sys.version_info < (3, 0):
    -    _PICK_MOD = 'w'
    +    _PICK_DUMP, _PICK_LOAD = 'w', 'r'
     else:
    -    _PICK_MOD = 'wb'
    +    _PICK_DUMP, _PICK_LOAD = 'wb', 'rb'
     
     def save_results(results, base):
         """pickle results"""
    @@ -66,13 +67,13 @@ def save_results(results, base):
             try:
                 os.mkdir(PYLINT_HOME)
             except OSError:
    -            print >> sys.stderr, 'Unable to create directory %s' % PYLINT_HOME
    +            print('Unable to create directory %s' % PYLINT_HOME, file=sys.stderr)
         data_file = get_pdata_path(base, 1)
         try:
    -        with open(data_file, _PICK_MOD) as stream:
    +        with open(data_file, _PICK_DUMP) as stream:
                 pickle.dump(results, stream)
    -    except (IOError, OSError), ex:
    -        print >> sys.stderr, 'Unable to create file %s: %s' % (data_file, ex)
    +    except (IOError, OSError) as ex:
    +        print('Unable to create file %s: %s' % (data_file, ex), file=sys.stderr)
     
     # location of the configuration file ##########################################
     
    diff --git a/pymode/libs/pylint/epylint.py b/pymode/libs/pylint/epylint.py
    new file mode 100644
    index 00000000..3d73ecd3
    --- /dev/null
    +++ b/pymode/libs/pylint/epylint.py
    @@ -0,0 +1,177 @@
    +# -*- coding: utf-8; mode: python; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4
    +# -*- vim:fenc=utf-8:ft=python:et:sw=4:ts=4:sts=4
    +# Copyright (c) 2003-2013 LOGILAB S.A. (Paris, FRANCE).
    +# http://www.logilab.fr/ -- mailto:contact@logilab.fr
    +#
    +# This program is free software; you can redistribute it and/or modify it under
    +# the terms of the GNU General Public License as published by the Free Software
    +# Foundation; either version 2 of the License, or (at your option) any later
    +# version.
    +#
    +# This program is distributed in the hope that it will be useful, but WITHOUT
    +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
    +# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details
    +#
    +# You should have received a copy of the GNU General Public License along with
    +# this program; if not, write to the Free Software Foundation, Inc.,
    +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
    +"""Emacs and Flymake compatible Pylint.
    +
    +This script is for integration with emacs and is compatible with flymake mode.
    +
    +epylint walks out of python packages before invoking pylint. This avoids
    +reporting import errors that occur when a module within a package uses the
    +absolute import path to get another module within this package.
    +
    +For example:
    +    - Suppose a package is structured as
    +
    +        a/__init__.py
    +        a/b/x.py
    +        a/c/y.py
    +
    +   - Then if y.py imports x as "from a.b import x" the following produces pylint
    +     errors
    +
    +       cd a/c; pylint y.py
    +
    +   - The following obviously doesn't
    +
    +       pylint a/c/y.py
    +
    +   - As this script will be invoked by emacs within the directory of the file
    +     we are checking we need to go out of it to avoid these false positives.
    +
    +
    +You may also use py_run to run pylint with desired options and get back (or not)
    +its output.
    +"""
    +from __future__ import print_function
    +
    +import sys, os
    +import os.path as osp
    +from subprocess import Popen, PIPE
    +
    +def _get_env():
    +    '''Extracts the environment PYTHONPATH and appends the current sys.path to
    +    those.'''
    +    env = dict(os.environ)
    +    env['PYTHONPATH'] = os.pathsep.join(sys.path)
    +    return env
    +
    +def lint(filename, options=None):
    +    """Pylint the given file.
    +
    +    When run from emacs we will be in the directory of a file, and passed its
    +    filename.  If this file is part of a package and is trying to import other
    +    modules from within its own package or another package rooted in a directory
    +    below it, pylint will classify it as a failed import.
    +
    +    To get around this, we traverse down the directory tree to find the root of
    +    the package this module is in.  We then invoke pylint from this directory.
    +
    +    Finally, we must correct the filenames in the output generated by pylint so
    +    Emacs doesn't become confused (it will expect just the original filename,
    +    while pylint may extend it with extra directories if we've traversed down
    +    the tree)
    +    """
    +    # traverse downwards until we are out of a python package
    +    full_path = osp.abspath(filename)
    +    parent_path = osp.dirname(full_path)
    +    child_path = osp.basename(full_path)
    +
    +    while parent_path != "/" and osp.exists(osp.join(parent_path, '__init__.py')):
    +        child_path = osp.join(osp.basename(parent_path), child_path)
    +        parent_path = osp.dirname(parent_path)
    +
    +    # Start pylint
    +    # Ensure we use the python and pylint associated with the running epylint
    +    from pylint import lint as lint_mod
    +    lint_path = lint_mod.__file__
    +    options = options or ['--disable=C,R,I']
    +    cmd = [sys.executable, lint_path] + options + [
    +        '--msg-template', '{path}:{line}: {category} ({msg_id}, {symbol}, {obj}) {msg}',
    +        '-r', 'n', child_path]
    +    process = Popen(cmd, stdout=PIPE, cwd=parent_path, env=_get_env(),
    +                    universal_newlines=True)
    +
    +    for line in process.stdout:
    +        # remove pylintrc warning
    +        if line.startswith("No config file found"):
    +            continue
    +
    +        # modify the file name thats output to reverse the path traversal we made
    +        parts = line.split(":")
    +        if parts and parts[0] == child_path:
    +            line = ":".join([filename] + parts[1:])
    +        print(line, end=' ')
    +
    +    process.wait()
    +    return process.returncode
    +
    +
    +def py_run(command_options='', return_std=False, stdout=None, stderr=None,
    +           script='epylint'):
    +    """Run pylint from python
    +
    +    ``command_options`` is a string containing ``pylint`` command line options;
    +    ``return_std`` (boolean) indicates return of created standard output
    +    and error (see below);
    +    ``stdout`` and ``stderr`` are 'file-like' objects in which standard output
    +    could be written.
    +
    +    Calling agent is responsible for stdout/err management (creation, close).
    +    Default standard output and error are those from sys,
    +    or standalone ones (``subprocess.PIPE``) are used
    +    if they are not set and ``return_std``.
    +
    +    If ``return_std`` is set to ``True``, this function returns a 2-uple
    +    containing standard output and error related to created process,
    +    as follows: ``(stdout, stderr)``.
    +
    +    A trivial usage could be as follows:
    +        >>> py_run( '--version')
    +        No config file found, using default configuration
    +        pylint 0.18.1,
    +            ...
    +
    +    To silently run Pylint on a module, and get its standard output and error:
    +        >>> (pylint_stdout, pylint_stderr) = py_run( 'module_name.py', True)
    +    """
    +    # Create command line to call pylint
    +    if os.name == 'nt':
    +        script += '.bat'
    +    command_line = script + ' ' + command_options
    +    # Providing standard output and/or error if not set
    +    if stdout is None:
    +        if return_std:
    +            stdout = PIPE
    +        else:
    +            stdout = sys.stdout
    +    if stderr is None:
    +        if return_std:
    +            stderr = PIPE
    +        else:
    +            stderr = sys.stderr
    +    # Call pylint in a subprocess
    +    p = Popen(command_line, shell=True, stdout=stdout, stderr=stderr,
    +              env=_get_env(), universal_newlines=True)
    +    p.wait()
    +    # Return standard output and error
    +    if return_std:
    +        return (p.stdout, p.stderr)
    +
    +
    +def Run():
    +    if len(sys.argv) == 1:
    +        print("Usage: %s  [options]" % sys.argv[0])
    +        sys.exit(1)
    +    elif not osp.exists(sys.argv[1]):
    +        print("%s does not exist" % sys.argv[1])
    +        sys.exit(1)
    +    else:
    +        sys.exit(lint(sys.argv[1], sys.argv[2:]))
    +
    +
    +if __name__ == '__main__':
    +    Run()
    diff --git a/pymode/libs/pylint/gui.py b/pymode/libs/pylint/gui.py
    new file mode 100644
    index 00000000..8327e0ec
    --- /dev/null
    +++ b/pymode/libs/pylint/gui.py
    @@ -0,0 +1,531 @@
    +# Copyright (c) 2003-2013 LOGILAB S.A. (Paris, FRANCE).
    +# http://www.logilab.fr/ -- mailto:contact@logilab.fr
    +#
    +# This program is free software; you can redistribute it and/or modify it under
    +# the terms of the GNU General Public License as published by the Free Software
    +# Foundation; either version 2 of the License, or (at your option) any later
    +# version.
    +#
    +# This program is distributed in the hope that it will be useful, but WITHOUT
    +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
    +# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details
    +#
    +# You should have received a copy of the GNU General Public License along with
    +# this program; if not, write to the Free Software Foundation, Inc.,
    +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
    +"""Tkinker gui for pylint"""
    +from __future__ import print_function
    +
    +import os
    +import sys
    +import re
    +from threading import Thread
    +
    +import six
    +
    +from six.moves.tkinter import (
    +    Tk, Frame, Listbox, Entry, Label, Button, Scrollbar,
    +    Checkbutton, Radiobutton, IntVar, StringVar, PanedWindow,
    +    TOP, LEFT, RIGHT, BOTTOM, END, X, Y, BOTH, SUNKEN, W,
    +    HORIZONTAL, DISABLED, NORMAL, W,
    +)
    +from six.moves.tkinter_tkfiledialog import (
    +    askopenfilename, askdirectory,
    +)
    +
    +import pylint.lint
    +from pylint.reporters.guireporter import GUIReporter
    +
    +HOME = os.path.expanduser('~/')
    +HISTORY = '.pylint-gui-history'
    +COLORS = {'(I)':'green',
    +          '(C)':'blue', '(R)':'darkblue',
    +          '(W)':'black', '(E)':'darkred',
    +          '(F)':'red'}
    +
    +
    +def convert_to_string(msg):
    +    """make a string representation of a message"""
    +    module_object = msg.module
    +    if msg.obj:
    +        module_object += ".%s" % msg.obj
    +    return "(%s) %s [%d]: %s" % (msg.C, module_object, msg.line, msg.msg)
    +
    +class BasicStream(object):
    +    '''
    +    used in gui reporter instead of writing to stdout, it is written to
    +    this stream and saved in contents
    +    '''
    +    def __init__(self, gui):
    +        """init"""
    +        self.curline = ""
    +        self.gui = gui
    +        self.contents = []
    +        self.outdict = {}
    +        self.currout = None
    +        self.next_title = None
    +
    +    def write(self, text):
    +        """write text to the stream"""
    +        if re.match('^--+$', text.strip()) or re.match('^==+$', text.strip()):
    +            if self.currout:
    +                self.outdict[self.currout].remove(self.next_title)
    +                self.outdict[self.currout].pop()
    +            self.currout = self.next_title
    +            self.outdict[self.currout] = ['']
    +
    +        if text.strip():
    +            self.next_title = text.strip()
    +
    +        if text.startswith(os.linesep):
    +            self.contents.append('')
    +            if self.currout:
    +                self.outdict[self.currout].append('')
    +        self.contents[-1] += text.strip(os.linesep)
    +        if self.currout:
    +            self.outdict[self.currout][-1] += text.strip(os.linesep)
    +        if text.endswith(os.linesep) and text.strip():
    +            self.contents.append('')
    +            if self.currout:
    +                self.outdict[self.currout].append('')
    +
    +    def fix_contents(self):
    +        """finalize what the contents of the dict should look like before output"""
    +        for item in self.outdict:
    +            num_empty = self.outdict[item].count('')
    +            for _ in range(num_empty):
    +                self.outdict[item].remove('')
    +            if self.outdict[item]:
    +                self.outdict[item].pop(0)
    +
    +    def output_contents(self):
    +        """output contents of dict to the gui, and set the rating"""
    +        self.fix_contents()
    +        self.gui.tabs = self.outdict
    +        try:
    +            self.gui.rating.set(self.outdict['Global evaluation'][0])
    +        except KeyError:
    +            self.gui.rating.set('Error')
    +        self.gui.refresh_results_window()
    +
    +        #reset stream variables for next run
    +        self.contents = []
    +        self.outdict = {}
    +        self.currout = None
    +        self.next_title = None
    +
    +
    +class LintGui(object):
    +    """Build and control a window to interact with pylint"""
    +
    +    def __init__(self, root=None):
    +        """init"""
    +        self.root = root or Tk()
    +        self.root.title('Pylint')
    +        #reporter
    +        self.reporter = None
    +        #message queue for output from reporter
    +        self.msg_queue = six.moves.queue.Queue()
    +        self.msgs = []
    +        self.visible_msgs = []
    +        self.filenames = []
    +        self.rating = StringVar()
    +        self.tabs = {}
    +        self.report_stream = BasicStream(self)
    +        #gui objects
    +        self.lb_messages = None
    +        self.showhistory = None
    +        self.results = None
    +        self.btnRun = None
    +        self.information_box = None
    +        self.convention_box = None
    +        self.refactor_box = None
    +        self.warning_box = None
    +        self.error_box = None
    +        self.fatal_box = None
    +        self.txtModule = None
    +        self.status = None
    +        self.msg_type_dict = None
    +        self.init_gui()
    +
    +    def init_gui(self):
    +        """init helper"""
    +
    +        window = PanedWindow(self.root, orient="vertical")
    +        window.pack(side=TOP, fill=BOTH, expand=True)
    +
    +        top_pane = Frame(window)
    +        window.add(top_pane)
    +        mid_pane = Frame(window)
    +        window.add(mid_pane)
    +        bottom_pane = Frame(window)
    +        window.add(bottom_pane)
    +
    +        #setting up frames
    +        top_frame = Frame(top_pane)
    +        mid_frame = Frame(top_pane)
    +        history_frame = Frame(top_pane)
    +        radio_frame = Frame(mid_pane)
    +        rating_frame = Frame(mid_pane)
    +        res_frame = Frame(mid_pane)
    +        check_frame = Frame(bottom_pane)
    +        msg_frame = Frame(bottom_pane)
    +        btn_frame = Frame(bottom_pane)
    +        top_frame.pack(side=TOP, fill=X)
    +        mid_frame.pack(side=TOP, fill=X)
    +        history_frame.pack(side=TOP, fill=BOTH, expand=True)
    +        radio_frame.pack(side=TOP, fill=X)
    +        rating_frame.pack(side=TOP, fill=X)
    +        res_frame.pack(side=TOP, fill=BOTH, expand=True)
    +        check_frame.pack(side=TOP, fill=X)
    +        msg_frame.pack(side=TOP, fill=BOTH, expand=True)
    +        btn_frame.pack(side=TOP, fill=X)
    +
    +        # Binding F5 application-wide to run lint
    +        self.root.bind('', self.run_lint)
    +
    +        #Message ListBox
    +        rightscrollbar = Scrollbar(msg_frame)
    +        rightscrollbar.pack(side=RIGHT, fill=Y)
    +        bottomscrollbar = Scrollbar(msg_frame, orient=HORIZONTAL)
    +        bottomscrollbar.pack(side=BOTTOM, fill=X)
    +        self.lb_messages = Listbox(
    +            msg_frame,
    +            yscrollcommand=rightscrollbar.set,
    +            xscrollcommand=bottomscrollbar.set,
    +            bg="white")
    +        self.lb_messages.bind("", self.show_sourcefile)
    +        self.lb_messages.pack(expand=True, fill=BOTH)
    +        rightscrollbar.config(command=self.lb_messages.yview)
    +        bottomscrollbar.config(command=self.lb_messages.xview)
    +
    +        #History ListBoxes
    +        rightscrollbar2 = Scrollbar(history_frame)
    +        rightscrollbar2.pack(side=RIGHT, fill=Y)
    +        bottomscrollbar2 = Scrollbar(history_frame, orient=HORIZONTAL)
    +        bottomscrollbar2.pack(side=BOTTOM, fill=X)
    +        self.showhistory = Listbox(
    +            history_frame,
    +            yscrollcommand=rightscrollbar2.set,
    +            xscrollcommand=bottomscrollbar2.set,
    +            bg="white")
    +        self.showhistory.pack(expand=True, fill=BOTH)
    +        rightscrollbar2.config(command=self.showhistory.yview)
    +        bottomscrollbar2.config(command=self.showhistory.xview)
    +        self.showhistory.bind('', self.select_recent_file)
    +        self.set_history_window()
    +
    +        #status bar
    +        self.status = Label(self.root, text="", bd=1, relief=SUNKEN, anchor=W)
    +        self.status.pack(side=BOTTOM, fill=X)
    +
    +        #labelbl_ratingls
    +        lbl_rating_label = Label(rating_frame, text='Rating:')
    +        lbl_rating_label.pack(side=LEFT)
    +        lbl_rating = Label(rating_frame, textvariable=self.rating)
    +        lbl_rating.pack(side=LEFT)
    +        Label(mid_frame, text='Recently Used:').pack(side=LEFT)
    +        Label(top_frame, text='Module or package').pack(side=LEFT)
    +
    +        #file textbox
    +        self.txt_module = Entry(top_frame, background='white')
    +        self.txt_module.bind('', self.run_lint)
    +        self.txt_module.pack(side=LEFT, expand=True, fill=X)
    +
    +        #results box
    +        rightscrollbar = Scrollbar(res_frame)
    +        rightscrollbar.pack(side=RIGHT, fill=Y)
    +        bottomscrollbar = Scrollbar(res_frame, orient=HORIZONTAL)
    +        bottomscrollbar.pack(side=BOTTOM, fill=X)
    +        self.results = Listbox(
    +            res_frame,
    +            yscrollcommand=rightscrollbar.set,
    +            xscrollcommand=bottomscrollbar.set,
    +            bg="white", font="Courier")
    +        self.results.pack(expand=True, fill=BOTH, side=BOTTOM)
    +        rightscrollbar.config(command=self.results.yview)
    +        bottomscrollbar.config(command=self.results.xview)
    +
    +        #buttons
    +        Button(top_frame, text='Open', command=self.file_open).pack(side=LEFT)
    +        Button(top_frame, text='Open Package',
    +               command=(lambda: self.file_open(package=True))).pack(side=LEFT)
    +
    +        self.btnRun = Button(top_frame, text='Run', command=self.run_lint)
    +        self.btnRun.pack(side=LEFT)
    +        Button(btn_frame, text='Quit', command=self.quit).pack(side=BOTTOM)
    +
    +        #radio buttons
    +        self.information_box = IntVar()
    +        self.convention_box = IntVar()
    +        self.refactor_box = IntVar()
    +        self.warning_box = IntVar()
    +        self.error_box = IntVar()
    +        self.fatal_box = IntVar()
    +        i = Checkbutton(check_frame, text="Information", fg=COLORS['(I)'],
    +                        variable=self.information_box, command=self.refresh_msg_window)
    +        c = Checkbutton(check_frame, text="Convention", fg=COLORS['(C)'],
    +                        variable=self.convention_box, command=self.refresh_msg_window)
    +        r = Checkbutton(check_frame, text="Refactor", fg=COLORS['(R)'],
    +                        variable=self.refactor_box, command=self.refresh_msg_window)
    +        w = Checkbutton(check_frame, text="Warning", fg=COLORS['(W)'],
    +                        variable=self.warning_box, command=self.refresh_msg_window)
    +        e = Checkbutton(check_frame, text="Error", fg=COLORS['(E)'],
    +                        variable=self.error_box, command=self.refresh_msg_window)
    +        f = Checkbutton(check_frame, text="Fatal", fg=COLORS['(F)'],
    +                        variable=self.fatal_box, command=self.refresh_msg_window)
    +        i.select()
    +        c.select()
    +        r.select()
    +        w.select()
    +        e.select()
    +        f.select()
    +        i.pack(side=LEFT)
    +        c.pack(side=LEFT)
    +        r.pack(side=LEFT)
    +        w.pack(side=LEFT)
    +        e.pack(side=LEFT)
    +        f.pack(side=LEFT)
    +
    +        #check boxes
    +        self.box = StringVar()
    +        # XXX should be generated
    +        report = Radiobutton(
    +            radio_frame, text="Report", variable=self.box,
    +            value="Report", command=self.refresh_results_window)
    +        raw_met = Radiobutton(
    +            radio_frame, text="Raw metrics", variable=self.box,
    +            value="Raw metrics", command=self.refresh_results_window)
    +        dup = Radiobutton(
    +            radio_frame, text="Duplication", variable=self.box,
    +            value="Duplication", command=self.refresh_results_window)
    +        ext = Radiobutton(
    +            radio_frame, text="External dependencies",
    +            variable=self.box, value="External dependencies",
    +            command=self.refresh_results_window)
    +        stat = Radiobutton(
    +            radio_frame, text="Statistics by type",
    +            variable=self.box, value="Statistics by type",
    +            command=self.refresh_results_window)
    +        msg_cat = Radiobutton(
    +            radio_frame, text="Messages by category",
    +            variable=self.box, value="Messages by category",
    +            command=self.refresh_results_window)
    +        msg = Radiobutton(
    +            radio_frame, text="Messages", variable=self.box,
    +            value="Messages", command=self.refresh_results_window)
    +        source_file = Radiobutton(
    +            radio_frame, text="Source File", variable=self.box,
    +            value="Source File", command=self.refresh_results_window)
    +        report.select()
    +        report.grid(column=0, row=0, sticky=W)
    +        raw_met.grid(column=1, row=0, sticky=W)
    +        dup.grid(column=2, row=0, sticky=W)
    +        msg.grid(column=3, row=0, sticky=W)
    +        stat.grid(column=0, row=1, sticky=W)
    +        msg_cat.grid(column=1, row=1, sticky=W)
    +        ext.grid(column=2, row=1, sticky=W)
    +        source_file.grid(column=3, row=1, sticky=W)
    +
    +        #dictionary for check boxes and associated error term
    +        self.msg_type_dict = {
    +            'I': lambda: self.information_box.get() == 1,
    +            'C': lambda: self.convention_box.get() == 1,
    +            'R': lambda: self.refactor_box.get() == 1,
    +            'E': lambda: self.error_box.get() == 1,
    +            'W': lambda: self.warning_box.get() == 1,
    +            'F': lambda: self.fatal_box.get() == 1
    +        }
    +        self.txt_module.focus_set()
    +
    +
    +    def select_recent_file(self, event): # pylint: disable=unused-argument
    +        """adds the selected file in the history listbox to the Module box"""
    +        if not self.showhistory.size():
    +            return
    +
    +        selected = self.showhistory.curselection()
    +        item = self.showhistory.get(selected)
    +        #update module
    +        self.txt_module.delete(0, END)
    +        self.txt_module.insert(0, item)
    +
    +    def refresh_msg_window(self):
    +        """refresh the message window with current output"""
    +        #clear the window
    +        self.lb_messages.delete(0, END)
    +        self.visible_msgs = []
    +        for msg in self.msgs:
    +            if self.msg_type_dict.get(msg.C)():
    +                self.visible_msgs.append(msg)
    +                msg_str = convert_to_string(msg)
    +                self.lb_messages.insert(END, msg_str)
    +                fg_color = COLORS.get(msg_str[:3], 'black')
    +                self.lb_messages.itemconfigure(END, fg=fg_color)
    +
    +    def refresh_results_window(self):
    +        """refresh the results window with current output"""
    +        #clear the window
    +        self.results.delete(0, END)
    +        try:
    +            for res in self.tabs[self.box.get()]:
    +                self.results.insert(END, res)
    +        except KeyError:
    +            pass
    +
    +    def process_incoming(self):
    +        """process the incoming messages from running pylint"""
    +        while self.msg_queue.qsize():
    +            try:
    +                msg = self.msg_queue.get(0)
    +                if msg == "DONE":
    +                    self.report_stream.output_contents()
    +                    return False
    +
    +                #adding message to list of msgs
    +                self.msgs.append(msg)
    +
    +                #displaying msg if message type is selected in check box
    +                if self.msg_type_dict.get(msg.C)():
    +                    self.visible_msgs.append(msg)
    +                    msg_str = convert_to_string(msg)
    +                    self.lb_messages.insert(END, msg_str)
    +                    fg_color = COLORS.get(msg_str[:3], 'black')
    +                    self.lb_messages.itemconfigure(END, fg=fg_color)
    +
    +            except six.moves.queue.Empty:
    +                pass
    +        return True
    +
    +    def periodic_call(self):
    +        """determine when to unlock the run button"""
    +        if self.process_incoming():
    +            self.root.after(100, self.periodic_call)
    +        else:
    +            #enabling button so it can be run again
    +            self.btnRun.config(state=NORMAL)
    +
    +    def mainloop(self):
    +        """launch the mainloop of the application"""
    +        self.root.mainloop()
    +
    +    def quit(self, _=None):
    +        """quit the application"""
    +        self.root.quit()
    +
    +    def halt(self): # pylint: disable=no-self-use
    +        """program halt placeholder"""
    +        return
    +
    +    def file_open(self, package=False, _=None):
    +        """launch a file browser"""
    +        if not package:
    +            filename = askopenfilename(parent=self.root,
    +                                       filetypes=[('pythonfiles', '*.py'),
    +                                                  ('allfiles', '*')],
    +                                       title='Select Module')
    +        else:
    +            filename = askdirectory(title="Select A Folder", mustexist=1)
    +
    +        if filename == ():
    +            return
    +
    +        self.txt_module.delete(0, END)
    +        self.txt_module.insert(0, filename)
    +
    +    def update_filenames(self):
    +        """update the list of recent filenames"""
    +        filename = self.txt_module.get()
    +        if not filename:
    +            filename = os.getcwd()
    +        if filename+'\n' in self.filenames:
    +            index = self.filenames.index(filename+'\n')
    +            self.filenames.pop(index)
    +
    +        #ensure only 10 most recent are stored
    +        if len(self.filenames) == 10:
    +            self.filenames.pop()
    +        self.filenames.insert(0, filename+'\n')
    +
    +    def set_history_window(self):
    +        """update the history window with info from the history file"""
    +        #clear the window
    +        self.showhistory.delete(0, END)
    +        # keep the last 10 most recent files
    +        try:
    +            view_history = open(HOME+HISTORY, 'r')
    +            for hist in view_history.readlines():
    +                if not hist in self.filenames:
    +                    self.filenames.append(hist)
    +                self.showhistory.insert(END, hist.split('\n')[0])
    +            view_history.close()
    +        except IOError:
    +            # do nothing since history file will be created later
    +            return
    +
    +    def run_lint(self, _=None):
    +        """launches pylint"""
    +        self.update_filenames()
    +        self.root.configure(cursor='watch')
    +        self.reporter = GUIReporter(self, output=self.report_stream)
    +        module = self.txt_module.get()
    +        if not module:
    +            module = os.getcwd()
    +
    +        #cleaning up msgs and windows
    +        self.msgs = []
    +        self.visible_msgs = []
    +        self.lb_messages.delete(0, END)
    +        self.tabs = {}
    +        self.results.delete(0, END)
    +        self.btnRun.config(state=DISABLED)
    +
    +        #setting up a worker thread to run pylint
    +        worker = Thread(target=lint_thread, args=(module, self.reporter, self,))
    +        self.periodic_call()
    +        worker.start()
    +
    +        # Overwrite the .pylint-gui-history file with all the new recently added files
    +        # in order from filenames but only save last 10 files
    +        write_history = open(HOME+HISTORY, 'w')
    +        write_history.writelines(self.filenames)
    +        write_history.close()
    +        self.set_history_window()
    +
    +        self.root.configure(cursor='')
    +
    +    def show_sourcefile(self, event=None):  # pylint: disable=unused-argument
    +        selected = self.lb_messages.curselection()
    +        if not selected:
    +            return
    +
    +        msg = self.visible_msgs[int(selected[0])]
    +        scroll = msg.line - 3
    +        if scroll < 0:
    +            scroll = 0
    +
    +        self.tabs["Source File"] = open(msg.path, "r").readlines()
    +        self.box.set("Source File")
    +        self.refresh_results_window()
    +        self.results.yview(scroll)
    +        self.results.select_set(msg.line - 1)
    +
    +
    +def lint_thread(module, reporter, gui):
    +    """thread for pylint"""
    +    gui.status.text = "processing module(s)"
    +    pylint.lint.Run(args=[module], reporter=reporter, exit=False)
    +    gui.msg_queue.put("DONE")
    +
    +
    +def Run(args):
    +    """launch pylint gui from args"""
    +    if args:
    +        print('USAGE: pylint-gui\n launch a simple pylint gui using Tk')
    +        sys.exit(1)
    +    gui = LintGui()
    +    gui.mainloop()
    +    sys.exit(0)
    +
    +if __name__ == '__main__':
    +    Run(sys.argv[1:])
    diff --git a/pymode/libs/pylama/lint/pylama_pylint/pylint/interfaces.py b/pymode/libs/pylint/interfaces.py
    similarity index 76%
    rename from pymode/libs/pylama/lint/pylama_pylint/pylint/interfaces.py
    rename to pymode/libs/pylint/interfaces.py
    index 50f2c839..64f5a956 100644
    --- a/pymode/libs/pylama/lint/pylama_pylint/pylint/interfaces.py
    +++ b/pymode/libs/pylint/interfaces.py
    @@ -10,10 +10,22 @@
     # You should have received a copy of the GNU General Public License along with
     # this program; if not, write to the Free Software Foundation, Inc.,
     # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
    -"""Interfaces for PyLint objects"""
    +"""Interfaces for Pylint objects"""
    +from collections import namedtuple
     
     from logilab.common.interface import Interface
     
    +Confidence = namedtuple('Confidence', ['name', 'description'])
    +# Warning Certainties
    +HIGH = Confidence('HIGH', 'No false positive possible.')
    +INFERENCE = Confidence('INFERENCE', 'Warning based on inference result.')
    +INFERENCE_FAILURE = Confidence('INFERENCE_FAILURE',
    +                               'Warning based on inference with failures.')
    +UNDEFINED = Confidence('UNDEFINED',
    +                       'Warning without any associated confidence level.')
    +
    +CONFIDENCE_LEVELS = [HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED]
    +
     
     class IChecker(Interface):
         """This is an base interface, not designed to be used elsewhere than for
    @@ -34,7 +46,7 @@ class IRawChecker(IChecker):
         def process_module(self, astroid):
             """ process a module
     
    -        the module's content is accessible via astroid.file_stream
    +        the module's content is accessible via astroid.stream
             """
     
     
    diff --git a/pymode/libs/pylama/lint/pylama_pylint/pylint/lint.py b/pymode/libs/pylint/lint.py
    similarity index 56%
    rename from pymode/libs/pylama/lint/pylama_pylint/pylint/lint.py
    rename to pymode/libs/pylint/lint.py
    index 529fbd44..01fc2f5d 100644
    --- a/pymode/libs/pylama/lint/pylama_pylint/pylint/lint.py
    +++ b/pymode/libs/pylint/lint.py
    @@ -25,46 +25,67 @@
     
       Display help messages about given message identifiers and exit.
     """
    +from __future__ import print_function
     
    -# import this first to avoid builtin namespace pollution
    -from pylint.checkers import utils
    -
    -import functools
    -import sys
    +import collections
    +import contextlib
    +import itertools
    +import operator
     import os
    +try:
    +    import multiprocessing
    +except ImportError:
    +    multiprocessing = None
    +import sys
     import tokenize
    -from warnings import warn
    +import warnings
     
    -from logilab.common.configuration import UnsupportedAction, OptionsManagerMixIn
    -from logilab.common.optik_ext import check_csv
    -from logilab.common.modutils import load_module_from_name, get_module_part
    -from logilab.common.interface import implements
    -from logilab.common.textutils import splitstrip, unquote
    -from logilab.common.ureports import Table, Text, Section
    -from logilab.common.__pkginfo__ import version as common_version
    -
    -from astroid import MANAGER, nodes, AstroidBuildingException
    +import astroid
     from astroid.__pkginfo__ import version as astroid_version
    -
    -from pylint.utils import (
    -    MSG_TYPES, OPTION_RGX,
    -    PyLintASTWalker, UnknownMessage, MessagesHandlerMixIn, ReportsHandlerMixIn,
    -    EmptyReport, WarningScope,
    -    expand_modules, tokenize_module)
    -from pylint.interfaces import IRawChecker, ITokenChecker, IAstroidChecker
    -from pylint.checkers import (BaseTokenChecker,
    -                             table_lines_from_stats,
    -                             initialize as checkers_initialize)
    -from pylint.reporters import initialize as reporters_initialize
    +from astroid import modutils
    +from logilab.common import configuration
    +from logilab.common import optik_ext
    +from logilab.common import interface
    +from logilab.common import textutils
    +from logilab.common import ureports
    +from logilab.common import __version__ as common_version
    +import six
    +
    +from pylint import checkers
    +from pylint import interfaces
    +from pylint import reporters
    +from pylint import utils
     from pylint import config
    -
     from pylint.__pkginfo__ import version
     
     
    +MANAGER = astroid.MANAGER
    +INCLUDE_IDS_HELP = ("Deprecated. It was used to include message\'s "
    +                    "id in output. Use --msg-template instead.")
    +SYMBOLS_HELP = ("Deprecated. It was used to include symbolic ids of "
    +                "messages in output. Use --msg-template instead.")
    +
    +def _get_new_args(message):
    +    location = (
    +        message.abspath,
    +        message.path,
    +        message.module,
    +        message.obj,
    +        message.line,
    +        message.column,
    +    )
    +    return (
    +        message.msg_id,
    +        message.symbol,
    +        location,
    +        message.msg,
    +        message.confidence,
    +    )
     
     def _get_python_path(filepath):
    -    dirname = os.path.dirname(os.path.realpath(
    -            os.path.expanduser(filepath)))
    +    dirname = os.path.realpath(os.path.expanduser(filepath))
    +    if not os.path.isdir(dirname):
    +        dirname = os.path.dirname(dirname)
         while True:
             if not os.path.exists(os.path.join(dirname, "__init__.py")):
                 return dirname
    @@ -74,6 +95,38 @@ def _get_python_path(filepath):
                 return os.getcwd()
     
     
    +def _merge_stats(stats):
    +    merged = {}
    +    for stat in stats:
    +        for key, item in six.iteritems(stat):
    +            if key not in merged:
    +                merged[key] = item
    +            else:
    +                if isinstance(item, dict):
    +                    merged[key].update(item)
    +                else:
    +                    merged[key] = merged[key] + item
    +    return merged
    +
    +
    +@contextlib.contextmanager
    +def _patch_sysmodules():
    +    # Context manager that permits running pylint, on Windows, with -m switch
    +    # and with --jobs, as in 'python -2 -m pylint .. --jobs'.
    +    # For more details why this is needed,
    +    # see Python issue http://bugs.python.org/issue10845.
    +
    +    mock_main = __name__ != '__main__' # -m switch
    +    if mock_main:
    +        sys.modules['__main__'] = sys.modules[__name__]
    +
    +    try:
    +        yield
    +    finally:
    +        if mock_main:
    +            sys.modules.pop('__main__')
    +
    +
     # Python Linter class #########################################################
     
     MSGS = {
    @@ -93,7 +146,7 @@ def _get_python_path(filepath):
         'F0010': ('error while code parsing: %s',
                   'parse-error',
                   'Used when an exception occured while building the Astroid '
    -               'representation which could be handled by astroid.'),
    +              'representation which could be handled by astroid.'),
     
         'I0001': ('Unable to run raw checkers on built-in module %s',
                   'raw-checker-failed',
    @@ -130,7 +183,7 @@ def _get_python_path(filepath):
                   'deprecated-pragma',
                   'Some inline pylint options have been renamed or reworked, '
                   'only the most recent form should be used. '
    -              'NOTE:skip-all is only available with pylint >= 0.26', 
    +              'NOTE:skip-all is only available with pylint >= 0.26',
                   {'old_names': [('I0014', 'deprecated-disable-all')]}),
     
         'E0001': ('%s',
    @@ -146,15 +199,63 @@ def _get_python_path(filepath):
         }
     
     
    -def _deprecated_option(shortname, opt_type):
    -    def _warn_deprecated(option, optname, *args):
    +def _deprecated_option(shortname, opt_type, help_msg):
    +    def _warn_deprecated(option, optname, *args): # pylint: disable=unused-argument
             sys.stderr.write('Warning: option %s is deprecated and ignored.\n' % (optname,))
    -    return {'short': shortname, 'help': 'DEPRECATED', 'hide': True,
    +    return {'short': shortname, 'help': help_msg, 'hide': True,
                 'type': opt_type, 'action': 'callback', 'callback': _warn_deprecated}
     
     
    -class PyLinter(OptionsManagerMixIn, MessagesHandlerMixIn, ReportsHandlerMixIn,
    -               BaseTokenChecker):
    +if multiprocessing is not None:
    +    class ChildLinter(multiprocessing.Process): # pylint: disable=no-member
    +        def run(self):
    +            tasks_queue, results_queue, self._config = self._args # pylint: disable=no-member
    +
    +            self._config["jobs"] = 1  # Child does not parallelize any further.
    +            self._python3_porting_mode = self._config.pop(
    +                'python3_porting_mode', None)
    +
    +            # Run linter for received files/modules.
    +            for file_or_module in iter(tasks_queue.get, 'STOP'):
    +                result = self._run_linter(file_or_module[0])
    +                try:
    +                    results_queue.put(result)
    +                except Exception as ex:
    +                    print("internal error with sending report for module %s" %
    +                          file_or_module, file=sys.stderr)
    +                    print(ex, file=sys.stderr)
    +                    results_queue.put({})
    +
    +        def _run_linter(self, file_or_module):
    +            linter = PyLinter()
    +
    +            # Register standard checkers.
    +            linter.load_default_plugins()
    +            # Load command line plugins.
    +            # TODO linter.load_plugin_modules(self._plugins)
    +
    +            linter.load_configuration(**self._config)
    +            linter.set_reporter(reporters.CollectingReporter())
    +
    +            # Enable the Python 3 checker mode. This option is
    +            # passed down from the parent linter up to here, since
    +            # the Python 3 porting flag belongs to the Run class,
    +            # instead of the Linter class.
    +            if self._python3_porting_mode:
    +                linter.python3_porting_mode()
    +
    +            # Run the checks.
    +            linter.check(file_or_module)
    +
    +            msgs = [_get_new_args(m) for m in linter.reporter.messages]
    +            return (file_or_module, linter.file_state.base_name, linter.current_name,
    +                    msgs, linter.stats, linter.msg_status)
    +
    +
    +class PyLinter(configuration.OptionsManagerMixIn,
    +               utils.MessagesHandlerMixIn,
    +               utils.ReportsHandlerMixIn,
    +               checkers.BaseTokenChecker):
         """lint Python modules using external checkers.
     
         This is the main checker controlling the other ones and the reports
    @@ -168,13 +269,12 @@ class PyLinter(OptionsManagerMixIn, MessagesHandlerMixIn, ReportsHandlerMixIn,
         to ensure the latest code version is actually checked.
         """
     
    -    __implements__ = (ITokenChecker,)
    +    __implements__ = (interfaces.ITokenChecker, )
     
         name = 'master'
         priority = 0
         level = 0
         msgs = MSGS
    -    may_be_disabled = False
     
         @staticmethod
         def make_options():
    @@ -182,7 +282,7 @@ def make_options():
                      {'type' : 'csv', 'metavar' : '[,...]',
                       'dest' : 'black_list', 'default' : ('CVS',),
                       'help' : 'Add files or directories to the blacklist. '
    -                  'They should be base names, not paths.'}),
    +                           'They should be base names, not paths.'}),
                     ('persistent',
                      {'default': True, 'type' : 'yn', 'metavar' : '',
                       'level': 1,
    @@ -192,88 +292,133 @@ def make_options():
                      {'type' : 'csv', 'metavar' : '', 'default' : (),
                       'level': 1,
                       'help' : 'List of plugins (as comma separated values of '
    -                  'python modules names) to load, usually to register '
    -                  'additional checkers.'}),
    +                           'python modules names) to load, usually to register '
    +                           'additional checkers.'}),
     
                     ('output-format',
                      {'default': 'text', 'type': 'string', 'metavar' : '',
                       'short': 'f',
                       'group': 'Reports',
                       'help' : 'Set the output format. Available formats are text,'
    -                  ' parseable, colorized, msvs (visual studio) and html. You '
    -                  'can also give a reporter class, eg mypackage.mymodule.'
    -                  'MyReporterClass.'}),
    +                           ' parseable, colorized, msvs (visual studio) and html. You '
    +                           'can also give a reporter class, eg mypackage.mymodule.'
    +                           'MyReporterClass.'}),
     
                     ('files-output',
                      {'default': 0, 'type' : 'yn', 'metavar' : '',
                       'group': 'Reports', 'level': 1,
                       'help' : 'Put messages in a separate file for each module / '
    -                  'package specified on the command line instead of printing '
    -                  'them on stdout. Reports (if any) will be written in a file '
    -                  'name "pylint_global.[txt|html]".'}),
    +                           'package specified on the command line instead of printing '
    +                           'them on stdout. Reports (if any) will be written in a file '
    +                           'name "pylint_global.[txt|html]".'}),
     
                     ('reports',
                      {'default': 1, 'type' : 'yn', 'metavar' : '',
                       'short': 'r',
                       'group': 'Reports',
                       'help' : 'Tells whether to display a full report or only the '
    -                  'messages'}),
    +                           'messages'}),
     
                     ('evaluation',
                      {'type' : 'string', 'metavar' : '',
                       'group': 'Reports', 'level': 1,
                       'default': '10.0 - ((float(5 * error + warning + refactor + '
    -                  'convention) / statement) * 10)',
    -                  'help' : 'Python expression which should return a note less \
    -than 10 (10 is the highest note). You have access to the variables errors \
    -warning, statement which respectively contain the number of errors / warnings\
    - messages and the total number of statements analyzed. This is used by the \
    - global evaluation report (RP0004).'}),
    +                             'convention) / statement) * 10)',
    +                  'help' : 'Python expression which should return a note less '
    +                           'than 10 (10 is the highest note). You have access '
    +                           'to the variables errors warning, statement which '
    +                           'respectively contain the number of errors / '
    +                           'warnings messages and the total number of '
    +                           'statements analyzed. This is used by the global '
    +                           'evaluation report (RP0004).'}),
     
                     ('comment',
                      {'default': 0, 'type' : 'yn', 'metavar' : '',
                       'group': 'Reports', 'level': 1,
                       'help' : 'Add a comment according to your evaluation note. '
    -                  'This is used by the global evaluation report (RP0004).'}),
    +                           'This is used by the global evaluation report (RP0004).'}),
    +
    +                ('confidence',
    +                 {'type' : 'multiple_choice', 'metavar': '',
    +                  'default': '',
    +                  'choices': [c.name for c in interfaces.CONFIDENCE_LEVELS],
    +                  'group': 'Messages control',
    +                  'help' : 'Only show warnings with the listed confidence levels.'
    +                           ' Leave empty to show all. Valid levels: %s' % (
    +                               ', '.join(c.name for c in interfaces.CONFIDENCE_LEVELS),)}),
     
                     ('enable',
                      {'type' : 'csv', 'metavar': '',
                       'short': 'e',
                       'group': 'Messages control',
                       'help' : 'Enable the message, report, category or checker with the '
    -                  'given id(s). You can either give multiple identifier '
    -                  'separated by comma (,) or put this option multiple time. '
    -                  'See also the "--disable" option for examples. '}),
    +                           'given id(s). You can either give multiple identifier '
    +                           'separated by comma (,) or put this option multiple time. '
    +                           'See also the "--disable" option for examples. '}),
     
                     ('disable',
                      {'type' : 'csv', 'metavar': '',
                       'short': 'd',
                       'group': 'Messages control',
                       'help' : 'Disable the message, report, category or checker '
    -                  'with the given id(s). You can either give multiple identifiers'
    -                  ' separated by comma (,) or put this option multiple times '
    -                  '(only on the command line, not in the configuration file '
    -                  'where it should appear only once).'
    -                  'You can also use "--disable=all" to disable everything first '
    -                  'and then reenable specific checks. For example, if you want '
    -                  'to run only the similarities checker, you can use '
    -                  '"--disable=all --enable=similarities". '
    -                  'If you want to run only the classes checker, but have no '
    -                  'Warning level messages displayed, use'
    -                  '"--disable=all --enable=classes --disable=W"'}),
    +                           'with the given id(s). You can either give multiple identifiers'
    +                           ' separated by comma (,) or put this option multiple times '
    +                           '(only on the command line, not in the configuration file '
    +                           'where it should appear only once).'
    +                           'You can also use "--disable=all" to disable everything first '
    +                           'and then reenable specific checks. For example, if you want '
    +                           'to run only the similarities checker, you can use '
    +                           '"--disable=all --enable=similarities". '
    +                           'If you want to run only the classes checker, but have no '
    +                           'Warning level messages displayed, use'
    +                           '"--disable=all --enable=classes --disable=W"'}),
     
                     ('msg-template',
                      {'type' : 'string', 'metavar': '