diff --git a/docs/source/config_file.rst b/docs/source/config_file.rst
index a6b64578ea29..9c5b935bf25c 100644
--- a/docs/source/config_file.rst
+++ b/docs/source/config_file.rst
@@ -74,9 +74,6 @@ The following global flags may only be set in the global section
 - ``warn_redundant_casts`` (Boolean, default False) warns about
   casting an expression to its inferred type.
 
-- ``warn_unused_ignores`` (Boolean, default False) warns about
-  unneeded ``# type: ignore`` comments.
-
 - ``warn_unused_configs`` (Boolean, default False) warns about
   per-module sections in the config file that didn't match any
   files processed in the current run.
@@ -203,6 +200,9 @@ overridden by the pattern sections matching the module name.
   returning a value with type ``Any`` from a function declared with a non-
   ``Any`` return type.
 
+- ``warn_unused_ignores`` (Boolean, default False) warns about
+  unneeded ``# type: ignore`` comments.
+
 - ``strict_boolean`` (Boolean, default False) makes using non-boolean
   expressions in conditions an error.
 
diff --git a/mypy/build.py b/mypy/build.py
index 50140c5550c5..f3f369189086 100644
--- a/mypy/build.py
+++ b/mypy/build.py
@@ -80,7 +80,7 @@ def __init__(self, manager: 'BuildManager', graph: Graph) -> None:
         self.graph = graph
         self.files = manager.modules
         self.types = manager.all_types  # Non-empty for tests only or if dumping deps
-        self.errors = manager.errors.messages()
+        self.errors = []  # type: List[str]  # Filled in by build if desired
 
 
 class BuildSource:
@@ -133,6 +133,7 @@ def build(sources: List[BuildSource],
           alt_lib_path: Optional[str] = None,
           bin_dir: Optional[str] = None,
           saved_cache: Optional[SavedCache] = None,
+          flush_errors: Optional[Callable[[List[str], bool], None]] = None,
           ) -> BuildResult:
     """Analyze a program.
 
@@ -142,6 +143,11 @@ def build(sources: List[BuildSource],
     Return BuildResult if successful or only non-blocking errors were found;
     otherwise raise CompileError.
 
+    If a flush_errors callback is provided, all error messages will be
+    passed to it and the errors and messages fields of BuildResult and
+    CompileError (respectively) will be empty. Otherwise those fields will
+    report any error messages.
+
     Args:
       sources: list of sources to build
       options: build options
@@ -150,7 +156,40 @@ def build(sources: List[BuildSource],
       bin_dir: directory containing the mypy script, used for finding data
         directories; if omitted, use '.' as the data directory
       saved_cache: optional dict with saved cache state for dmypy (read-write!)
+      flush_errors: optional function to flush errors after a file is processed
+
     """
+    # If we were not given a flush_errors, we use one that will populate those
+    # fields for callers that want the traditional API.
+    messages = []
+
+    def default_flush_errors(new_messages: List[str], is_serious: bool) -> None:
+        messages.extend(new_messages)
+
+    flush_errors = flush_errors or default_flush_errors
+
+    try:
+        result = _build(sources, options, alt_lib_path, bin_dir, saved_cache, flush_errors)
+        result.errors = messages
+        return result
+    except CompileError as e:
+        # CompileErrors raised from an errors object carry all of the
+        # messages that have not been reported out by error streaming.
+        # Patch it up to contain either none or all of the messages,
+        # depending on whether we are flushing errors.
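+        # (use_stdout is False only for serious errors; callers such as
+        # mypy.main route those messages to stderr.)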
+        serious = not e.use_stdout
+        flush_errors(e.messages, serious)
+        e.messages = messages
+        raise
+
+
+def _build(sources: List[BuildSource],
+           options: Options,
+           alt_lib_path: Optional[str],
+           bin_dir: Optional[str],
+           saved_cache: Optional[SavedCache],
+           flush_errors: Callable[[List[str], bool], None],
+           ) -> BuildResult:
     # This seems the most reasonable place to tune garbage collection.
     gc.set_threshold(50000)
 
@@ -212,7 +251,8 @@ def build(sources: List[BuildSource],
                            version_id=__version__,
                            plugin=plugin,
                            errors=errors,
-                           saved_cache=saved_cache)
+                           saved_cache=saved_cache,
+                           flush_errors=flush_errors)
 
     try:
         graph = dispatch(sources, manager)
@@ -518,6 +558,7 @@ class BuildManager:
       version_id:      The current mypy version (based on commit id when possible)
       plugin:          Active mypy plugin(s)
       errors:          Used for reporting all errors
+      flush_errors:    A function for processing errors after each SCC
       saved_cache:     Dict with saved cache state for dmypy and fine-grained incremental mode
                        (read-write!)
      stats:           Dict with various instrumentation numbers
@@ -532,6 +573,7 @@ def __init__(self, data_dir: str,
                 version_id: str,
                 plugin: Plugin,
                 errors: Errors,
+                flush_errors: Callable[[List[str], bool], None],
                 saved_cache: Optional[SavedCache] = None,
                 ) -> None:
         self.start_time = time.time()
@@ -555,6 +597,7 @@ def __init__(self, data_dir: str,
         self.stale_modules = set()  # type: Set[str]
         self.rechecked_modules = set()  # type: Set[str]
         self.plugin = plugin
+        self.flush_errors = flush_errors
         self.saved_cache = saved_cache if saved_cache is not None else {}  # type: SavedCache
         self.stats = {}  # type: Dict[str, Any]  # Values are ints or floats
 
@@ -1973,6 +2016,10 @@ def write_cache(self) -> None:
     def dependency_priorities(self) -> List[int]:
         return [self.priorities.get(dep, PRI_HIGH) for dep in self.dependencies]
 
+    def generate_unused_ignore_notes(self) -> None:
+        if self.options.warn_unused_ignores:
+            self.manager.errors.generate_unused_ignore_notes(self.xpath)
+
 
 def dispatch(sources: List[BuildSource], manager: BuildManager) -> Graph:
     set_orig = set(manager.saved_cache)
@@ -1999,9 +2046,6 @@ def dispatch(sources: List[BuildSource], manager: BuildManager) -> Graph:
         dump_graph(graph)
         return graph
     process_graph(graph, manager)
-    if manager.options.warn_unused_ignores:
-        # TODO: This could also be a per-module option.
-        manager.errors.generate_unused_ignore_notes()
     updated = preserve_cache(graph)
     set_updated = set(updated)
     manager.saved_cache.clear()
@@ -2490,6 +2534,8 @@ def process_stale_scc(graph: Graph, scc: List[str], manager: BuildManager) -> No
                 graph[id].transitive_error = True
     for id in stale:
         graph[id].finish_passes()
+        graph[id].generate_unused_ignore_notes()
+        manager.flush_errors(manager.errors.file_messages(graph[id].xpath), False)
         graph[id].write_cache()
         graph[id].mark_as_rechecked()
 
diff --git a/mypy/errors.py b/mypy/errors.py
index 923c5924422f..189dfb2c3af3 100644
--- a/mypy/errors.py
+++ b/mypy/errors.py
@@ -51,6 +51,9 @@ class ErrorInfo:
     # Only report this particular messages once per program.
     only_once = False
 
+    # Actual origin of the error message
+    origin = None  # type: Tuple[str, int]
+
     # Fine-grained incremental target where this was reported
     target = None  # type: Optional[str]
 
@@ -90,15 +93,17 @@ class Errors:
     current error context (nested imports).
     """
 
-    # List of generated error messages.
-    error_info = None  # type: List[ErrorInfo]
+    # Map from files to generated error messages. Is an OrderedDict so
+    # that it can be used to order messages based on the order the
+    # files were processed.
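+    # Keys are file paths; file_messages() renders the errors for a single
+    # file and marks it as flushed.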
+    error_info_map = None  # type: Dict[str, List[ErrorInfo]]
+
+    # Files that we have reported the errors for
+    flushed_files = None  # type: Set[str]
 
     # Current error context: nested import context/stack, as a list of (path, line) pairs.
     import_ctx = None  # type: List[Tuple[str, int]]
 
-    # Set of files with errors.
-    error_files = None  # type: Set[str]
-
     # Path name prefix that is removed from all paths, if set.
     ignore_prefix = None  # type: str
 
@@ -141,9 +146,9 @@ def __init__(self, show_error_context: bool = False,
         self.initialize()
 
     def initialize(self) -> None:
-        self.error_info = []
+        self.error_info_map = OrderedDict()
+        self.flushed_files = set()
         self.import_ctx = []
-        self.error_files = set()
         self.type_name = [None]
         self.function_or_member = [None]
         self.ignored_lines = OrderedDict()
@@ -289,8 +294,14 @@ def report(self,
                          target=self.current_target())
         self.add_error_info(info)
 
+    def _add_error_info(self, file: str, info: ErrorInfo) -> None:
+        assert file not in self.flushed_files
+        if file not in self.error_info_map:
+            self.error_info_map[file] = []
+        self.error_info_map[file].append(info)
+
     def add_error_info(self, info: ErrorInfo) -> None:
-        (file, line) = cast(Tuple[str, int], info.origin)  # see issue 1855
+        file, line = info.origin
         if not info.blocker:  # Blockers cannot be ignored
             if file in self.ignored_lines and line in self.ignored_lines[file]:
                 # Annotation requests us to ignore all errors on this line.
@@ -302,18 +313,17 @@ def add_error_info(self, info: ErrorInfo) -> None:
             if info.message in self.only_once_messages:
                 return
             self.only_once_messages.add(info.message)
-        self.error_info.append(info)
-        self.error_files.add(file)
-
-    def generate_unused_ignore_notes(self) -> None:
-        for file, ignored_lines in self.ignored_lines.items():
-            if not self.is_typeshed_file(file):
-                for line in ignored_lines - self.used_ignored_lines[file]:
-                    # Don't use report since add_error_info will ignore the error!
-                    info = ErrorInfo(self.import_context(), file, self.current_module(), None,
-                                     None, line, -1, 'note', "unused 'type: ignore' comment",
-                                     False, False)
-                    self.error_info.append(info)
+        self._add_error_info(file, info)
+
+    def generate_unused_ignore_notes(self, file: str) -> None:
+        ignored_lines = self.ignored_lines[file]
+        if not self.is_typeshed_file(file):
+            for line in ignored_lines - self.used_ignored_lines[file]:
+                # Don't use report since add_error_info will ignore the error!
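+                # (the note sits on a line that carries a type: ignore, so
+                # the normal path would suppress it)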
+                info = ErrorInfo(self.import_context(), file, self.current_module(), None,
+                                 None, line, -1, 'note', "unused 'type: ignore' comment",
+                                 False, False)
+                self._add_error_info(file, info)
 
     def is_typeshed_file(self, file: str) -> bool:
         # gross, but no other clear way to tell
@@ -321,43 +331,46 @@ def is_typeshed_file(self, file: str) -> bool:
 
     def num_messages(self) -> int:
         """Return the number of generated messages."""
-        return len(self.error_info)
+        return sum(len(x) for x in self.error_info_map.values())
 
     def is_errors(self) -> bool:
         """Are there any generated errors?"""
-        return bool(self.error_info)
+        return bool(self.error_info_map)
 
     def is_blockers(self) -> bool:
         """Are the any errors that are blockers?"""
-        return any(err for err in self.error_info if err.blocker)
+        return any(err for errs in self.error_info_map.values() for err in errs if err.blocker)
 
     def blocker_module(self) -> Optional[str]:
         """Return the module with a blocking error, or None if not possible."""
-        for err in self.error_info:
-            if err.blocker:
-                return err.module
+        for errs in self.error_info_map.values():
+            for err in errs:
+                if err.blocker:
+                    return err.module
         return None
 
     def is_errors_for_file(self, file: str) -> bool:
         """Are there any errors for the given file?"""
-        return file in self.error_files
+        return file in self.error_info_map
 
     def raise_error(self) -> None:
         """Raise a CompileError with the generated messages.
 
         Render the messages suitable for displaying.
         """
-        raise CompileError(self.messages(),
+        # self.new_messages() will format all messages that haven't already
+        # been returned from a file_messages() call.
+        raise CompileError(self.new_messages(),
                            use_stdout=True,
                            module_with_blocker=self.blocker_module())
 
-    def messages(self) -> List[str]:
+    def format_messages(self, error_info: List[ErrorInfo]) -> List[str]:
         """Return a string list that represents the error messages.
 
         Use a form suitable for displaying to the user.
         """
         a = []  # type: List[str]
-        errors = self.render_messages(self.sort_messages(self.error_info))
+        errors = self.render_messages(self.sort_messages(error_info))
         errors = self.remove_duplicates(errors)
         for file, line, column, severity, message in errors:
             s = ''
@@ -375,12 +388,36 @@ def messages(self) -> List[str]:
             a.append(s)
         return a
 
+    def file_messages(self, path: str) -> List[str]:
+        """Return a string list of new error messages from a given file.
+
+        Use a form suitable for displaying to the user.
+        """
+        if path not in self.error_info_map:
+            return []
+        self.flushed_files.add(path)
+        return self.format_messages(self.error_info_map[path])
+
+    def new_messages(self) -> List[str]:
+        """Return a string list of new error messages.
+
+        Use a form suitable for displaying to the user.
+        Errors from different files are ordered based on the order in which
+        they first generated an error.
+        """
+        msgs = []
+        for path in self.error_info_map.keys():
+            if path not in self.flushed_files:
+                msgs.extend(self.file_messages(path))
+        return msgs
+
     def targets(self) -> Set[str]:
         """Return a set of all targets that contain errors."""
         # TODO: Make sure that either target is always defined or that not being defined
         # is okay for fine-grained incremental checking.
         return set(info.target
-                   for info in self.error_info
+                   for errs in self.error_info_map.values()
+                   for info in errs
                    if info.target)
 
     def render_messages(self, errors: List[ErrorInfo]) -> List[Tuple[Optional[str], int, int,
@@ -461,7 +498,7 @@ def render_messages(self, errors: List[ErrorInfo]) -> List[Tuple[Optional[str],
     def sort_messages(self, errors: List[ErrorInfo]) -> List[ErrorInfo]:
         """Sort an array of error messages locally by line number.
 
-        I.e., sort a run of consecutive messages with the same file
+        I.e., sort a run of consecutive messages with the same context
         by line number, but otherwise retain the general ordering of
         the messages.
         """
@@ -511,6 +548,12 @@ class CompileError(Exception):
 
     It can be a parse, semantic analysis, type check or other
     compilation-related error.
+
+    CompileErrors raised from an errors object carry all of the
+    messages that have not been reported out by error streaming.
+    This is patched up by build.build to contain either all error
+    messages (if errors were streamed) or none (if they were not).
+
     """
 
     messages = None  # type: List[str]
@@ -554,7 +597,7 @@ def report_internal_error(err: Exception, file: Optional[str], line: int,
     # Dump out errors so far, they often provide a clue.
     # But catch unexpected errors rendering them.
     try:
-        for msg in errors.messages():
+        for msg in errors.new_messages():
             print(msg)
     except Exception as e:
         print("Failed to dump errors:", repr(e), file=sys.stderr)
diff --git a/mypy/main.py b/mypy/main.py
index 2b3deae49f62..5037224f0d5c 100644
--- a/mypy/main.py
+++ b/mypy/main.py
@@ -8,7 +8,7 @@
 import sys
 import time
 
-from typing import Any, Dict, List, Mapping, Optional, Sequence, Set, Tuple
+from typing import Any, Dict, List, Mapping, Optional, Sequence, Set, Tuple, Callable
 
 from mypy import build
 from mypy import defaults
@@ -61,12 +61,23 @@ def main(script_path: Optional[str], args: Optional[List[str]] = None) -> None:
     if args is None:
         args = sys.argv[1:]
     sources, options = process_options(args)
+
+    messages = []
+
+    def flush_errors(new_messages: List[str], serious: bool) -> None:
+        messages.extend(new_messages)
+        f = sys.stderr if serious else sys.stdout
+        try:
+            for msg in new_messages:
+                f.write(msg + '\n')
+            f.flush()
+        except BrokenPipeError:
+            sys.exit(1)
+
     serious = False
     try:
-        res = type_check_only(sources, bin_dir, options)
-        a = res.errors
+        type_check_only(sources, bin_dir, options, flush_errors)
     except CompileError as e:
-        a = e.messages
         if not e.use_stdout:
             serious = True
     if options.warn_unused_configs and options.unused_configs:
@@ -76,14 +87,8 @@ def main(script_path: Optional[str], args: Optional[List[str]] = None) -> None:
               file=sys.stderr)
     if options.junit_xml:
         t1 = time.time()
-        util.write_junit_xml(t1 - t0, serious, a, options.junit_xml)
-    if a:
-        f = sys.stderr if serious else sys.stdout
-        try:
-            for m in a:
-                f.write(m + '\n')
-        except BrokenPipeError:
-            pass
+        util.write_junit_xml(t1 - t0, serious, messages, options.junit_xml)
+    if messages:
         sys.exit(1)
 
@@ -112,11 +117,13 @@ def readlinkabs(link: str) -> str:
 
 def type_check_only(sources: List[BuildSource], bin_dir: Optional[str],
-                    options: Options) -> BuildResult:
+                    options: Options,
+                    flush_errors: Optional[Callable[[List[str], bool], None]]) -> BuildResult:
     # Type-check the program and dependencies.
     return build.build(sources=sources,
                        bin_dir=bin_dir,
-                       options=options)
+                       options=options,
+                       flush_errors=flush_errors)
 
 
 FOOTER = """environment variables:
diff --git a/mypy/messages.py b/mypy/messages.py
index 7c0df42894f6..3c13a2c41db1 100644
--- a/mypy/messages.py
+++ b/mypy/messages.py
@@ -153,8 +153,9 @@ def copy(self) -> 'MessageBuilder':
     def add_errors(self, messages: 'MessageBuilder') -> None:
         """Add errors in messages to this builder."""
         if self.disable_count <= 0:
-            for info in messages.errors.error_info:
-                self.errors.add_error_info(info)
+            for errs in messages.errors.error_info_map.values():
+                for info in errs:
+                    self.errors.add_error_info(info)
 
     def disable_errors(self) -> None:
         self.disable_count += 1
diff --git a/mypy/options.py b/mypy/options.py
index dd9bfe08c095..0a826f16a405 100644
--- a/mypy/options.py
+++ b/mypy/options.py
@@ -34,6 +34,7 @@ class Options:
         "show_none_errors",
         "warn_no_return",
         "warn_return_any",
+        "warn_unused_ignores",
         "ignore_errors",
         "strict_boolean",
         "no_implicit_optional",
diff --git a/mypy/server/update.py b/mypy/server/update.py
index 73b224f43379..d6cb0707b846 100644
--- a/mypy/server/update.py
+++ b/mypy/server/update.py
@@ -195,9 +195,9 @@ def update_single(self, module: str, path: str) -> Tuple[List[str],
         result = update_single_isolated(module, path, manager, previous_modules)
         if isinstance(result, BlockedUpdate):
             # Blocking error -- just give up
-            module, path, remaining = result
+            module, path, remaining, errors = result
             self.previous_modules = get_module_to_path_map(manager)
-            return manager.errors.messages(), remaining, (module, path), True
+            return errors, remaining, (module, path), True
         assert isinstance(result, NormalUpdate)  # Work around #4124
         module, path, remaining, tree, graph = result
 
@@ -230,7 +230,7 @@ def update_single(self, module: str, path: str) -> Tuple[List[str],
 
         self.previous_modules = get_module_to_path_map(manager)
         self.type_maps = extract_type_maps(graph)
 
-        return manager.errors.messages(), remaining, (module, path), False
+        return manager.errors.new_messages(), remaining, (module, path), False
 
 
 def mark_all_meta_as_memory_only(graph: Dict[str, State],
@@ -271,7 +271,8 @@ def get_all_dependencies(manager: BuildManager, graph: Dict[str, State],
 # are similar to NormalUpdate (but there are fewer).
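+# The new 'messages' field carries the error output for the blocker, since
+# blocked updates no longer read it back from manager.errors.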
 BlockedUpdate = NamedTuple('BlockedUpdate', [('module', str),
                                              ('path', str),
-                                             ('remaining', List[Tuple[str, str]])])
+                                             ('remaining', List[Tuple[str, str]]),
+                                             ('messages', List[str])])
 
 UpdateResult = Union[NormalUpdate, BlockedUpdate]
 
@@ -318,7 +319,7 @@ def update_single_isolated(module: str,
             remaining_modules = [(module, path)]
         else:
             remaining_modules = []
-        return BlockedUpdate(err.module_with_blocker, path, remaining_modules)
+        return BlockedUpdate(err.module_with_blocker, path, remaining_modules, err.messages)
 
     if not os.path.isfile(path):
         graph = delete_module(module, graph, manager)
@@ -353,7 +354,7 @@ def update_single_isolated(module: str,
         manager.modules.clear()
         manager.modules.update(old_modules)
         del graph[module]
-        return BlockedUpdate(module, path, remaining_modules)
+        return BlockedUpdate(module, path, remaining_modules, err.messages)
     state.semantic_analysis_pass_three()
     state.semantic_analysis_apply_patches()
 
diff --git a/mypy/test/testcheck.py b/mypy/test/testcheck.py
index 65c3d89eae21..0bdf645e25c7 100644
--- a/mypy/test/testcheck.py
+++ b/mypy/test/testcheck.py
@@ -169,6 +169,7 @@ def run_case_once(self, testcase: DataDrivenTestCase, incremental_step: int = 0)
             # Always set to none so we're forced to reread the module in incremental mode
             sources.append(BuildSource(program_path, module_name,
                                        None if incremental_step else program_text))
+        res = None
         try:
             res = build.build(sources=sources,
diff --git a/mypy/test/testerrorstream.py b/mypy/test/testerrorstream.py
new file mode 100644
index 000000000000..55e4e3d9228e
--- /dev/null
+++ b/mypy/test/testerrorstream.py
@@ -0,0 +1,51 @@
+"""Tests for mypy incremental error output."""
+from typing import List, Callable, Optional
+
+import os
+
+from mypy import defaults, build
+from mypy.test.config import test_temp_dir
+from mypy.myunit import AssertionFailure
+from mypy.test.helpers import assert_string_arrays_equal
+from mypy.test.data import DataDrivenTestCase, DataSuite
+from mypy.build import BuildSource
+from mypy.errors import CompileError
+from mypy.options import Options
+from mypy.nodes import CallExpr, StrExpr
+from mypy.types import Type
+
+
+class ErrorStreamSuite(DataSuite):
+    files = ['errorstream.test']
+
+    def run_case(self, testcase: DataDrivenTestCase) -> None:
+        test_error_stream(testcase)
+
+
+def test_error_stream(testcase: DataDrivenTestCase) -> None:
+    """Perform a single error streaming test case.
+
+    The argument contains the description of the test case.
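+
+    Errors flushed through the callback are collected, with a marker line
+    per flush, and compared against the expected output.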
+    """
+    options = Options()
+    options.show_traceback = True
+
+    logged_messages = []  # type: List[str]
+
+    def flush_errors(msgs: List[str], serious: bool) -> None:
+        if msgs:
+            logged_messages.append('==== Errors flushed ====')
+            logged_messages.extend(msgs)
+
+    sources = [BuildSource('main', '__main__', '\n'.join(testcase.input))]
+    try:
+        build.build(sources=sources,
+                    options=options,
+                    alt_lib_path=test_temp_dir,
+                    flush_errors=flush_errors)
+    except CompileError as e:
+        assert e.messages == []
+
+    assert_string_arrays_equal(testcase.output, logged_messages,
+                               'Invalid output ({}, line {})'.format(
+                                   testcase.file, testcase.line))
diff --git a/mypy/test/testgraph.py b/mypy/test/testgraph.py
index dbbe4872aa75..e47234925b0b 100644
--- a/mypy/test/testgraph.py
+++ b/mypy/test/testgraph.py
@@ -49,6 +49,7 @@ def _make_manager(self) -> BuildManager:
             version_id=__version__,
             plugin=Plugin(options),
             errors=errors,
+            flush_errors=lambda msgs, serious: None,
         )
         return manager
 
diff --git a/runtests.py b/runtests.py
index d4712bbfbabb..e9d9a000c695 100755
--- a/runtests.py
+++ b/runtests.py
@@ -213,7 +213,8 @@ def test_path(*names: str):
     'testtransform',
     'testtypegen',
     'testparse',
-    'testsemanal'
+    'testsemanal',
+    'testerrorstream',
 )
 
 SLOW_FILES = test_path(
diff --git a/test-data/unit/check-kwargs.test b/test-data/unit/check-kwargs.test
index f65b626ace8f..d7be6685ef88 100644
--- a/test-data/unit/check-kwargs.test
+++ b/test-data/unit/check-kwargs.test
@@ -390,8 +390,16 @@ A.B(x=1) # E: Unexpected keyword argument "x" for "B"
 
 [case testUnexpectedMethodKwargFromOtherModule]
 import m
-m.A(x=1) # E: Unexpected keyword argument "x" for "A"
+m.A(x=1)
 [file m.py]
+1+'asdf'
 class A:
-    def __init__(self) -> None: # N: "A" defined here
+    def __init__(self) -> None:
         pass
+[out]
+-- Note that the messages appear "out of order" because the m.py:3
+-- message is really an attachment to the main:2 error and should be
+-- reported with it.
+tmp/m.py:1: error: Unsupported operand types for + ("int" and "str")
+main:2: error: Unexpected keyword argument "x" for "A"
+tmp/m.py:3: note: "A" defined here
diff --git a/test-data/unit/cmdline.test b/test-data/unit/cmdline.test
index 9ed8e602e278..27d57cd7f449 100644
--- a/test-data/unit/cmdline.test
+++ b/test-data/unit/cmdline.test
@@ -400,9 +400,9 @@ bla bla
 [file error.py]
 bla bla
 [out]
+normal.py:2: error: Unsupported operand types for + ("int" and "str")
 main.py:4: note: Import of 'error' ignored
 main.py:4: note: (Using --follow-imports=error, module not passed on command line)
-normal.py:2: error: Unsupported operand types for + ("int" and "str")
 main.py:5: error: Revealed type is 'builtins.int'
 main.py:6: error: Revealed type is 'builtins.int'
 main.py:7: error: Revealed type is 'Any'
diff --git a/test-data/unit/errorstream.test b/test-data/unit/errorstream.test
new file mode 100644
index 000000000000..6877a2098f88
--- /dev/null
+++ b/test-data/unit/errorstream.test
@@ -0,0 +1,54 @@
+-- Test cases for incremental error streaming.
+-- Each time errors are reported, '==== Errors flushed ====' is printed.
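+-- Errors are flushed per file, once the SCC (import cycle) that contains
+-- the file has been fully processed.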
+
+[case testErrorStream]
+import b
+[file a.py]
+1 + ''
+[file b.py]
+import a
+'' / 2
+[out]
+==== Errors flushed ====
+a.py:1: error: Unsupported operand types for + ("int" and "str")
+==== Errors flushed ====
+b.py:2: error: Unsupported operand types for / ("str" and "int")
+
+[case testBlockers]
+import b
+[file a.py]
+1 + ''
+[file b.py]
+import a
+break
+1 / '' # won't get reported, after a blocker
+[out]
+==== Errors flushed ====
+a.py:1: error: Unsupported operand types for + ("int" and "str")
+==== Errors flushed ====
+b.py:2: error: 'break' outside loop
+
+[case testCycles]
+import a
+[file a.py]
+import b
+1 + ''
+def f() -> int:
+    reveal_type(b.x)
+    return b.x
+y = 0 + 0
+[file b.py]
+import a
+def g() -> int:
+    reveal_type(a.y)
+    return a.y
+1 / ''
+x = 1 + 1
+
+[out]
+==== Errors flushed ====
+b.py:3: error: Revealed type is 'builtins.int'
+b.py:5: error: Unsupported operand types for / ("int" and "str")
+==== Errors flushed ====
+a.py:2: error: Unsupported operand types for + ("int" and "str")
+a.py:4: error: Revealed type is 'builtins.int'
diff --git a/test-data/unit/fine-grained-blockers.test b/test-data/unit/fine-grained-blockers.test
index f4af0626a185..9eaf25eeea05 100644
--- a/test-data/unit/fine-grained-blockers.test
+++ b/test-data/unit/fine-grained-blockers.test
@@ -255,9 +255,6 @@ a.py:1: error: invalid syntax
 main:1: error: Cannot find module named 'a'
 main:1: note: (Perhaps setting MYPYPATH or using the "--ignore-missing-imports" flag would help)
 b.py:1: error: Cannot find module named 'a'
---- TODO: Remove redundant errors
-main:1: error: Cannot find module named 'a'
-b.py:1: error: Cannot find module named 'a'
 
 [case testModifyFileWhileBlockingErrorElsewhere]
 import a
diff --git a/test-data/unit/fine-grained.test b/test-data/unit/fine-grained.test
index 3f7eecdbb06b..6022004d2260 100644
--- a/test-data/unit/fine-grained.test
+++ b/test-data/unit/fine-grained.test
@@ -1032,8 +1032,8 @@ main:2: error: Revealed type is 'contextlib.GeneratorContextManager[builtins.Non
 ==
 a.py:1: error: Cannot find module named 'b'
 a.py:1: note: (Perhaps setting MYPYPATH or using the "--ignore-missing-imports" flag would help)
-main:2: error: Revealed type is 'contextlib.GeneratorContextManager[builtins.None]'
 a.py:3: error: Cannot find module named 'b'
+main:2: error: Revealed type is 'contextlib.GeneratorContextManager[builtins.None]'
 ==
 main:2: error: Revealed type is 'contextlib.GeneratorContextManager[builtins.None]'
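
Usage sketch (not part of the patch): how a caller might consume the new
flush_errors hook on build.build, mirroring the callback that mypy/main.py
wires up above. The file name 'example.py' is only illustrative.

    import sys
    from typing import List

    from mypy import build
    from mypy.build import BuildSource
    from mypy.errors import CompileError
    from mypy.options import Options

    def flush_errors(new_messages: List[str], serious: bool) -> None:
        # serious is True for messages that should go to stderr rather
        # than stdout, matching what mypy.main does with its callback.
        stream = sys.stderr if serious else sys.stdout
        for msg in new_messages:
            print(msg, file=stream)

    sources = [BuildSource('example.py', '__main__', None)]
    try:
        result = build.build(sources=sources, options=Options(),
                             flush_errors=flush_errors)
        # Because flush_errors was supplied, result.errors stays empty;
        # every message was already delivered through the callback.
    except CompileError as e:
        # Blocking errors are streamed too, so e.messages is empty as well.
        assert e.messages == []

With the callback in place the same messages are produced as before; only
the delivery changes (streamed per file instead of collected at the end).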