Thanks for visiting codestin.com
Credit goes to github.com

Skip to content

Commit b2fda23

Browse files
committed
close files correctly
1 parent bbb0412 commit b2fda23

1 file changed

Lines changed: 12 additions & 11 deletions

File tree

Lib/trace.py

Lines changed: 12 additions & 11 deletions
Original file line number | Diff line number | Diff line change
@@ -404,24 +404,25 @@ def find_strings(filename, encoding=None):
404404
# If the first token is a string, then it's the module docstring.
405405
# Add this special case so that the test in the loop passes.
406406
prev_ttype = token.INDENT
407-
f = open(filename, encoding=encoding)
408-
for ttype, tstr, start, end, line in tokenize.generate_tokens(f.readline):
409-
if ttype == token.STRING:
410-
if prev_ttype == token.INDENT:
411-
sline, scol = start
412-
eline, ecol = end
413-
for i in range(sline, eline + 1):
414-
d[i] = 1
415-
prev_ttype = ttype
416-
f.close()
407+
with open(filename, encoding=encoding) as f:
408+
tok = tokenize.generate_tokens(f.readline)
409+
for ttype, tstr, start, end, line in tok:
410+
if ttype == token.STRING:
411+
if prev_ttype == token.INDENT:
412+
sline, scol = start
413+
eline, ecol = end
414+
for i in range(sline, eline + 1):
415+
d[i] = 1
416+
prev_ttype = ttype
417417
return d
418418

419419
def find_executable_linenos(filename):
420420
"""Return dict where keys are line numbers in the line number table."""
421421
try:
422422
with io.FileIO(filename, 'r') as file:
423423
encoding, lines = tokenize.detect_encoding(file.readline)
424-
prog = open(filename, "r", encoding=encoding).read()
424+
with open(filename, "r", encoding=encoding) as f:
425+
prog = f.read()
425426
except IOError as err:
426427
print(("Not printing coverage data for %r: %s"
427428
% (filename, err)), file=sys.stderr)

0 commit comments

Comments
 (0)