Commit c2d384d

bpo-33338: [tokenize] Minor code cleanup (#6573)
This change contains minor cleanups that make diffing between Lib/tokenize.py and Lib/lib2to3/pgen2/tokenize.py cleaner.
1 parent: d5a2377

1 file changed: Lib/tokenize.py (8 additions, 11 deletions)
@@ -28,7 +28,6 @@
 from codecs import lookup, BOM_UTF8
 import collections
 from io import TextIOWrapper
-from itertools import chain
 import itertools as _itertools
 import re
 import sys
@@ -278,7 +277,7 @@ def compat(self, token, iterable):
         startline = token[0] in (NEWLINE, NL)
         prevstring = False
 
-        for tok in chain([token], iterable):
+        for tok in _itertools.chain([token], iterable):
             toknum, tokval = tok[:2]
             if toknum == ENCODING:
                 self.encoding = tokval
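
The chain([token], iterable) pattern in compat() puts the already-consumed first token back in front of the remaining iterator so the loop sees one continuous stream. A minimal illustration, not part of the commit (the sample token tuples are invented):

import itertools as _itertools

# The first token was already read; chain() lets the loop process it
# together with the rest of the iterator as a single stream.
first = (1, "NAME")
rest = iter([(2, "OP"), (3, "NUMBER")])
for toknum, tokval in _itertools.chain([first], rest):
    print(toknum, tokval)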
@@ -475,13 +474,10 @@ def tokenize(readline):
     The first token sequence will always be an ENCODING token
     which tells you which encoding was used to decode the bytes stream.
     """
-    # This import is here to avoid problems when the itertools module is not
-    # built yet and tokenize is imported.
-    from itertools import chain, repeat
     encoding, consumed = detect_encoding(readline)
-    rl_gen = iter(readline, b"")
-    empty = repeat(b"")
-    return _tokenize(chain(consumed, rl_gen, empty).__next__, encoding)
+    empty = _itertools.repeat(b"")
+    rl_gen = _itertools.chain(consumed, iter(readline, b""), empty)
+    return _tokenize(rl_gen.__next__, encoding)
 
 
 def _tokenize(readline, encoding):
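
The rewritten tokenize() builds the same readline chain as before (the lines consumed while detecting the encoding, then the stream itself, then an endless supply of empty bytes), just through the module-level _itertools alias instead of a local import. A minimal usage sketch, not part of the commit (the sample source is invented):

import io
import tokenize

# tokenize() takes the readline method of a binary stream and yields
# TokenInfo tuples, starting with an ENCODING token.
source = b"x = 1 + 2\n"
for tok in tokenize.tokenize(io.BytesIO(source).readline):
    print(tok)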
@@ -496,7 +492,7 @@ def _tokenize(readline, encoding):
             # BOM will already have been stripped.
             encoding = "utf-8"
         yield TokenInfo(ENCODING, encoding, (0, 0), (0, 0), '')
-    while True:  # loop over lines in stream
+    while True:  # loop over lines in stream
         try:
             line = readline()
         except StopIteration:

(Whitespace-only change: the trailing "# loop over lines in stream" comment is re-aligned; the exact column spacing is not preserved here.)
@@ -581,7 +577,7 @@ def _tokenize(readline, encoding):
                     continue
                 token, initial = line[start:end], line[start]
 
-                if (initial in numchars or  # ordinary number
+                if (initial in numchars or  # ordinary number
                     (initial == '.' and token != '.' and token != '...')):
                     yield TokenInfo(NUMBER, token, spos, epos, line)
                 elif initial in '\r\n':

(Another whitespace-only change: the "# ordinary number" comment is re-aligned; the exact column spacing is not preserved here.)
@@ -667,7 +663,8 @@ def main():
 
     # Helper error handling routines
    def perror(message):
-        print(message, file=sys.stderr)
+        sys.stderr.write(message)
+        sys.stderr.write('\n')
 
     def error(message, filename=None, location=None):
         if location:
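
Both perror() variants write the same text to stderr; the write() form simply mirrors the style used by the lib2to3 tokenizer. A small equivalence sketch, not from the commit (function names and the sample message are invented):

import sys

def perror_old(message):
    # pre-change variant
    print(message, file=sys.stderr)

def perror_new(message):
    # post-change variant, matching lib2to3's style
    sys.stderr.write(message)
    sys.stderr.write('\n')

perror_old("tokenize: example error")
perror_new("tokenize: example error")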
