Thanks for visiting codestin.com
Credit goes to github.com

Skip to content

Commit 36986a9

Browse files
committed
Use enum.Enum in a couple of cases.
1 parent 656ef67 commit 36986a9

File tree

2 files changed

+28
-27
lines changed

2 files changed

+28
-27
lines changed

lib/matplotlib/dviread.py

Lines changed: 7 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -18,15 +18,17 @@
1818
1919
"""
2020
from collections import namedtuple
21+
import enum
2122
from functools import lru_cache, partial, wraps
2223
import logging
23-
import numpy as np
2424
import os
2525
import re
2626
import struct
2727
import sys
2828
import textwrap
2929

30+
import numpy as np
31+
3032
from matplotlib import cbook, rcParams
3133
from matplotlib.compat import subprocess
3234

@@ -48,7 +50,7 @@
4850
# just stops reading)
4951
# finale: the finale (unimplemented in our current implementation)
5052

51-
_dvistate = cbook.Bunch(pre=0, outer=1, inpage=2, post_post=3, finale=4)
53+
_dvistate = enum.Enum('DviState', 'pre outer inpage post_post finale')
5254

5355
# The marks on a page consist of text and boxes. A page also has dimensions.
5456
Page = namedtuple('Page', 'text boxes height width descent')
@@ -301,7 +303,7 @@ def _read(self):
301303
self._dtable[byte](self, byte)
302304
if byte == 140: # end of page
303305
return True
304-
if self.state == _dvistate.post_post: # end of file
306+
if self.state is _dvistate.post_post: # end of file
305307
self.close()
306308
return False
307309

@@ -622,7 +624,7 @@ def _read(self):
622624
while True:
623625
byte = self.file.read(1)[0]
624626
# If we are in a packet, execute the dvi instructions
625-
if self.state == _dvistate.inpage:
627+
if self.state is _dvistate.inpage:
626628
byte_at = self.file.tell()-1
627629
if byte_at == packet_ends:
628630
self._finalize_packet(packet_char, packet_width)
@@ -678,7 +680,7 @@ def _finalize_packet(self, packet_char, packet_width):
678680
self.state = _dvistate.outer
679681

680682
def _pre(self, i, x, cs, ds):
681-
if self.state != _dvistate.pre:
683+
if self.state is not _dvistate.pre:
682684
raise ValueError("pre command in middle of vf file")
683685
if i != 202:
684686
raise ValueError("Unknown vf format %d" % i)

lib/matplotlib/type1font.py

Lines changed: 21 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -28,6 +28,7 @@
2828
import six
2929

3030
import binascii
31+
import enum
3132
import io
3233
import itertools
3334
import re
@@ -40,6 +41,11 @@ def ord(x):
4041
return x
4142

4243

44+
# token types
45+
_TokenType = enum.Enum('_TokenType',
46+
'whitespace name string delimiter number')
47+
48+
4349
class Type1Font(object):
4450
"""
4551
A class representing a Type-1 font, for use by backends.
@@ -143,25 +149,18 @@ def _split(self, data):
143149
_comment_re = re.compile(br'%[^\r\n\v]*')
144150
_instring_re = re.compile(br'[()\\]')
145151

146-
# token types, compared via object identity (poor man's enum)
147-
_whitespace = object()
148-
_name = object()
149-
_string = object()
150-
_delimiter = object()
151-
_number = object()
152-
153152
@classmethod
154153
def _tokens(cls, text):
155154
"""
156155
A PostScript tokenizer. Yield (token, value) pairs such as
157-
(cls._whitespace, ' ') or (cls._name, '/Foobar').
156+
(_TokenType.whitespace, ' ') or (_TokenType.name, '/Foobar').
158157
"""
159158
pos = 0
160159
while pos < len(text):
161160
match = (cls._comment_re.match(text[pos:]) or
162161
cls._whitespace_re.match(text[pos:]))
163162
if match:
164-
yield (cls._whitespace, match.group())
163+
yield (_TokenType.whitespace, match.group())
165164
pos += match.end()
166165
elif text[pos] == b'(':
167166
start = pos
@@ -178,25 +177,25 @@ def _tokens(cls, text):
178177
depth -= 1
179178
else: # a backslash - skip the next character
180179
pos += 1
181-
yield (cls._string, text[start:pos])
180+
yield (_TokenType.string, text[start:pos])
182181
elif text[pos:pos + 2] in (b'<<', b'>>'):
183-
yield (cls._delimiter, text[pos:pos + 2])
182+
yield (_TokenType.delimiter, text[pos:pos + 2])
184183
pos += 2
185184
elif text[pos] == b'<':
186185
start = pos
187186
pos += text[pos:].index(b'>')
188-
yield (cls._string, text[start:pos])
187+
yield (_TokenType.string, text[start:pos])
189188
else:
190189
match = cls._token_re.match(text[pos:])
191190
if match:
192191
try:
193192
float(match.group())
194-
yield (cls._number, match.group())
193+
yield (_TokenType.number, match.group())
195194
except ValueError:
196-
yield (cls._name, match.group())
195+
yield (_TokenType.name, match.group())
197196
pos += match.end()
198197
else:
199-
yield (cls._delimiter, text[pos:pos + 1])
198+
yield (_TokenType.delimiter, text[pos:pos + 1])
200199
pos += 1
201200

202201
def _parse(self):
@@ -210,23 +209,23 @@ def _parse(self):
210209
'UnderlinePosition': -100, 'UnderlineThickness': 50}
211210
filtered = ((token, value)
212211
for token, value in self._tokens(self.parts[0])
213-
if token is not self._whitespace)
212+
if token is not _TokenType.whitespace)
214213
# The spec calls this an ASCII format; in Python 2.x we could
215214
# just treat the strings and names as opaque bytes but let's
216215
# turn them into proper Unicode, and be lenient in case of high bytes.
217216
convert = lambda x: x.decode('ascii', 'replace')
218217
for token, value in filtered:
219-
if token is self._name and value.startswith(b'/'):
218+
if token is _TokenType.name and value.startswith(b'/'):
220219
key = convert(value[1:])
221220
token, value = next(filtered)
222-
if token is self._name:
221+
if token is _TokenType.name:
223222
if value in (b'true', b'false'):
224223
value = value == b'true'
225224
else:
226225
value = convert(value.lstrip(b'/'))
227-
elif token is self._string:
226+
elif token is _TokenType.string:
228227
value = convert(value.lstrip(b'(').rstrip(b')'))
229-
elif token is self._number:
228+
elif token is _TokenType.number:
230229
if b'.' in value:
231230
value = float(value)
232231
else:
@@ -284,7 +283,7 @@ def replacer(tokens):
284283
token, value = next(tokens) # name, e.g., /FontMatrix
285284
yield bytes(value)
286285
token, value = next(tokens) # possible whitespace
287-
while token is cls._whitespace:
286+
while token is _TokenType.whitespace:
288287
yield bytes(value)
289288
token, value = next(tokens)
290289
if value != b'[': # name/number/etc.
@@ -309,7 +308,7 @@ def suppress(tokens):
309308
b'/UniqueID': suppress}
310309

311310
for token, value in tokens:
312-
if token is cls._name and value in table:
311+
if token is _TokenType.name and value in table:
313312
for value in table[value](itertools.chain([(token, value)],
314313
tokens)):
315314
yield value

0 commit comments

Comments
 (0)