Thanks for visiting codestin.com
Credit goes to github.com

Skip to content

Commit eea96c5

Browse files
committed
code cleanup
1 parent b12aa8a commit eea96c5

12 files changed

Lines changed: 27 additions & 27 deletions

extra/chardet/chardistribution.py

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -42,7 +42,7 @@ def __init__(self):
4242
self._mTableSize = None # Size of above table
4343
self._mTypicalDistributionRatio = None # This is a constant value which varies from language to language, used in calculating confidence. See http://www.mozilla.org/projects/intl/UniversalCharsetDetection.html for further detail.
4444
self.reset()
45-
45+
4646
def reset(self):
4747
"""reset analyser, clear any state"""
4848
self._mDone = constants.False # If this flag is set to constants.True, detection is done and conclusion has been made
@@ -87,7 +87,7 @@ def get_order(self, aStr):
8787
# convert this encoding string to a number, here called order.
8888
# This allows multiple encodings of a language to share one frequency table.
8989
return -1
90-
90+
9191
class EUCTWDistributionAnalysis(CharDistributionAnalysis):
9292
def __init__(self):
9393
CharDistributionAnalysis.__init__(self)

extra/chardet/charsetgroupprober.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -34,7 +34,7 @@ def __init__(self):
3434
self._mActiveNum = 0
3535
self._mProbers = []
3636
self._mBestGuessProber = None
37-
37+
3838
def reset(self):
3939
CharSetProber.reset(self)
4040
self._mActiveNum = 0

extra/chardet/charsetprober.py

Lines changed: 4 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -31,10 +31,10 @@
3131
class CharSetProber:
3232
def __init__(self):
3333
pass
34-
34+
3535
def reset(self):
3636
self._mState = constants.eDetecting
37-
37+
3838
def get_charset_name(self):
3939
return None
4040

@@ -50,11 +50,11 @@ def get_confidence(self):
5050
def filter_high_bit_only(self, aBuf):
5151
aBuf = re.sub(r'([\x00-\x7F])+', ' ', aBuf)
5252
return aBuf
53-
53+
5454
def filter_without_english_letters(self, aBuf):
5555
aBuf = re.sub(r'([A-Za-z])+', ' ', aBuf)
5656
return aBuf
57-
57+
5858
def filter_with_english_letters(self, aBuf):
5959
# TODO
6060
return aBuf

extra/chardet/escprober.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -75,5 +75,5 @@ def feed(self, aBuf):
7575
self._mState = constants.eFoundIt
7676
self._mDetectedCharset = codingSM.get_coding_state_machine()
7777
return self.get_state()
78-
78+
7979
return self.get_state()

extra/chardet/eucjpprober.py

Lines changed: 3 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -44,7 +44,7 @@ def __init__(self):
4444
def reset(self):
4545
MultiByteCharSetProber.reset(self)
4646
self._mContextAnalyzer.reset()
47-
47+
4848
def get_charset_name(self):
4949
return "EUC-JP"
5050

@@ -69,9 +69,9 @@ def feed(self, aBuf):
6969
else:
7070
self._mContextAnalyzer.feed(aBuf[i-1:i+1], charLen)
7171
self._mDistributionAnalyzer.feed(aBuf[i-1:i+1], charLen)
72-
72+
7373
self._mLastChar[0] = aBuf[aLen - 1]
74-
74+
7575
if self.get_state() == constants.eDetecting:
7676
if self._mContextAnalyzer.got_enough_data() and \
7777
(self.get_confidence() > constants.SHORTCUT_THRESHOLD):

extra/chardet/hebrewprober.py

Lines changed: 3 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -164,7 +164,7 @@ def reset(self):
164164
self._mPrev = ' '
165165
self._mBeforePrev = ' '
166166
# These probers are owned by the group prober.
167-
167+
168168
def set_model_probers(self, logicalProber, visualProber):
169169
self._mLogicalProber = logicalProber
170170
self._mVisualProber = visualProber
@@ -184,7 +184,7 @@ def is_non_final(self, c):
184184
# these letters as Non-Final letters outweighs the damage since these words
185185
# are quite rare.
186186
return c in [NORMAL_KAF, NORMAL_MEM, NORMAL_NUN, NORMAL_PE]
187-
187+
188188
def feed(self, aBuf):
189189
# Final letter analysis for logical-visual decision.
190190
# Look for evidence that the received buffer is either logical Hebrew or
@@ -215,7 +215,7 @@ def feed(self, aBuf):
215215
return constants.eNotMe
216216

217217
aBuf = self.filter_high_bit_only(aBuf)
218-
218+
219219
for cur in aBuf:
220220
if cur == ' ':
221221
# We stand on a space - a word just ended

extra/chardet/jpcntx.py

Lines changed: 4 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -123,7 +123,7 @@
123123
class JapaneseContextAnalysis:
124124
def __init__(self):
125125
self.reset()
126-
126+
127127
def reset(self):
128128
self._mTotalRel = 0 # total sequence received
129129
self._mRelSample = [0] * NUM_OF_CATEGORY # category counters, each interger counts sequence in its category
@@ -133,7 +133,7 @@ def reset(self):
133133

134134
def feed(self, aBuf, aLen):
135135
if self._mDone: return
136-
136+
137137
# The buffer we got is byte oriented, and a character may span in more than one
138138
# buffers. In case the last one or two byte in last buffer is not complete, we
139139
# record how many byte needed to complete that character and skip these bytes here.
@@ -158,7 +158,7 @@ def feed(self, aBuf, aLen):
158158

159159
def got_enough_data(self):
160160
return self._mTotalRel > ENOUGH_REL_THRESHOLD
161-
161+
162162
def get_confidence(self):
163163
# This is just one way to calculate confidence. It works well for me.
164164
if self._mTotalRel > MINIMUM_DATA_THRESHOLD:
@@ -168,7 +168,7 @@ def get_confidence(self):
168168

169169
def get_order(self, aStr):
170170
return -1, 1
171-
171+
172172
class SJISContextAnalysis(JapaneseContextAnalysis):
173173
def get_order(self, aStr):
174174
if not aStr: return -1, 1

extra/chardet/latin1prober.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -122,7 +122,7 @@ def feed(self, aBuf):
122122
def get_confidence(self):
123123
if self.get_state() == constants.eNotMe:
124124
return 0.01
125-
125+
126126
total = reduce(operator.add, self._mFreqCounter)
127127
if total < 0.01:
128128
confidence = 0.0

extra/chardet/mbcharsetprober.py

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -68,9 +68,9 @@ def feed(self, aBuf):
6868
self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
6969
else:
7070
self._mDistributionAnalyzer.feed(aBuf[i-1:i+1], charLen)
71-
71+
7272
self._mLastChar[0] = aBuf[aLen - 1]
73-
73+
7474
if self.get_state() == constants.eDetecting:
7575
if self._mDistributionAnalyzer.got_enough_data() and \
7676
(self.get_confidence() > constants.SHORTCUT_THRESHOLD):

extra/chardet/sbcharsetprober.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -37,7 +37,7 @@
3737
NUMBER_OF_SEQ_CAT = 4
3838
POSITIVE_CAT = NUMBER_OF_SEQ_CAT - 1
3939
#NEGATIVE_CAT = 0
40-
40+
4141
class SingleByteCharSetProber(CharSetProber):
4242
def __init__(self, model, reversed=constants.False, nameProber=None):
4343
CharSetProber.__init__(self)

0 commit comments

Comments (0)