Thanks for visiting codestin.com
Credit goes to github.com

Skip to content

Commit b15d35c

Browse files
committed
Lint fixes.
1 parent 2a59a12 commit b15d35c

20 files changed

Lines changed: 127 additions & 123 deletions

sdks/python/apache_beam/dataflow_test.py

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -114,8 +114,8 @@ def process(self, context, prefix, suffix):
114114
words = pipeline | 'SomeWords' >> Create(words_list)
115115
prefix = 'zyx'
116116
suffix = pipeline | 'SomeString' >> Create(['xyz']) # side in
117-
result = words | 'DecorateWordsDoFn' >> ParDo(SomeDoFn(), prefix,
118-
suffix=AsSingleton(suffix))
117+
result = words | 'DecorateWordsDoFn' >> ParDo(
118+
SomeDoFn(), prefix, suffix=AsSingleton(suffix))
119119
assert_that(result, equal_to(['zyx-%s-xyz' % x for x in words_list]))
120120
pipeline.run()
121121

@@ -179,8 +179,7 @@ def test_default_value_singleton_side_input(self):
179179
pipeline = Pipeline('DirectPipelineRunner')
180180
pcol = pipeline | 'start' >> Create([1, 2])
181181
side = pipeline | 'side' >> Create([]) # 0 values in side input.
182-
result = (
183-
pcol | 'compute' >> FlatMap(lambda x, s: [x * s], AsSingleton(side, 10)))
182+
result = pcol | FlatMap(lambda x, s: [x * s], AsSingleton(side, 10))
184183
assert_that(result, equal_to([10, 20]))
185184
pipeline.run()
186185

sdks/python/apache_beam/examples/complete/juliaset/juliaset/juliaset.py

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -105,11 +105,12 @@ def run(argv=None): # pylint: disable=missing-docstring
105105
# Group each coordinate triplet by its x value, then write the coordinates to
106106
# the output file with an x-coordinate grouping per line.
107107
# pylint: disable=expression-not-assigned
108-
(coordinates | 'x coord key' >> beam.Map(lambda (x, y, i): (x, (x, y, i)))
109-
| 'x coord' >> beam.GroupByKey() | beam.Map(
110-
'format',
108+
(coordinates
109+
| 'x coord key' >> beam.Map(lambda (x, y, i): (x, (x, y, i)))
110+
| 'x coord' >> beam.GroupByKey()
111+
| 'format' >> beam.Map(
111112
lambda (k, coords): ' '.join('(%s, %s, %s)' % coord for coord in coords))
112-
| 'write' >> beam.io.Write(beam.io.TextFileSink(known_args.coordinate_output)))
113+
| beam.io.Write(beam.io.TextFileSink(known_args.coordinate_output)))
113114
# pylint: enable=expression-not-assigned
114115
p.run()
115116

sdks/python/apache_beam/examples/complete/tfidf_test.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -56,7 +56,7 @@ def test_tfidf_transform(self):
5656
result = (
5757
uri_to_line
5858
| tfidf.TfIdf()
59-
| 'flatten' >> beam.Map(lambda (word, (uri, tfidf)): (word, uri, tfidf)))
59+
| beam.Map(lambda (word, (uri, tfidf)): (word, uri, tfidf)))
6060
beam.assert_that(result, beam.equal_to(EXPECTED_RESULTS))
6161
# Run the pipeline. Note that the assert_that above adds to the pipeline
6262
# a check that the result PCollection contains expected values. To actually

sdks/python/apache_beam/examples/cookbook/bigquery_side_input.py

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -101,20 +101,20 @@ def run(argv=None):
101101
ignore_corpus = known_args.ignore_corpus
102102
ignore_word = known_args.ignore_word
103103

104-
pcoll_corpus = p | beam.Read('read corpus',
105-
beam.io.BigQuerySource(query=query_corpus))
106-
pcoll_word = p | beam.Read('read words',
107-
beam.io.BigQuerySource(query=query_word))
108-
pcoll_ignore_corpus = p | 'create_ignore_corpus' >> beam.Create([ignore_corpus])
104+
pcoll_corpus = p | 'read corpus' >> beam.io.Read(
105+
beam.io.BigQuerySource(query=query_corpus))
106+
pcoll_word = p | 'read_words' >> beam.Read(
107+
beam.io.BigQuerySource(query=query_word))
108+
pcoll_ignore_corpus = p | 'create_ignore_corpus' >> beam.Create(
109+
[ignore_corpus])
109110
pcoll_ignore_word = p | 'create_ignore_word' >> beam.Create([ignore_word])
110111
pcoll_group_ids = p | 'create groups' >> beam.Create(group_ids)
111112

112113
pcoll_groups = create_groups(pcoll_group_ids, pcoll_corpus, pcoll_word,
113114
pcoll_ignore_corpus, pcoll_ignore_word)
114115

115116
# pylint:disable=expression-not-assigned
116-
pcoll_groups | beam.io.Write('WriteToText',
117-
beam.io.TextFileSink(known_args.output))
117+
pcoll_groups | beam.io.Write(beam.io.TextFileSink(known_args.output))
118118
p.run()
119119

120120

sdks/python/apache_beam/examples/cookbook/bigquery_side_input_test.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -35,8 +35,8 @@ def test_create_groups(self):
3535
{'f': 'corpus2'},
3636
{'f': 'corpus3'}])
3737
words_pcoll = p | 'create_words' >> beam.Create([{'f': 'word1'},
38-
{'f': 'word2'},
39-
{'f': 'word3'}])
38+
{'f': 'word2'},
39+
{'f': 'word3'}])
4040
ignore_corpus_pcoll = p | 'create_ignore_corpus' >> beam.Create(['corpus1'])
4141
ignore_word_pcoll = p | 'create_ignore_word' >> beam.Create(['word1'])
4242

sdks/python/apache_beam/examples/cookbook/bigquery_tornadoes.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -53,11 +53,11 @@ def count_tornadoes(input_data):
5353
"""
5454

5555
return (input_data
56-
| beam.FlatMap(
57-
'months with tornadoes',
56+
| 'months with tornadoes' >> beam.FlatMap(
5857
lambda row: [(int(row['month']), 1)] if row['tornado'] else [])
5958
| 'monthly count' >> beam.CombinePerKey(sum)
60-
| 'format' >> beam.Map(lambda (k, v): {'month': k, 'tornado_count': v}))
59+
| 'format' >> beam.Map(
60+
lambda (k, v): {'month': k, 'tornado_count': v}))
6161

6262

6363
def run(argv=None):

sdks/python/apache_beam/examples/cookbook/bigshuffle.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -59,8 +59,9 @@ def run(argv=None):
5959

6060
# Count the occurrences of each word.
6161
output = (lines
62-
| 'split' >> beam.Map(lambda x: (x[:10], x[10:99])
63-
).with_output_types(beam.typehints.KV[str, str])
62+
| 'split' >> beam.Map(
63+
lambda x: (x[:10], x[10:99]))
64+
.with_output_types(beam.typehints.KV[str, str])
6465
| 'group' >> beam.GroupByKey()
6566
| beam.FlatMap(
6667
'format',

sdks/python/apache_beam/examples/cookbook/filters.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -88,7 +88,7 @@ def run(argv=None):
8888

8989
p = beam.Pipeline(argv=pipeline_args)
9090

91-
input_data = p | 'input' >> beam.Read(beam.io.BigQuerySource(known_args.input))
91+
input_data = p | beam.Read(beam.io.BigQuerySource(known_args.input))
9292

9393
# pylint: disable=expression-not-assigned
9494
(filter_cold_days(input_data, known_args.month_filter)

sdks/python/apache_beam/examples/snippets/snippets.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -307,7 +307,7 @@ def pipeline_options_command_line(argv):
307307
p = beam.Pipeline(argv=pipeline_args)
308308
lines = p | beam.io.Read('ReadFromText',
309309
beam.io.TextFileSource(known_args.input))
310-
lines | 'WriteToText' >> beam.io.Write(beam.io.TextFileSink(known_args.output))
310+
lines | beam.io.Write(beam.io.TextFileSink(known_args.output))
311311
# [END pipeline_options_command_line]
312312

313313
p.run()

sdks/python/apache_beam/examples/snippets/snippets_test.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -101,6 +101,7 @@ def capitals(word):
101101
self.assertEqual({'A', 'C'}, set(all_capitals))
102102

103103
def test_pardo_with_label(self):
104+
# pylint: disable=line-too-long
104105
words = ['aa', 'bbc', 'defg']
105106
# [START model_pardo_with_label]
106107
result = words | 'CountUniqueLetters' >> beam.Map(lambda word: len(set(word)))
@@ -127,10 +128,9 @@ def filter_using_length(word, lower_bound, upper_bound=float('inf')):
127128
small_words = words | 'small' >> beam.FlatMap(filter_using_length, 0, 3)
128129

129130
# A single deferred side input.
130-
larger_than_average = (words
131-
| 'large' >> beam.FlatMap(filter_using_length,
132-
lower_bound=pvalue.AsSingleton(
133-
avg_word_len)))
131+
larger_than_average = (words | 'large' >> beam.FlatMap(
132+
filter_using_length,
133+
lower_bound=pvalue.AsSingleton(avg_word_len)))
134134

135135
# Mix and match.
136136
small_but_nontrivial = words | beam.FlatMap(filter_using_length,

0 commit comments

Comments
 (0)