Commit dc32870

style: solve lint errors

1 parent 175dc30 commit dc32870

File tree

  • src/mkdocs_newsletter/services

1 file changed: +14 -15 lines changed

src/mkdocs_newsletter/services/rss.py

Lines changed: 14 additions & 15 deletions
@@ -113,35 +113,34 @@ def _build_rss_entries(
         html = BeautifulSoup(newsletter_file, "html.parser")
 
         try:
-            timeago = html.find("span", {"class": "timeago"})
-            if timeago is None:
+            if html.find("span", {"class": "timeago"}) is None:
                 raise ValueError("Could not find timeago")
-            # ignore: The object doesn't have __getitem__ defined but it still works. It's probably a typing error
-            published = timeago["datetime"]  # type: ignore
+            # ignore: The object doesn't have __getitem__ defined but it still works.
+            # It's probably a typing error
+            published = html.find("span", {"class": "timeago"})[
+                "datetime"
+            ]  # type: ignore
         except IndexError:
             published = newsletter.date.isoformat()
 
         # Clean the source code
 
         # Remove the h1 as it's already in the title
-        article = html.article
-        if article is None:
+        if html.article is None:
             raise ValueError("Could not find the article")
-        h1 = article.h1
-        if h1 is None:
+        if html.article.h1 is None:
             raise ValueError("Could not find h1 title")
-        title = h1.text
-        h1.extract()
+        title = html.article.h1.text
+        html.article.h1.extract()
 
         # Remove the Last updated: line
         with suppress(AttributeError):
-            div = article.div
-            if div is None:
-                pass
-            div.extract()
+            if html.article.div is None:
+                raise AttributeError
+            html.article.div.extract()
 
         # Remove the permalinks
-        for permalink in article.find_all("a", {"class": "headerlink"}):
+        for permalink in html.article.find_all("a", {"class": "headerlink"}):
             permalink.extract()
 
         description = re.sub(
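
For context, here is a minimal sketch (not part of the commit) of how the refactored lookups behave. The HTML fragment, the sample values, and the standalone script are assumptions modelled on the elements the diff queries; the real function receives the rendered newsletter page and builds RSS entries from it. The repeated html.find / html.article chains mirror what the commit switched to, presumably so the linter stops flagging the intermediate Optional variables.

# Sketch only: exercises the same BeautifulSoup lookups as the refactored code.
# The HTML below is an assumption shaped like a generated newsletter page.
from bs4 import BeautifulSoup

newsletter_file = """
<article>
  <h1>March 2021</h1>
  <span class="timeago" datetime="2021-03-01T00:00:00+00:00"></span>
  <div>Last updated: 2021-03-01</div>
  <p>Body text <a class="headerlink" href="#march-2021">&para;</a></p>
</article>
"""

html = BeautifulSoup(newsletter_file, "html.parser")

# Look the element up again instead of binding it to a variable, as in the diff.
if html.find("span", {"class": "timeago"}) is None:
    raise ValueError("Could not find timeago")
published = html.find("span", {"class": "timeago"})["datetime"]  # type: ignore

if html.article is None or html.article.h1 is None:
    raise ValueError("Could not find the article or its h1 title")
title = html.article.h1.text
html.article.h1.extract()  # drop the h1, it is already used as the entry title

print(published)  # 2021-03-01T00:00:00+00:00
print(title)      # March 2021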
