Skip to content

Commit 816a4d7

Browse files
committed
Enable lots of extra Ruff checks
Warns about various small code smells and odd issues we can catch early. Nothing here should change the program behavior directly.
1 parent 1cb57d0 commit 816a4d7

8 files changed: +74 −67 lines changed

itch_dl/api.py

+4-8
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
from typing import Optional
1+
from typing import Optional, Any
22

33
import requests
44
from requests import Session
@@ -9,7 +9,7 @@
99

1010

1111
class ItchApiClient:
12-
def __init__(self, api_key: str, user_agent: str, base_url: Optional[str] = None):
12+
def __init__(self, api_key: str, user_agent: str, base_url: Optional[str] = None) -> None:
1313
self.base_url = base_url or ITCH_API
1414
self.api_key = api_key
1515

@@ -33,7 +33,7 @@ def get(
3333
endpoint: str,
3434
append_api_key: bool = True,
3535
guess_encoding: bool = False,
36-
**kwargs,
36+
**kwargs: Any, # noqa: ANN401
3737
) -> requests.Response:
3838
"""Wrapper around `requests.get`.
3939
@@ -49,11 +49,7 @@ def get(
4949

5050
kwargs["data"] = params
5151

52-
if endpoint.startswith("https://"):
53-
url = endpoint
54-
else:
55-
url = self.base_url + endpoint
56-
52+
url = endpoint if endpoint.startswith("https://") else self.base_url + endpoint
5753
r = self.requests.get(url, **kwargs)
5854

5955
# Itch always returns UTF-8 pages and API responses. Force

itch_dl/cli.py

+7-5
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,5 @@
11
import os
2+
import sys
23
import logging
34
import argparse
45

@@ -55,7 +56,7 @@ def run() -> int:
5556
logging.getLogger().setLevel(logging.DEBUG)
5657

5758
if not settings.api_key:
58-
exit(
59+
sys.exit(
5960
"You did not provide an API key which itch-dl requires.\n"
6061
"See https://github.com/DragoonAethis/itch-dl/wiki/API-Keys for more info."
6162
)
@@ -67,17 +68,17 @@ def run() -> int:
6768
client = ItchApiClient(settings.api_key, settings.user_agent)
6869
profile_req = client.get("/profile")
6970
if not profile_req.ok:
70-
exit(
71+
sys.exit(
7172
f"Provided API key appears to be invalid: {profile_req.text}\n"
7273
"See https://github.com/DragoonAethis/itch-dl/wiki/API-Keys for more info."
7374
)
7475

7576
jobs = get_jobs_for_url_or_path(url_or_path, settings)
7677
jobs = list(set(jobs)) # Deduplicate, just in case...
77-
logging.info(f"Found {len(jobs)} URL(s).")
78+
logging.info("Found %d URL(s).", len(jobs))
7879

7980
if len(jobs) == 0:
80-
exit("No URLs to download.")
81+
sys.exit("No URLs to download.")
8182

8283
if settings.urls_only:
8384
for job in jobs:
@@ -92,4 +93,5 @@ def run() -> int:
9293
# Grab all the download keys (there's no way to fetch them per title...):
9394
keys = get_download_keys(client)
9495

95-
return drive_downloads(jobs, settings, keys)
96+
drive_downloads(jobs, settings, keys)
97+
return 0

itch_dl/config.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -65,14 +65,14 @@ def load_config(args: argparse.Namespace, profile: Optional[str] = None) -> Sett
6565
profile_file_path = os.path.join(config_path, "profiles", profile or "")
6666

6767
if os.path.isfile(config_file_path):
68-
logging.debug(f"Found config file: {config_file_path}")
68+
logging.debug("Found config file: %s", config_file_path)
6969
with open(config_file_path) as f:
7070
config_data = json.load(f)
7171
else:
7272
config_data = {}
7373

7474
if os.path.isfile(profile_file_path):
75-
logging.debug(f"Found profile: {profile_file_path}")
75+
logging.debug("Found profile: %s", profile_file_path)
7676
with open(config_file_path) as f:
7777
profile_data = json.load(f)
7878

itch_dl/downloader.py

+20-17
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,7 @@
66
import urllib.parse
77
import zipfile
88
import tarfile
9-
from typing import List, Dict, TypedDict, Optional, Union
9+
from typing import List, Dict, TypedDict, Optional, Union, Any
1010

1111
from bs4 import BeautifulSoup
1212
from requests.exceptions import HTTPError, JSONDecodeError
@@ -30,7 +30,7 @@
3030

3131

3232
class DownloadResult:
33-
def __init__(self, url: str, success: bool, errors, external_urls: List[str]):
33+
def __init__(self, url: str, success: bool, errors: Optional[List[str]], external_urls: List[str]) -> None:
3434
self.url = url
3535
self.success = success
3636
self.errors = errors or []
@@ -62,13 +62,13 @@ class GameMetadata(TypedDict, total=False):
6262

6363

6464
class GameDownloader:
65-
def __init__(self, settings: Settings, keys: Dict[int, str]):
65+
def __init__(self, settings: Settings, keys: Dict[int, str]) -> None:
6666
self.settings = settings
6767
self.download_keys = keys
6868
self.client = ItchApiClient(settings.api_key, settings.user_agent)
6969

7070
@staticmethod
71-
def get_rating_json(site) -> Optional[dict]:
71+
def get_rating_json(site: BeautifulSoup) -> Optional[dict]:
7272
for ldjson_node in site.find_all("script", type="application/ld+json"):
7373
try:
7474
ldjson: dict = json.loads(ldjson_node.text.strip())
@@ -80,7 +80,7 @@ def get_rating_json(site) -> Optional[dict]:
8080
return None
8181

8282
@staticmethod
83-
def get_meta(site, **kwargs) -> Optional[str]:
83+
def get_meta(site: BeautifulSoup, **kwargs: Any) -> Optional[str]: # noqa: ANN401
8484
"""Grabs <meta property="xyz" content="value"/> values."""
8585
node = site.find("meta", attrs=kwargs)
8686
if not node:
@@ -160,8 +160,8 @@ def extract_metadata(self, game_id: int, url: str, site: BeautifulSoup) -> GameM
160160
infobox = parse_infobox(infobox_div)
161161
for dt in ("created_at", "updated_at", "released_at", "published_at"):
162162
if dt in infobox:
163-
metadata[dt] = infobox[dt].isoformat() # noqa (non-literal TypedDict keys)
164-
del infobox[dt] # noqa (non-literal TypedDict keys)
163+
metadata[dt] = infobox[dt].isoformat() # noqa: PyTypedDict (non-literal TypedDict keys)
164+
del infobox[dt] # noqa: PyTypedDict (non-literal TypedDict keys)
165165

166166
if "author" in infobox:
167167
metadata["author"] = infobox["author"]["author"]
@@ -179,7 +179,7 @@ def extract_metadata(self, game_id: int, url: str, site: BeautifulSoup) -> GameM
179179
if agg_rating:
180180
try:
181181
metadata["rating"] = {"average": float(agg_rating["ratingValue"]), "votes": agg_rating["ratingCount"]}
182-
except: # noqa
182+
except: # noqa: E722 (do not use bare `except`)
183183
logging.exception("Could not extract the rating metadata...")
184184
pass # Nope, just, don't
185185

@@ -221,7 +221,7 @@ def download_file_by_upload_id(self, upload_id: int, download_path: Optional[str
221221
return self.download_file(f"/uploads/{upload_id}/download", download_path, credentials)
222222

223223
@staticmethod
224-
def get_decompressed_content_size(target_path) -> None | int:
224+
def get_decompressed_content_size(target_path: str | os.PathLike[str]) -> None | int:
225225
"""For some files, Itch API returns the decompressed file size, but serves
226226
compressed downloads. Try to figure out the decompressed size. It may be
227227
a single file in the root, or a container + files in it."""
@@ -248,7 +248,7 @@ def get_decompressed_content_size(target_path) -> None | int:
248248

249249
return None
250250

251-
def download(self, url: str, skip_downloaded: bool = True):
251+
def download(self, url: str, skip_downloaded: bool = True) -> DownloadResult:
252252
match = re.match(ITCH_GAME_URL_REGEX, url)
253253
if not match:
254254
return DownloadResult(url, False, [f"Game URL is invalid: {url} - please file a new issue."], [])
@@ -310,15 +310,15 @@ def download(self, url: str, skip_downloaded: bool = True):
310310
logging.info(
311311
"File '%s' does not match the glob filter '%s', skipping",
312312
file_name,
313-
self.settings.filter_files_glob
313+
self.settings.filter_files_glob,
314314
)
315315
continue
316316

317317
if self.settings.filter_files_regex and not re.fullmatch(self.settings.filter_files_regex, file_name):
318318
logging.info(
319319
"File '%s' does not match the regex filter '%s', skipping",
320320
file_name,
321-
self.settings.filter_files_regex
321+
self.settings.filter_files_regex,
322322
)
323323
continue
324324

@@ -338,7 +338,7 @@ def download(self, url: str, skip_downloaded: bool = True):
338338
continue
339339

340340
if upload_is_external:
341-
logging.debug("Found external download URL for %s: %s", target_url)
341+
logging.debug("Found external download URL for %s: %s", title, target_url)
342342
external_urls.append(target_url)
343343
continue
344344

@@ -356,7 +356,10 @@ def download(self, url: str, skip_downloaded: bool = True):
356356
and downloaded_size != expected_size
357357
and content_size != expected_size
358358
):
359-
errors.append(f"Downloaded file size is {downloaded_size} (content {content_size}), expected {expected_size} for upload {upload}")
359+
errors.append(
360+
f"Downloaded file size is {downloaded_size} (content {content_size}), "
361+
f"expected {expected_size} for upload {upload}"
362+
)
360363

361364
logging.debug("Done downloading files for %s", title)
362365
except Exception as e:
@@ -366,7 +369,7 @@ def download(self, url: str, skip_downloaded: bool = True):
366369
metadata["external_downloads"] = external_urls
367370

368371
if len(external_urls) > 0:
369-
logging.warning(f"Game {title} has external download URLs: {external_urls}")
372+
logging.warning("Game %s has external download URLs: %s", title, external_urls)
370373

371374
# TODO: Mirror JS/CSS assets
372375
if self.settings.mirror_web:
@@ -395,7 +398,7 @@ def download(self, url: str, skip_downloaded: bool = True):
395398
json.dump(metadata, f, indent=4)
396399

397400
if len(errors) > 0:
398-
logging.error(f"Game {title} has download errors: {errors}")
401+
logging.error("Game %s has download errors: %s", title, errors)
399402

400403
logging.info("Finished job %s (%s)", url, title)
401404
return DownloadResult(url, len(errors) == 0, errors, external_urls)
@@ -405,7 +408,7 @@ def drive_downloads(
405408
jobs: List[str],
406409
settings: Settings,
407410
keys: Dict[int, str],
408-
):
411+
) -> None:
409412
downloader = GameDownloader(settings, keys)
410413
tqdm_args = {
411414
"desc": "Games",

itch_dl/handlers.py

+12-12
Original file line numberDiff line numberDiff line change
@@ -31,10 +31,10 @@ def get_game_jam_json(jam_url: str, client: ItchApiClient) -> dict:
3131
raise ItchDownloadError(
3232
"Provided site did not contain the Game Jam ID. Provide "
3333
"the path to the game jam entries JSON file instead, or "
34-
"create an itch-dl issue with the Game Jam URL."
34+
"create an itch-dl issue with the Game Jam URL.",
3535
)
3636

37-
logging.info(f"Extracted Game Jam ID: {jam_id}")
37+
logging.info("Extracted Game Jam ID: %d", jam_id)
3838
r = client.get(f"{ITCH_URL}/jam/{jam_id}/entries.json")
3939
if not r.ok:
4040
raise ItchDownloadError(f"Could not download the game jam entries list: {r.status_code} {r.reason}")
@@ -57,7 +57,7 @@ def get_jobs_for_browse_url(url: str, client: ItchApiClient) -> List[str]:
5757
logging.info("Scraping game URLs from RSS feeds for %s", url)
5858

5959
while True:
60-
logging.info(f"Downloading page {page} (found {len(found_urls)} URLs total)")
60+
logging.info("Downloading page %d (found %d URLs total)", page, len(found_urls))
6161
r = client.get(f"{url}.xml?page={page}", append_api_key=False)
6262
if not r.ok:
6363
logging.info("RSS feed returned %s, finished.", r.reason)
@@ -69,7 +69,7 @@ def get_jobs_for_browse_url(url: str, client: ItchApiClient) -> List[str]:
6969
logging.info("No more items, finished.")
7070
break
7171

72-
logging.info(f"Found {len(rss_items)} items.")
72+
logging.info("Found %d items.", len(rss_items))
7373
for item in rss_items:
7474
link_node = item.find("link")
7575
if link_node is None:
@@ -92,7 +92,7 @@ def get_jobs_for_collection_json(url: str, client: ItchApiClient) -> List[str]:
9292
found_urls: Set[str] = set()
9393

9494
while True:
95-
logging.info(f"Downloading page {page} (found {len(found_urls)} URLs total)")
95+
logging.info("Downloading page %d (found %d URLs total)", page, len(found_urls))
9696
r = client.get(url, data={"page": page}, timeout=15)
9797
if not r.ok:
9898
logging.info("Collection page %d returned %d %s, finished.", page, r.status_code, r.reason)
@@ -129,14 +129,14 @@ def get_jobs_for_creator(creator: str, client: ItchApiClient) -> List[str]:
129129

130130
soup = BeautifulSoup(r.text, features="xml")
131131
for link in soup.select("a.game_link"):
132-
link_url = link.attrs.get('href')
132+
link_url = link.attrs.get("href")
133133
if not link_url:
134134
continue
135135

136136
if link_url.startswith(prefix):
137137
game_links.add(link_url)
138138

139-
return list(sorted(game_links))
139+
return sorted(game_links)
140140

141141

142142
def get_jobs_for_itch_url(url: str, client: ItchApiClient) -> List[str]:
@@ -145,7 +145,7 @@ def get_jobs_for_itch_url(url: str, client: ItchApiClient) -> List[str]:
145145
url = "https://" + url[7:]
146146

147147
if url.startswith(f"https://www.{ITCH_BASE}/"):
148-
logging.info(f"Correcting www.{ITCH_BASE} to {ITCH_BASE}")
148+
logging.info("Correcting www.%s to %s", ITCH_BASE, ITCH_BASE)
149149
url = ITCH_URL + "/" + url[20:]
150150

151151
url_parts = urllib.parse.urlparse(url)
@@ -199,7 +199,7 @@ def get_jobs_for_itch_url(url: str, client: ItchApiClient) -> List[str]:
199199

200200
elif url_parts.netloc.endswith(f".{ITCH_BASE}"):
201201
if len(url_path_parts) == 0: # Author
202-
return get_jobs_for_creator(url_parts.netloc.split('.')[0], client)
202+
return get_jobs_for_creator(url_parts.netloc.split(".")[0], client)
203203

204204
else: # Single game
205205
# Just clean and return the URL:
@@ -226,9 +226,9 @@ def get_jobs_for_path(path: str) -> List[str]:
226226
url_list = []
227227
with open(path) as f: # Plain job list?
228228
for line in f:
229-
line = line.strip()
230-
if line.startswith("https://") or line.startswith("http://"):
231-
url_list.append(line)
229+
link = line.strip()
230+
if link.startswith("https://") or link.startswith("http://"):
231+
url_list.append(link)
232232

233233
if len(url_list) > 0:
234234
logging.info("Parsing provided file as a list of URLs to fetch...")

itch_dl/infobox.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -120,6 +120,6 @@ def parse_infobox(infobox: BeautifulSoup) -> InfoboxMetadata:
120120

121121
parsed_block = parse_tr(name, content_td)
122122
if parsed_block:
123-
meta[parsed_block[0]] = parsed_block[1] # noqa (non-literal TypedDict keys)
123+
meta[parsed_block[0]] = parsed_block[1] # noqa: PyTypedDict (non-literal TypedDict keys)
124124

125125
return meta

0 commit comments

Comments (0)