"""
|
2024-02-25 18:53:44 +00:00
|
|
|
Add a new item to a media catalogue, using various APIs:
|
|
|
|
|
|
|
|
- TV series' and films using the TMDB API and IDs;
|
|
|
|
- TV episodes using the TMDB API and TVDB IDs (because the TMDB
|
|
|
|
API is difficult and a lot of TMDB records don't have IMDB IDs);
|
|
|
|
- books using the OpenLibrary API and ISBNs; and
|
|
|
|
- games using the GiantBomb API and IDs.
|
2024-01-17 22:02:56 +00:00
|
|
|
"""
|
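
# NB: file paths below are relative, so run this script from the directory
# that contains ./data, ./logs and ./scripts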

import json
import logging
import os
import re
import time
from datetime import datetime

import requests
from dotenv import load_dotenv

authors = []


def setup_logger(name="add_item"):
    """Set up the logger for console and file"""

    logr = logging.getLogger(name)

    c_handler = logging.StreamHandler()
    f_handler = logging.FileHandler("./logs/run.log")
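
    # The console shows routine INFO messages, while the log file only
    # records warnings and errors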
    logging.root.setLevel(logging.INFO)
    c_handler.setLevel(logging.INFO)
    f_handler.setLevel(logging.WARNING)

    c_format = logging.Formatter("%(name)s - %(levelname)s - %(message)s")
    f_format = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")

    c_handler.setFormatter(c_format)
    f_handler.setFormatter(f_format)

    logr.addHandler(c_handler)
    logr.addHandler(f_handler)

    return logr


logger = setup_logger()

load_dotenv()

TMDB_API_KEY = os.getenv("TMDB_API_KEY")

if not TMDB_API_KEY:
    logger.error("TMDB API key not found")


def return_if_exists(item_id: str, media_type: str, log: str) -> dict | None:
    """Returns an item if it exists in the requested log"""

    logger.info(f"Checking for '{item_id}' in '{log}'…")
    with open(f"./data/{media_type}/{log}.json", "r", encoding="utf-8") as log_file:
        log_items = json.load(log_file)
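
    # Work out which key to match on from the shape of the ID: books can be
    # identified by an OpenLibrary ID, an ISBN-13 or an ISBN-10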
    id_key = "id"
    if "books" == media_type:
        if re.search("OL[0-9]+[MW]", item_id) is not None:
            id_key = "ol_id"
        elif re.search("[0-9]{13}", item_id) is not None:
            id_key = "isbn_13"
        elif re.search("[0-9]{10}", item_id) is not None:
            id_key = "isbn_10"
        else:
            raise Exception("Invalid ID for book")

    existing_items = [
        log_item
        for log_item in log_items
        if id_key in log_item and log_item[id_key] == item_id
    ]
    if len(existing_items) > 0:
        logger.info(f"Found item in '{log}'")
        return existing_items[-1]
    logger.info(f"'{item_id}' not found in '{log}'")


def delete_existing(item_id: str, media_type: str, log: str) -> None:
    """Deletes an item from a log if it matches the ID"""

    logger.info(f"Deleting '{item_id}' from '{log}'…")
    with open(f"./data/{media_type}/{log}.json", "r", encoding="utf-8") as log_file:
        log_items = json.load(log_file)

    id_key = "id"
    if "books" == media_type:
        if re.search("OL[0-9]+[MW]", item_id) is not None:
            id_key = "ol_id"
        elif re.search("[0-9]{13}", item_id) is not None:
            id_key = "isbn_13"
        elif re.search("[0-9]{10}", item_id) is not None:
            id_key = "isbn_10"
        else:
            raise Exception("Invalid ID for book")

    elif media_type in ["films", "tv_episodes"]:
        if re.search("tt[0-9]+", item_id) is not None:
            id_key = "imdb_id"
        elif re.search("[0-9]+", item_id) is not None:
            id_key = "tmdb_id"
        else:
            raise Exception("Invalid ID for film")
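
    # Keep every item that lacks the key or carries a different ID; only the
    # matching item is dropped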
    old_len = len(log_items)
    log_items = [
        log_item
        for log_item in log_items
        if id_key not in log_item or log_item[id_key] != item_id
    ]
    if len(log_items) < (old_len - 1):
        raise Exception("More than one deletion made, discarding…")
    elif len(log_items) == old_len:
        raise Exception("No item deleted, skipping…")

    with open(f"./data/{media_type}/{log}.json", "w", encoding="utf-8") as log_file:
        json.dump(log_items, log_file, indent=4)
    logger.info(f"'{item_id}' deleted from '{log}'")


def check_for_existing(item_id, media_type, log) -> tuple[dict | None, str | None]:
    """
    Check for an existing item in the current log, and pull the
    `date_added` etc. and mark it as a repeat if so.
    Otherwise, check for an existing item in the other logs, and move
    it to the specified log if so.
    """

    logger.info(f"Checking for '{item_id}' in logs…")

    # Check in specified log
    existing_item = return_if_exists(item_id, media_type, log)

    if existing_item is not None:
        if "log" == log:
            existing_item["is_repeat"] = True
        return existing_item, None
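
    # Then check the other logs, but only those that exist for the media
    # type: films have no 'current' log, and TV episodes have neither
    # 'current' nor 'wishlist'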
    for log_to_check in [
        p_log for p_log in ["log", "current", "wishlist"] if p_log != log
    ]:
        if (
            "current" == log_to_check and media_type in ["books", "games", "tv_series"]
        ) or (
            "wishlist" == log_to_check
            and media_type in ["books", "games", "films", "tv_series"]
        ):
            existing_item = return_if_exists(item_id, media_type, log_to_check)
            if existing_item is not None:
                return existing_item, log_to_check

    return None, None


def add_item_to_log(item_id: str, media_type: str, log: str) -> None:
    """Add a film, book, TV series or TV episode to a log"""

    logger.info(f"Processing {item_id}…")

    item: dict | None = None
    log_to_delete = None
    if media_type not in ["tv_episodes", "books"]:
        item, log_to_delete = check_for_existing(item_id, media_type, log)

    if item is None:
        item = import_by_id(item_id, media_type, log)
        if item is None:
            raise Exception("No item found")
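
    # A book imported by ISBN may already be logged under a different
    # identifier, so re-check using the work's OL ID, then the edition's
    # OL ID, then each ISBN in turn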
    if "books" == media_type:
        new_item = None
        if "work" in item and "ol_id" in item["work"]:
            new_item, log_to_delete = check_for_existing(
                item["work"]["ol_id"], media_type, log
            )

        if new_item is None:
            if "ol_id" in item:
                new_item, log_to_delete = check_for_existing(
                    item["ol_id"], media_type, log
                )

        if new_item is None:
            if "isbn_13" in item:
                new_item, log_to_delete = check_for_existing(
                    item["isbn_13"], media_type, log
                )

        if new_item is None:
            if "isbn_10" in item:
                new_item, log_to_delete = check_for_existing(
                    item["isbn_10"], media_type, log
                )

        item = new_item if new_item is not None else item

    if log in ["log", "current"]:
        if "date_started" not in item and media_type in ["books", "tv_series", "games"]:
            date_started = ""
            while re.search("[0-9]{4}-[0-9]{2}-[0-9]{2}", date_started) is None:
                date_started = input("Enter date started [YYYY-MM-DD, t for today]: ")
                if "t" == date_started:
                    date_started = datetime.today().strftime("%Y-%m-%d")
            item["date_started"] = date_started

        if "date_finished" not in item and "log" == log:
            date_finished = ""
            while re.search("[0-9]{4}-[0-9]{2}-[0-9]{2}", date_finished) is None:
                date_finished = input("Enter date finished [YYYY-MM-DD, t for today]: ")
                if "t" == date_finished:
                    date_finished = datetime.today().strftime("%Y-%m-%d")
            item["date_finished"] = date_finished

        if "is_repeat" not in item:
            is_repeat = ""
            while is_repeat not in ["y", "n"]:
                is_repeat = input("Is this a repeat entry? [y/n]: ")
            if "y" == is_repeat:
                item["is_repeat"] = True

    if "added_by_id" not in item:
        item["added_by_id"] = item_id

    if "comments" not in item:
        comments = input("Enter comments (optional): ")
        if "" != comments:
            item["comments"] = comments

    # Validation step
    print(f"{media_type} data to add:\n")
    print(json.dumps(item, indent=4))
    if "y" != input("\nDoes this look correct? [y]: "):
        return

    # Save changes
    logger.info(f"Adding {media_type} to {log}…")

    with open(f"./data/{media_type}/{log}.json", "r", encoding="utf-8") as log_file:
        log_items = json.load(log_file)

    log_items.insert(0, item)

    with open(f"./data/{media_type}/{log}.json", "w", encoding="utf-8") as log_file:
        json.dump(log_items, log_file, indent=4)

    logger.info(f"Added {media_type} {item_id} to {log}")

    if log_to_delete is not None:
        delete_existing(item_id, media_type, log_to_delete)


def import_by_id(import_id, media_type, log) -> dict | None:
    """Import from the appropriate API by unique ID"""

    if media_type in ["films", "tv_series"]:
        return import_from_tmdb_by_id(import_id, media_type)

    if media_type in ["tv_episodes"]:
        return import_from_tmdb_by_external_id(import_id, media_type)

    if media_type in ["books"]:
        if "wishlist" == log:
            return import_from_openlibrary_by_ol_key(import_id)
        else:
            return import_from_openlibrary_by_isbn(
                "".join(re.findall(r"\d+", import_id)), media_type
            )

    logger.error("Invalid media_type!")


def import_from_tmdb_by_external_id(external_id, media_type) -> dict:
    """Retrieve a film, TV show or TV episode from TMDB using an IMDB or TVDB ID"""

    api_url = f"https://api.themoviedb.org/3/find/{external_id}"
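
    # TMDB's /find endpoint has to be told which external ID scheme the ID
    # comes from; IMDB IDs always start with 'tt'
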
    # Sending API request
    response = requests.get(
        api_url,
        headers={"Authorization": f"Bearer {TMDB_API_KEY}"},
        params={
            "external_source": (
                "imdb_id" if re.search("tt[0-9]+", external_id) else "tvdb_id"
            )
        },
        timeout=15,
    )

    # Process the response
    if 200 == response.status_code:
        logger.debug(response.status_code)
    elif 429 == response.status_code:
        time.sleep(2)
        return import_from_tmdb_by_external_id(external_id, media_type)
    else:
        raise Exception(f"Error {response.status_code}: {response.text}")

    key = ""
    if "tv_episodes" == media_type:
        key = "tv_episode_results"
    elif "tv_series" == media_type:
        key = "tv_results"
    elif "films" == media_type:
        key = "movie_results"

    results = json.loads(response.text)[key]
    if not results:
        raise Exception(f"Nothing found for external ID {external_id}!")
    response_data = results[0]

    # Modify the returned result to add additional data
    return cleanup_result(response_data, media_type)


def import_from_tmdb_by_id(tmdb_id, media_type) -> dict:
    """Retrieve a film or TV series from TMDB using a TMDB ID"""

    api_path = "movie" if "films" == media_type else "tv"
    api_url = f"https://api.themoviedb.org/3/{api_path}/{tmdb_id}"

    # Sending API request
    response = requests.get(
        api_url, headers={"Authorization": f"Bearer {TMDB_API_KEY}"}, timeout=15
    )

    # Process the response
    if 200 == response.status_code:
        logger.debug(response.status_code)
    elif 429 == response.status_code:
        time.sleep(2)
        return import_from_tmdb_by_id(tmdb_id, media_type)
    else:
        raise Exception(f"Error {response.status_code}: {response.text}")

    response_data = json.loads(response.text)

    # Modify the returned result to add additional data
    return cleanup_result(response_data, media_type)


def import_from_openlibrary_by_isbn(isbn, media_type) -> dict | None:
    """Retrieve a book from OpenLibrary using an ISBN"""

    logger.info(f"Importing '{isbn}'…")

    api_url = f"https://openlibrary.org/isbn/{isbn}"

    # Sending API request
    response = requests.get(api_url, headers={"accept": "application/json"}, timeout=15)

    # Process the response
    if 200 == response.status_code:
        logger.debug(response.status_code)
    elif 429 == response.status_code:
        time.sleep(2)
        return import_from_openlibrary_by_isbn(isbn, media_type)
    elif 404 == response.status_code:
        logger.error(f"{response.status_code}: Not Found for ISBN '{isbn}'")
        return None
    else:
        raise Exception(f"Error {response.status_code}: {response.text}")

    item = json.loads(response.text)
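
    # An edition references its authors and works only by OL key, so fetch
    # each one and replace the bare reference with the full record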
    for key in ["authors", "works"]:
        if key in item:
            for i, sub_item in enumerate(item[key]):
                item[key][i] = import_from_openlibrary_by_ol_key(sub_item["key"])

    if "works" in item:
        if len(item["works"]) > 1:
            print(f"Multiple works found for {isbn}:")
            print(item["works"])
            idx = input(f"Select ID to use [0-{len(item['works'])-1}]: ")
            item["works"][0] = item["works"][int(idx)]

        item["work"] = item["works"][0]
        del item["works"]

    # Rate limiting
    time.sleep(1)

    # Modify the returned result to add additional data
    return cleanup_result(item, media_type)


def import_from_openlibrary_by_ol_key(key) -> dict | None:
    """Retrieves an item (author or work, NOT edition) from OpenLibrary using an OL key"""
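
    # A bare ID with no slashes is assumed to be a work key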
    if len(key.split("/")) == 1:
        key = f"/works/{key}"

    logger.info(f"Retrieving {key}…")
    _, mode, ol_id = key.split("/")
    cached_authors = []
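
    # Authors are cached locally, so repeat look-ups don't need another
    # rate-limited API call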
    if "authors" == mode:
        with open(
            "./scripts/caching/authors.json", "r", encoding="utf-8"
        ) as authors_cache:
            cached_authors = json.load(authors_cache)

    if mode in ["works", "authors"]:
        if "authors" == mode:
            matched_cached_authors = [
                aut for aut in cached_authors if aut["ol_id"] == ol_id
            ]
            if len(matched_cached_authors) == 1:
                logger.info(
                    f"Found cached author '{matched_cached_authors[0]['name']}'"
                )
                return matched_cached_authors[0]

        api_url = f"https://openlibrary.org{key}"

        # Sending API request
        response = requests.get(
            api_url, headers={"accept": "application/json"}, timeout=15
        )

        # Process the response
        if 200 == response.status_code:
            logger.debug(response.status_code)
        elif 429 == response.status_code:
            time.sleep(2)
            return import_from_openlibrary_by_ol_key(key)
        else:
            raise Exception(f"Error {response.status_code}: {response.text}")

        # Rate limiting
        time.sleep(1)

        item = json.loads(response.text)

        if "authors" == mode:
            author = {"ol_id": ol_id, "name": item["name"]}
            print(author)
            if "personal_name" in item:
                if item["name"] != item["personal_name"]:
                    author["personal_name"] = item["personal_name"]

            logger.info(f"Caching author '{author['name']}'…")
            cached_authors.append(author)
            with open(
                "./scripts/caching/authors.json", "w", encoding="utf-8"
            ) as authors_cache:
                json.dump(cached_authors, authors_cache, indent=4)
            logger.info(f"Author '{author['name']}' cached!")

            return author

        if "works" == mode:
            work = {"ol_id": ol_id, "title": item["title"], "authors": []}

            if "authors" in item:
                for author in item["authors"]:
                    work["authors"].append(
                        import_from_openlibrary_by_ol_key(author["author"]["key"])
                    )

            for result_key in ["first_publish_date", "subjects"]:
                if result_key in item:
                    work[result_key] = item[result_key]

            work["date_added"] = datetime.today().strftime("%Y-%m-%d")

            return work

    else:
        raise Exception(f"Unknown OpenLibrary key '{mode}'")


def cleanup_result(item, media_type) -> dict:
    """Process a film, TV series, TV episode or book returned by their
    respective APIs by removing unnecessary fields and adding others"""

    for field_name in [
        "adult",  # TMDB
        "backdrop_path",  # TMDB
        "budget",  # TMDB
        "copyright_date",  # OpenLibrary
        "classifications",  # OpenLibrary
        "created",  # OpenLibrary
        "dewey_decimal_class",  # OpenLibrary
        "episode_type",  # TMDB
        "first_sentence",  # OpenLibrary
        "genre_ids",  # TMDB
        "homepage",  # TMDB
        "identifiers",  # OpenLibrary
        "media_type",  # TMDB
        "last_modified",  # OpenLibrary
        "latest_revision",  # OpenLibrary
        "lc_classifications",  # OpenLibrary
        "lccn",  # OpenLibrary
        "local_id",  # OpenLibrary
        "notes",  # OpenLibrary
        "ocaid",  # OpenLibrary
        "oclc_numbers",  # OpenLibrary
        "pagination",  # OpenLibrary
        "physical_dimensions",  # OpenLibrary
        "popularity",  # TMDB
        "production_code",  # TMDB
        "production_companies",  # TMDB
        "publish_places",  # OpenLibrary
        "revenue",  # TMDB
        "revision",  # OpenLibrary
        "runtime",  # TMDB
        "source_records",  # OpenLibrary
        "status",  # TMDB
        "still_path",  # TMDB
        "table_of_contents",  # OpenLibrary
        "tagline",  # TMDB
        "type",  # OpenLibrary
        "uri_descriptions",  # OpenLibrary
        "url",  # OpenLibrary
        "video",  # TMDB
        "vote_average",  # TMDB
        "vote_count",  # TMDB
        "weight",  # OpenLibrary
    ]:
        if field_name in item:
            del item[field_name]

    if media_type in ["films", "tv_series", "tv_episodes"]:
        item["tmdb_id"] = item["id"]
        del item["id"]

        title_key = "name" if "tv_series" == media_type else "title"

        if f"original_{title_key}" in item and "original_language" in item:
            if (
                item[f"original_{title_key}"] == item[title_key]
                and item["original_language"] == "en"
            ):
                del item[f"original_{title_key}"], item["original_language"]

        if "tv_episodes" == media_type:
            item["series"] = {"tmdb_id": item["show_id"]}
            del item["show_id"]

    if "books" == media_type:
        _, _, item["ol_id"] = item["key"].split("/")
        del item["key"]
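
        # OpenLibrary returns ISBNs as lists; keep just the first of each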
        for key in ["isbn_10", "isbn_13"]:
            if key in item:
                if len(item[key]) > 1:
                    logger.warning("Multiple ISBN results")

                item[key] = item[key][0]

        if "languages" in item:
            item["languages"] = [
                lang["key"].split("/")[2] for lang in item["languages"]
            ]

        if "translation_of" in item:
            if not (
                item["translation_of"].split(":")[0].lower()
                == item["work"]["title"].split(":")[0].lower()
            ):
                logger.warning(
                    f"translation_of '{item['translation_of']}' "
                    f"is different to work title '{item['work']['title']}'"
                )
                if "y" != input("Accept change? [y|n]: "):
                    raise Exception(
                        f"translation_of '{item['translation_of']}' "
                        f"is different to work title '{item['work']['title']}'"
                    )
            del item["translation_of"]

        if "translated_from" in item:
            if len(item["translated_from"]) > 1:
                raise Exception("Multiple translated_from results")

            item["work"]["original_language"] = item["translated_from"][0]["key"].split(
                "/"
            )[2]
            del item["translated_from"]

    if "date_added" not in item:
        item["date_added"] = datetime.today().strftime("%Y-%m-%d")

    return item


def main() -> None:
    """Prompt user to select media type and log to process"""

    media_type = ""
    while media_type not in ["films", "tv_episodes", "tv_series", "books"]:
        media_type = input("Select media type [films|tv_episodes|tv_series|books]: ")

    try:
        item_id = ""
        log = ""
        if "films" == media_type:
            while log not in ["log", "wishlist"]:
                log = input("Enter log to update [log|wishlist]: ")

            while re.search("[0-9]+", item_id) is None:
                item_id = input("Enter TMDB ID: ")

        elif "books" == media_type:
            while log not in ["log", "current", "wishlist"]:
                log = input("Enter log to update [log|current|wishlist]: ")

            while re.search("[0-9]+", item_id) is None:
                if "wishlist" == log:
                    item_id = input("Enter OpenLibrary Work ID: ")
                else:
                    item_id = "".join(re.findall(r"\d+", input("Enter ISBN: ")))

        elif "tv_episodes" == media_type:
            log = "log"
            while re.search("(tt)?[0-9]+", item_id) is None:
                item_id = input("Enter TVDB or IMDB ID: ")

        elif "tv_series" == media_type:
            while log not in ["log", "current", "wishlist"]:
                log = input("Enter log to update [log|current|wishlist]: ")

            while re.search("[0-9]+", item_id) is None:
                item_id = input("Enter TMDB ID: ")
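
        # Strip the ID out of whatever was entered: an OL author/book/work
        # ID, an IMDB 'tt' ID or a bare numeric ID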
        item_id_parsed = re.search("(OL|tt)?[0-9]+[WMA]?", item_id)
        if item_id_parsed is not None:
            add_item_to_log(item_id_parsed[0], media_type, log)

    except Exception:
        logger.exception("Exception occurred")


if __name__ == "__main__":
    main()