Cataloguer/scripts/add_item.py

583 lines
19 KiB
Python
Raw Normal View History

2024-01-17 22:02:56 +00:00
"""
Add a new item to a media catalogue, using various APIs:
- TV series' and films using the TMDB API and IDs;
- TV episodes using the TMDB API and TVDB IDs (because the TMDB
API is difficult and a lot of TMDB records don't have IMDB IDs);
- books using the OpenLibrary API and ISBNs; and
- games using the GiantBomb API and IDs.
2024-01-17 22:02:56 +00:00
"""
2024-01-09 22:35:47 +00:00
import json
import logging
2024-01-14 14:00:07 +00:00
import os
2024-01-09 22:35:47 +00:00
import re
2024-01-17 22:02:56 +00:00
import time
from datetime import datetime
2024-01-09 22:35:47 +00:00
import requests
2024-01-17 22:02:56 +00:00
from dotenv import load_dotenv
2024-01-09 22:35:47 +00:00
2024-01-23 18:57:22 +00:00
authors = []
2024-02-03 23:25:14 +00:00
def setup_logger(name="add_item"):
    """Set up a logger writing INFO+ to the console and WARNING+ to ./logs/run.log.

    Safe to call more than once for the same name: handlers are only
    attached the first time, so repeated calls don't duplicate output.
    """
    # FileHandler raises FileNotFoundError if the directory is missing
    os.makedirs("./logs", exist_ok=True)

    logr = logging.getLogger(name)

    # Root level must allow INFO through because some call sites use the
    # module-level logging.info(...) helpers (which log via the root logger).
    logging.root.setLevel(logging.INFO)

    if not logr.handlers:
        c_handler = logging.StreamHandler()
        f_handler = logging.FileHandler("./logs/run.log")

        c_handler.setLevel(logging.INFO)
        f_handler.setLevel(logging.WARNING)

        c_format = logging.Formatter("%(name)s - %(levelname)s - %(message)s")
        f_format = logging.Formatter(
            "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
        )

        c_handler.setFormatter(c_format)
        f_handler.setFormatter(f_format)

        logr.addHandler(c_handler)
        logr.addHandler(f_handler)

    return logr
2024-01-17 19:23:35 +00:00
logger = setup_logger()

# Load environment variables (TMDB_API_KEY) from a .env file if present
load_dotenv()
TMDB_API_KEY = os.getenv("TMDB_API_KEY")

# os.getenv returns None (not "") when the variable is missing, so the
# original `"" == TMDB_API_KEY` check never fired; `not` covers both cases.
if not TMDB_API_KEY:
    logger.error("TMDB API key not found")
2024-01-14 15:11:01 +00:00
2024-02-03 23:25:14 +00:00
def _log_item_id_matches(log_item, item_id) -> bool:
    """True when log_item's 'id' equals item_id.

    Compares numerically when item_id is numeric; falls back to string
    comparison so non-numeric IDs (e.g. OpenLibrary keys like 'OL45804W')
    don't raise ValueError as the original int() conversion did.
    """
    if "id" not in log_item:
        return False
    try:
        return log_item["id"] == int(item_id)
    except (TypeError, ValueError):
        return str(log_item["id"]) == str(item_id)


def return_if_exists(item_id, media_type, log) -> dict | None:
    """Return the most recent item matching item_id from
    ./data/{media_type}/{log}.json, or None if it isn't there."""
    logger.info(f"Checking for '{item_id}' in '{log}'")

    with open(f"./data/{media_type}/{log}.json", "r", encoding="utf-8") as log_file:
        log_items = json.load(log_file)

    existing_items = [
        log_item for log_item in log_items if _log_item_id_matches(log_item, item_id)
    ]

    if existing_items:
        logger.info(f"Found item in '{log}'")
        # Most recent entry wins if there are somehow duplicates
        return existing_items[-1]

    logger.info(f"'{item_id}' not found in '{log}'")
    return None
def delete_existing(item_id, media_type, log) -> None:
    """Delete the item whose 'id' matches item_id from
    ./data/{media_type}/{log}.json.

    Raises if more than one entry would be removed; in that case the file
    is left untouched (the raise happens before the write-back).
    """
    logger.info(f"Deleting '{item_id}' from '{log}'")

    with open(f"./data/{media_type}/{log}.json", "r", encoding="utf-8") as log_file:
        log_items = json.load(log_file)

    old_len = len(log_items)

    def _matches(log_item) -> bool:
        # Numeric compare when possible; string compare otherwise, so
        # non-numeric IDs (e.g. OpenLibrary keys) don't raise ValueError.
        if "id" not in log_item:
            return False
        try:
            return log_item["id"] == int(item_id)
        except (TypeError, ValueError):
            return str(log_item["id"]) == str(item_id)

    log_items = [log_item for log_item in log_items if not _matches(log_item)]

    if len(log_items) < (old_len - 1):
        raise Exception("More than one deletion made, discarding…")

    with open(f"./data/{media_type}/{log}.json", "w", encoding="utf-8") as log_file:
        json.dump(log_items, log_file, indent=4)

    logger.info(f"'{item_id}' deleted from '{log}'")
def check_for_existing(item_id, media_type, log) -> tuple[dict | None, str | None]:
    """
    Check for an existing item in the current log, and pull the
    `date_added` etc. and mark it as a repeat if so.
    Otherwise, check for an existing item in the other logs, and move
    it to the specified log if so.

    Returns (item, source_log): source_log is None when the item came from
    the requested log itself, or when nothing was found at all.
    (The original annotation `dict[dict, str]` was invalid — this function
    has always returned a 2-tuple.)
    """
    logger.info(f"Checking for '{item_id}' in logs…")

    # Check in the specified log first
    existing_item = return_if_exists(item_id, media_type, log)
    if existing_item is not None:
        if "log" == log:
            # Re-adding to the main log means this is a repeat read/watch
            existing_item["is_repeat"] = True
        return existing_item, None

    # Then check the other logs this media type can legitimately appear in
    for log_to_check in [
        p_log for p_log in ["log", "current", "wishlist"] if p_log != log
    ]:
        if (
            "current" == log_to_check and media_type in ["books", "games", "tv-series"]
        ) or (
            "wishlist" == log_to_check
            and media_type in ["books", "games", "films", "tv-series"]
        ):
            existing_item = return_if_exists(item_id, media_type, log_to_check)
            if existing_item is not None:
                return existing_item, log_to_check

    return None, None
2024-01-17 22:02:56 +00:00
def add_item_to_log(item_id, media_type, log) -> None:
    """Add a film, book, TV series or TV episode to a log.

    Prompts interactively for dates, repeat status and comments, shows the
    final record for confirmation, then prepends it to
    ./data/{media_type}/{log}.json. If the item was found in another log,
    it is deleted from there afterwards (i.e. moved).
    """
    logger.info(f"Processing {item_id}")

    item = None
    log_to_delete = None

    # TV episodes are never deduplicated up front; books are deduplicated
    # after import, because their OL IDs only come back with the API response.
    # NOTE(review): this also skips the check whenever log == "wishlist" for
    # any media type — confirm that is intended.
    if "tv-episodes" != media_type and ("books" != media_type and "wishlist" != log):
        item, log_to_delete = check_for_existing(item_id, media_type, log)

    if item is None:
        item = import_by_id(item_id, media_type, log)
        if item is None:
            raise Exception("No item found")

    if "books" == media_type and "wishlist" != log:
        # Match on the work's OL ID first, then fall back to the edition's.
        # The original assigned the first lookup straight to `item`, so a
        # miss (None) crashed the second lookup with a TypeError and the
        # freshly imported item was lost. Only replace `item` on a hit.
        existing, log_to_delete = check_for_existing(
            item["work"]["ol_id"], media_type, log
        )
        if existing is None:
            existing, log_to_delete = check_for_existing(
                item["ol_id"], media_type, log
            )
        if existing is not None:
            item = existing

    if log in ["log", "current"]:
        if "date_started" not in item and media_type in ["books", "tv-series", "games"]:
            date_started = ""
            while re.search("[0-9]{4}-[0-9]{2}-[0-9]{2}", date_started) is None:
                date_started = input("Enter date started [YYYY-MM-DD, t for today]: ")
                if "t" == date_started:
                    date_started = datetime.today().strftime("%Y-%m-%d")
            item["date_started"] = date_started

        if "date_finished" not in item and "log" == log:
            date_finished = ""
            while re.search("[0-9]{4}-[0-9]{2}-[0-9]{2}", date_finished) is None:
                date_finished = input("Enter date finished [YYYY-MM-DD, t for today]: ")
                if "t" == date_finished:
                    date_finished = datetime.today().strftime("%Y-%m-%d")
            item["date_finished"] = date_finished

        if "is_repeat" not in item:
            is_repeat = ""
            while is_repeat not in ["y", "n"]:
                is_repeat = input("Is this a repeat entry? [y/n]: ")
            if "y" == is_repeat:
                item["is_repeat"] = True

    if "added_by_id" not in item:
        # Remember the raw ID the user typed (ISBN / TMDB / TVDB / IMDB)
        item["added_by_id"] = item_id

    if "comments" not in item:
        comments = input("Enter comments (optional): ")
        if "" != comments:
            item["comments"] = comments

    # Validation step
    print(f"{media_type} data to add:\n")
    print(json.dumps(item, indent=4))
    if "y" != input("\nDoes this look correct? [y]: "):
        return

    # Save changes
    logger.info(f"Adding {media_type} to {log}")

    with open(f"./data/{media_type}/{log}.json", "r", encoding="utf-8") as log_file:
        log_items = json.load(log_file)

    log_items.insert(0, item)

    with open(f"./data/{media_type}/{log}.json", "w", encoding="utf-8") as log_file:
        json.dump(log_items, log_file, indent=4)

    logger.info(f"Added {media_type} {item_id} to {log}")

    # If the item was found in a different log, this is a move: remove it there
    if log_to_delete is not None:
        delete_existing(item_id, media_type, log_to_delete)
2024-01-14 14:00:07 +00:00
2024-02-15 11:28:11 +00:00
def import_by_id(import_id, media_type, log) -> dict | None:
    """Import an item from the appropriate API by unique ID.

    Returns None for media types with no importer (e.g. games) and when
    the OpenLibrary lookup 404s — callers must handle a None result.
    """
    if media_type in ["films", "tv-series"]:
        return import_from_tmdb_by_id(import_id, media_type)

    if media_type in ["tv-episodes"]:
        return import_from_tmdb_by_external_id(import_id, media_type)

    if media_type in ["books"]:
        if "wishlist" == log:
            # Wishlist books are tracked by OpenLibrary work ID, not ISBN
            return import_from_openlibrary_by_ol_key(import_id)
        # Keep only the digits of the ISBN (drops hyphens/spaces)
        return import_from_openlibrary_by_isbn(
            "".join(re.findall(r"\d+", import_id)), media_type
        )

    return None
2024-01-14 14:00:07 +00:00
def import_from_tmdb_by_external_id(external_id, media_type) -> dict:
    """Retrieve a film, TV show or TV episode from TMDB using an IMDB or TVDB ID."""
    api_url = f"https://api.themoviedb.org/3/find/{external_id}"

    # IMDB IDs look like 'tt1234567'; anything else is treated as a TVDB ID
    response = requests.get(
        api_url,
        headers={"Authorization": f"Bearer {TMDB_API_KEY}"},
        params={
            "external_source": "imdb_id"
            if re.search("tt[0-9]+", external_id)
            else "tvdb_id"
        },
        timeout=15,
    )

    # Process the response
    if 200 == response.status_code:
        logger.debug(response.status_code)
    elif 429 == response.status_code:
        # Rate limited — back off briefly, then retry
        time.sleep(2)
        return import_from_tmdb_by_external_id(external_id, media_type)
    else:
        raise Exception(f"Error {response.status_code}: {response.text}")

    key = ""
    if "tv-episodes" == media_type:
        key = "tv_episode_results"
    elif "tv-series" == media_type:
        key = "tv_results"
    elif "films" == media_type:
        key = "movie_results"

    # The original indexed [0] first and then compared to None, so an empty
    # result list raised IndexError and the friendly message was unreachable.
    results = json.loads(response.text)[key]
    if not results:
        raise Exception(f"Nothing found for TVDB ID {external_id}!")

    # Modify the returned result to add additional data
    return cleanup_result(results[0], media_type)
def import_from_tmdb_by_id(tmdb_id, media_type) -> dict:
    """Retrieve a film or TV series from TMDB by its TMDB ID."""
    # Films and TV series live under different API paths
    api_path = "movie" if "films" == media_type else "tv"
    api_url = f"https://api.themoviedb.org/3/{api_path}/{tmdb_id}"

    # Send the API request
    response = requests.get(
        api_url, headers={"Authorization": f"Bearer {TMDB_API_KEY}"}, timeout=15
    )

    # Handle the response by status code
    status = response.status_code
    if status == 200:
        logger.debug(status)
    elif status == 429:
        # Rate limited — back off briefly and retry the same lookup
        time.sleep(2)
        return import_from_tmdb_by_id(tmdb_id, media_type)
    else:
        raise Exception(f"Error {response.status_code}: {response.text}")

    response_data = json.loads(response.text)

    # Trim/enrich the raw payload before returning it
    return cleanup_result(response_data, media_type)
def import_from_openlibrary_by_isbn(isbn, media_type) -> dict | None:
    """Retrieve a book edition from OpenLibrary using an ISBN.

    (The original docstring was copy-pasted from the TMDB importer.)
    Returns None when OpenLibrary has no record for the ISBN.
    """
    # Use the module logger, not the root logger, so output goes through
    # the configured console/file handlers.
    logger.info(f"Importing '{isbn}'")
    api_url = f"https://openlibrary.org/isbn/{isbn}"

    # Sending API request
    response = requests.get(api_url, headers={"accept": "application/json"}, timeout=15)

    # Process the response
    if 200 == response.status_code:
        logger.debug(response.status_code)
    elif 429 == response.status_code:
        # Rate limited — back off briefly, then retry
        time.sleep(2)
        return import_from_openlibrary_by_isbn(isbn, media_type)
    elif 404 == response.status_code:
        logger.error(f"{response.status_code}: Not Found for ISBN '{isbn}'")
        return None
    else:
        raise Exception(f"Error {response.status_code}: {response.text}")

    item = json.loads(response.text)

    # Replace author/work key references with the full records
    for key in ["authors", "works"]:
        if key in item:
            for i, sub_item in enumerate(item[key]):
                item[key][i] = import_from_openlibrary_by_ol_key(sub_item["key"])

    if "works" in item:
        if len(item["works"]) > 1:
            # Ambiguous edition — let the user pick which work this ISBN is
            print(f"Multiple works found for {isbn}:")
            print(item["works"])
            idx = input(f"Select ID to use [0-{len(item['works'])-1}]: ")
            item["works"][0] = item["works"][int(idx)]

        item["work"] = item["works"][0]
        del item["works"]

    # Rate limiting
    time.sleep(1)

    # Modify the returned result to add additional data
    return cleanup_result(item, media_type)
2024-01-17 22:02:56 +00:00
def import_from_openlibrary_by_ol_key(key) -> dict:
    """Retrieves an item (author or work, NOT edition) from OpenLibrary using
    an OL key such as '/works/OL45804W' or '/authors/OL34184A'.

    Bare IDs (no slashes) are assumed to be work IDs. Authors are cached in
    ./scripts/caching/authors.json to avoid repeated API round-trips.
    """
    if len(key.split("/")) == 1:
        key = f"/works/{key}"

    logger.info(f"Retrieving {key}")
    _, mode, ol_id = key.split("/")

    if mode not in ["works", "authors"]:
        # Fail before making a pointless API request
        raise Exception(f"Unknown OpenLibrary key '{mode}'")

    cached_authors = []
    if "authors" == mode:
        with open(
            "./scripts/caching/authors.json", "r", encoding="utf-8"
        ) as authors_cache:
            cached_authors = json.load(authors_cache)

        # Authors are cached below under 'ol_id', but the original looked up
        # aut["id"] (KeyError / never matched). Accept either key for
        # compatibility with any older cache entries.
        matched_cached_authors = [
            aut for aut in cached_authors if aut.get("ol_id", aut.get("id")) == ol_id
        ]
        if len(matched_cached_authors) == 1:
            logger.info(f"Found cached author '{matched_cached_authors[0]['name']}'")
            return matched_cached_authors[0]

    api_url = f"https://openlibrary.org{key}"

    # Sending API request
    response = requests.get(api_url, headers={"accept": "application/json"}, timeout=15)

    # Process the response
    if 200 == response.status_code:
        logger.debug(response.status_code)
    elif 429 == response.status_code:
        time.sleep(2)
        # The original was missing this `return`, so it fell through and
        # parsed the stale 429 response body below.
        return import_from_openlibrary_by_ol_key(key)
    else:
        raise Exception(f"Error {response.status_code}: {response.text}")

    # Rate limiting
    time.sleep(1)

    item = json.loads(response.text)

    if "authors" == mode:
        author = {"ol_id": ol_id, "name": item["name"]}

        # Keep personal_name only when it differs from the display name
        if "personal_name" in item and item["name"] != item["personal_name"]:
            author["personal_name"] = item["personal_name"]

        logger.info(f"Caching author '{author['name']}'")
        cached_authors.append(author)
        with open(
            "./scripts/caching/authors.json", "w", encoding="utf-8"
        ) as authors_cache:
            json.dump(cached_authors, authors_cache, indent=4)
        logger.info(f"Author '{author['name']}' cached!")

        return author

    # mode == "works" (the only other possibility after the guard above)
    work = {"ol_id": ol_id, "title": item["title"]}
    for result_key in ["first_publish_date", "subjects"]:
        if result_key in item:
            work[result_key] = item[result_key]
    return work
2024-01-17 22:02:56 +00:00
def cleanup_result(item, media_type) -> dict:
    """Process a film, TV series, TV episode or book returned by their
    respective APIs by removing unnecessary fields and adding others.

    Mutates and returns `item`. Adds `tmdb_id`/`ol_id` in place of the
    APIs' generic `id`/`key` fields and stamps `date_added` if missing.
    """
    # Strip fields we never store, from either API
    for field_name in [
        "adult",  # TMDB
        "backdrop_path",  # TMDB
        "budget",  # TMDB
        "copyright_date",  # OpenLibrary
        "classifications",  # OpenLibrary
        "created",  # OpenLibrary
        "dewey_decimal_class",  # OpenLibary
        "episode_type",  # TMDB
        "first_sentence",  # OpenLibrary
        "genre_ids",  # TMDB
        "homepage",  # TMDB
        "identifiers",  # OpenLibrary
        "media_type",  # TMDB
        "last_modified",  # OpenLibrary
        "latest_revision",  # OpenLibrary
        "lc_classifications",  # OpenLibrary
        "lccn",  # OpenLibrary
        "local_id",  # OpenLibrary
        "notes",  # OpenLibrary
        "ocaid",  # OpenLibrary
        "oclc_numbers",  # OpenLibrary
        "pagination",  # OpenLibrary
        "physical_dimensions",  # OpenLibrary
        "popularity",  # TMDB
        "production_code",  # TMDB
        "production_companies",  # TMDB
        "publish_places",  # OpenLibrary
        "revenue",  # TMDB
        "revision",  # OpenLibrary
        "runtime",  # TMDB
        "source_records",  # OpenLibrary
        "status",  # TMDB
        "still_path",  # TMDB
        "table_of_contents",  # OpenLibrary
        "tagline",  # TMDB
        "type",  # OpenLibrary
        "uri_descriptions",  # OpenLibrary
        "url",  # OpenLibrary
        "video",  # TMDB
        "vote_average",  # TMDB
        "vote_count",  # TMDB
        "weight",  # OpenLibrary
    ]:
        if field_name in item:
            del item[field_name]

    if media_type in ["films", "tv-series", "tv-episodes"]:
        # Rename TMDB's generic `id` to an explicit `tmdb_id`
        item["tmdb_id"] = item["id"]
        del item["id"]

        title_key = "name" if "tv-series" == media_type else "title"
        # Drop redundant original-title fields for English-language items
        if f"original_{title_key}" in item and "original_language" in item:
            if (
                item[f"original_{title_key}"] == item[title_key]
                and item["original_language"] == "en"
            ):
                del item[f"original_{title_key}"], item["original_language"]

    if "tv-episodes" == media_type:
        # Nest the parent show's TMDB ID under `series`
        item["series"] = {"tmdb_id": item["show_id"]}
        del item["show_id"]

    if "books" == media_type:
        # '/books/OL12345M' -> 'OL12345M'
        _, _, item["ol_id"] = item["key"].split("/")
        del item["key"]

        for key in ["isbn_10", "isbn_13"]:
            if key in item:
                if len(item[key]) > 1:
                    logger.warning("Multiple ISBN results")
                item[key] = item[key][0]

        if "languages" in item:
            # '/languages/eng' -> 'eng'
            item["languages"] = [
                lang["key"].split("/")[2] for lang in item["languages"]
            ]

        if "translation_of" in item:
            # Compare up to the first ':' to ignore differing subtitles
            if not (
                item["translation_of"].split(":")[0].lower()
                == item["work"]["title"].split(":")[0].lower()
            ):
                # logger.warn is deprecated — use logger.warning
                logger.warning(
                    f"translation_of '{item['translation_of']}' "
                    f"is different to work title '{item['work']['title']}'"
                )
                if "y" != input("Accept change? [y|n]: "):
                    raise Exception(
                        f"translation_of '{item['translation_of']}' "
                        f"is different to work title '{item['work']['title']}'"
                    )
            del item["translation_of"]

        if "translated_from" in item:
            if len(item["translated_from"]) > 1:
                raise Exception("Multiple translated_from results")
            item["work"]["original_language"] = item["translated_from"][0][
                "key"
            ].split("/")[2]
            del item["translated_from"]

    if "date_added" not in item:
        item["date_added"] = datetime.today().strftime("%Y-%m-%d")

    return item
2024-01-09 22:35:47 +00:00
2024-01-12 21:36:10 +00:00
2024-01-17 22:02:56 +00:00
def main() -> None:
    """Prompt user to select media type and log to process."""

    def _choose_log(options):
        # Keep asking until one of the allowed log names is entered
        choice = ""
        while choice not in options:
            choice = input(f"Enter log to update [{'|'.join(options)}]: ")
        return choice

    media_type = ""
    while media_type not in ["films", "tv-episodes", "tv-series", "books"]:
        media_type = input("Select media type [films|tv-episodes|tv-series|books]: ")

    try:
        item_id = ""

        if "films" == media_type:
            log = _choose_log(["log", "wishlist"])
            while re.search("[0-9]+", item_id) is None:
                item_id = input("Enter TMDB ID: ")

        elif "books" == media_type:
            log = _choose_log(["log", "current", "wishlist"])
            while re.search("[0-9]+", item_id) is None:
                if "wishlist" == log:
                    item_id = input("Enter OpenLibrary Work ID: ")
                else:
                    # Keep only the digits of the ISBN (drops hyphens/spaces)
                    item_id = "".join(re.findall(r"\d+", input("Enter ISBN: ")))

        elif "tv-episodes" == media_type:
            log = "log"
            # Accepts either a bare TVDB number or an IMDB 'tt…' ID
            while re.search("(tt)?[0-9]+", item_id) is None:
                item_id = input("Enter TVDB or IMDB ID: ")

        elif "tv-series" == media_type:
            log = _choose_log(["log", "current", "wishlist"])
            while re.search("[0-9]+", item_id) is None:
                item_id = input("Enter TMDB ID: ")

        # Pass only the matched ID portion through to the importer
        add_item_to_log(re.search("(tt)?[0-9]+", item_id)[0], media_type, log)

    except Exception:
        # Top-level boundary: log the full traceback rather than crash
        logger.exception("Exception occurred")


if __name__ == "__main__":
    main()