Compare commits

...

6 Commits

Author SHA1 Message Date
Ben Goldsworthy 9dbcc64188 update log 2024-02-03 23:25:32 +00:00
Ben Goldsworthy 70c8d382b0 ignore script cache files 2024-02-03 23:25:25 +00:00
Ben Goldsworthy b4d64ccd34 fix script 2024-02-03 23:25:14 +00:00
Ben Goldsworthy 1c6cde3f4c process logs 2024-02-03 23:25:04 +00:00
Ben Goldsworthy 4a16cb8490 re-add date_added field from Grouvee export 2024-02-03 16:03:15 +00:00
Ben Goldsworthy 1f5760073f update logs 2024-02-03 15:31:41 +00:00
10 changed files with 201630 additions and 67852 deletions

1
.gitignore vendored
View File

@ -7,3 +7,4 @@ public/
logs/
.env
catalogue_venv/
scripts/caching/

View File

@ -43,59 +43,287 @@
"added_by_id": "9781449373320"
},
{
"Title": "Good-Bye to All That: An Autobiography",
"Author": "Robert Graves",
"ISBN": null,
"ISBN13": null,
"Publisher": "Anchor Books",
"Binding": "Paperback",
"Number of Pages": 347,
"Year Published": "1958",
"Original Publication Year": "1929",
"Date Added": "2013-05-07",
"Date Started": "2024-01-01",
"Read Count": 0
"title": "Goodbye to all that",
"authors": [
{
"id": "OL1097266A",
"name": "Robert Graves",
"personal_name": "Graves, Robert"
}
],
"publish_date": "1960",
"publishers": [
"Penguin"
],
"series": [
"Penguin twentieth century classics"
],
"subjects": [
"Graves, Robert, -- 1895-1985."
],
"languages": [
"eng"
],
"edition_name": "Rev. ed. / with a new prologue and epilogue.",
"isbn_10": "0140180982",
"publish_country": "xxk",
"by_statement": "Robert Graves.",
"number_of_pages": 282,
"covers": [
13074261
],
"work": {
"id": "OL5036954W",
"title": "Good-Bye to All That",
"subjects": [
"Biography",
"British Personal narratives",
"English Authors",
"English Personal narratives",
"European War, 1914-1918",
"Personal narratives",
"World War, 1914-1918",
"World War (1914-1918) fast (OCoLC)fst01180746",
"Biograf\u00eda",
"Social conditions",
"Autores ingleses",
"Novela inglesa",
"BIOGRAPHY & AUTOBIOGRAPHY",
"World war, 1914-1918, personal narratives",
"Authors, biography",
"Authors, english",
"Fiction, general"
]
},
"id": "OL18237727M",
"published_in": [
"Harmondsworth"
],
"date_added": "2024-02-03"
},
{
"Title": "The End of Policing",
"Author": "Alex S. Vitale",
"ISBN": null,
"ISBN13": null,
"Publisher": "Verso",
"Binding": "Kindle Edition",
"Number of Pages": 272,
"Year Published": "2017",
"Original Publication Year": "2017",
"Date Added": "2020-06-05",
"Date Started": "2024-01-01",
"Read Count": 0
"title": "The End of Policing",
"publishers": [
"Verso Books"
],
"publish_date": "October 2017",
"isbn_13": "9781784782900",
"languages": [
"eng"
],
"physical_format": "Ebook",
"work": {
"id": "OL19735566W",
"title": "The End of Policing",
"subjects": [
"Police",
"Police misconduct",
"black lives matter",
"Police-community relations",
"Police brutality",
"African Americans",
"Violence against",
"Social conditions",
"Discrimination in criminal justice administration",
"Race relations",
"POLITICAL SCIENCE",
"Political Freedom & Security",
"Law Enforcement",
"SOCIAL SCIENCE",
"Discrimination & Race Relations",
"Public Policy",
"General",
"BUSINESS & ECONOMICS",
"Infrastructure",
"POLITICAL SCIENCE / Political Freedom & Security / Law Enforcement",
"SOCIAL SCIENCE / Discrimination & Race Relations",
"POLITICAL SCIENCE / Public Policy / General"
]
},
"id": "OL50982392M",
"date_added": "2024-02-03"
},
{
"Title": "France",
"Series": "Lonely Planet",
"Author": "Lonely Planet",
"ISBN13": "9781788680513",
"Publisher": "Lonely Planet Global Limited",
"Binding": "Paperback",
"Number of Pages": 1021,
"Year Published": "2021",
"Original Publication Year": "1994",
"Date Added": "2024-01-02",
"Date Started": "2023-12-25",
"Read Count": 0
"title": "France",
"publishers": [
"Lonely Planet Global"
],
"publish_date": "November 2021",
"covers": [
14575043
],
"edition_name": "Fourteenth edition",
"languages": [
"eng"
],
"physical_format": "Paperback",
"number_of_pages": 1024,
"contributors": [
{
"role": "Additional Author (this edition)",
"name": "Joel Balsam"
},
{
"role": "Additional Author (this edition)",
"name": "Alexis Averbuck"
},
{
"role": "Additional Author (this edition)",
"name": "Oliver Berry"
},
{
"role": "Additional Author (this edition)",
"name": "Celeste Brash"
},
{
"role": "Additional Author (this edition)",
"name": "Stuart Butler"
},
{
"role": "Additional Author (this edition)",
"name": "Jean-Bernard Carillet"
},
{
"role": "Additional Author (this edition)",
"name": "Gregor Clark"
},
{
"role": "Additional Author (this edition)",
"name": "Mark Elliott"
},
{
"role": "Additional Author (this edition)",
"name": "Steve Fallon"
},
{
"role": "Additional Author (this edition)",
"name": "Anita Isalska"
},
{
"role": "Additional Author (this edition)",
"name": "Catherine Le Nevez"
},
{
"role": "Additional Author (this edition)",
"name": "Christopher Pitts"
},
{
"role": "Additional Author (this edition)",
"name": "Regis St Louis"
},
{
"role": "Additional Author (this edition)",
"name": "Ryan Ver Berkmoes"
}
],
"isbn_13": "9781788680523",
"work": {
"id": "OL15419603W",
"title": "France",
"subjects": [
"Guidebooks",
"Travel - Foreign",
"Special Interest - Family",
"Travel",
"France",
"Travel & holiday guides",
"Europe - France"
]
},
"id": "OL50982390M",
"date_added": "2024-02-03"
},
{
"Title": "The Design of Everyday Things",
"Author": "Donald A. Norman",
"ISBN": "0465067107",
"ISBN13": "9780465067107",
"Publisher": "Basic Books",
"Binding": "Paperback",
"Number of Pages": 240,
"Year Published": "2002",
"Original Publication Year": "1988",
"Date Added": "2021-12-01",
"Date Started": "2023-12-24",
"Read Count": 0
"edition_name": "1st Basic paperback.",
"title": "The design of everyday things",
"languages": [
"eng"
],
"subjects": [
"Design, Industrial -- Psychological aspects",
"Human engineering"
],
"publish_country": "nyu",
"by_statement": "Donald A. Norman.",
"publishers": [
"Basic Books"
],
"authors": [
{
"id": "OL224976A",
"name": "Donald A. Norman"
}
],
"number_of_pages": 257,
"publish_date": "2002",
"work_title": [
"Psychology of everyday things"
],
"isbn_10": "0465067107",
"covers": [
14428210
],
"work": {
"id": "OL1879162W",
"title": "The Psychology of Everyday Things",
"first_publish_date": "August 1998",
"subjects": [
"Ergonomie",
"Industrial Psychology",
"Industri\u00eble vormgeving",
"Industrial Design",
"Psychological aspects",
"Psychology textbooks",
"Psychological aspects of Industrial design",
"Textbooks",
"Aspect psychologique",
"Design",
"Humanities textbooks",
"Projetos (administracao)",
"Human engineering",
"Long Now Manual for Civilization",
"New York Times reviewed",
"Cognitive psychology",
"Livres num\u00e9riques",
"Ergonomics",
"E-books",
"BUSINESS & ECONOMICS",
"Industries",
"Retailing",
"Product",
"PSYCHOLOGY",
"Applied Psychology",
"Electronic books",
"Industriell formgivning",
"Psykologiska aspekter",
"Industries / Retailing",
"Mechanical Engineering",
"Engineering & Applied Sciences",
"Industrial & Management Engineering",
"Nonfiction",
"Art",
"Business",
"Industrial design--psychological aspects",
"Psychology, industrial",
"Ts171.4 .n67 1990",
"Ts 171.4 n842p 1990",
"620.8/2",
"Business & economics--industries--retailing",
"Psychology--applied psychology",
"Design--product",
"Industriell formgivning--psykologiska aspekter",
"Ts171.4 .n67 2013",
"745.2019",
"745.2001/9",
"Bus057000 psy003000 des011000"
]
},
"id": "OL3702614M",
"published_in": [
"New York"
],
"date_added": "2021-12-01",
"date_started": "2023-12-24",
"read_count": 0
}
]

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -1,4 +1,94 @@
[
{
"belongs_to_collection": null,
"genres": [
{
"id": 99,
"name": "Documentary"
}
],
"id": 465383,
"imdb_id": "tt7162400",
"original_language": "ar",
"original_title": "Ouroboros",
"overview": "This film is an homage to the Gaza Strip and to the possibility of hope beyond hopelessness. Ouroboros, the symbol of the snake eating its tail, is both end and beginning: death as regeneration. A 74-minute experimental narrative film that turns the destruction of Gaza into a story of heartbreak, Ouroboros asks what it means to be human when humanity has failed. Taking the form of a love story, the film's central character is Diego Marcon, a man who embarks on a circular journey to shed his pain only to experience it, again and again. In the course of a single day, his travel fuses together Native American territories, the ancient Italian city of Matera, a castle in Brittany, and the ruins of the Gaza Strip into a single landscape.",
"poster_path": "/37W4z9dkK77OTLMbDO9KvVADxXX.jpg",
"production_countries": [
{
"iso_3166_1": "BE",
"name": "Belgium"
},
{
"iso_3166_1": "FR",
"name": "France"
},
{
"iso_3166_1": "PS",
"name": "Palestinian Territory"
},
{
"iso_3166_1": "QA",
"name": "Qatar"
}
],
"release_date": "2017-04-29",
"spoken_languages": [
{
"english_name": "Arabic",
"iso_639_1": "ar",
"name": "\u0627\u0644\u0639\u0631\u0628\u064a\u0629"
},
{
"english_name": "English",
"iso_639_1": "en",
"name": "English"
},
{
"english_name": "Italian",
"iso_639_1": "it",
"name": "Italiano"
}
],
"title": "Ouroboros",
"date_added": "2024-02-03",
"date_finished": "2024-02-03",
"added_by_id": "465383",
"comments": "Radical Approaches to Filmmaking course"
},
{
"belongs_to_collection": null,
"genres": [
{
"id": 18,
"name": "Drama"
}
],
"id": 44967,
"imdb_id": "tt0015361",
"original_language": "ru",
"original_title": "\u0421\u0442\u0430\u0447\u043a\u0430",
"overview": "Workers in a factory in pre-revolutionary Russia go on strike and are met by violent suppression.",
"poster_path": "/mrDTaMxQKxWZ3uEs2G58vJWJDDL.jpg",
"production_countries": [
{
"iso_3166_1": "SU",
"name": "Soviet Union"
}
],
"release_date": "1925-04-28",
"spoken_languages": [
{
"english_name": "No Language",
"iso_639_1": "xx",
"name": "No Language"
}
],
"title": "Strike",
"date_added": "2024-02-03",
"date_finished": "2024-02-03",
"added_by_id": "44967",
"comments": "Radical Approaches to Filmmaking course"
},
{
"id": 14572,
"original_language": "ja",
@ -15996,4 +16086,4 @@
"date_finished": null,
"is_repeat": false
}
]
]

View File

@ -1,27 +1,29 @@
[
{
"Title": "Hellblade: Senua's Sacrifice",
"Series": "Hellblade",
"Developers": "Ninja Theory Ltd.",
"Date Released": "2017-08-08",
"Date Started": "2024-01-21",
"GiantBomb ID": "47363"
},
{
"Title": "Ancestors: The Humankind Odyssey",
"Date Started": "2023-08-08",
"Date Finished": "",
"Developers": "Panache Digital Games",
"Date Released": "2019-08-27",
"GiantBomb ID": "49527"
},
{
"Title": "TIS-100",
"Platforms": "PC",
"Date Started": "2016-12-24",
"Date Finished": "",
"Developers": "Zachtronics Industries",
"Date Released": "2015-07-20",
"GiantBomb ID": "49901"
}
]
{
"Title": "Death Stranding",
"Date Started": "2024-01-25",
"Platforms": "PC",
"Developers": "Kojima Productions",
"Date Released": "2019-11-08",
"GiantBomb ID": "54232",
"date_added": "2019-12-04T21:27:08Z"
},
{
"Title": "Ancestors: The Humankind Odyssey",
"Platform": "PC",
"Date Started": "2023-08-08",
"Developers": "Panache Digital Games",
"Date Released": "2019-08-27",
"GiantBomb ID": "49527",
"date_added": "2020-05-24T18:26:59Z"
},
{
"Title": "TIS-100",
"Platforms": "PC",
"Date Started": "2016-12-24",
"Developers": "Zachtronics Industries",
"Date Released": "2015-07-20",
"GiantBomb ID": "49901",
"date_added": "2020-01-06T12:41:38Z"
}
]

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,61 @@
import json

# Merge `date_added` values from a Grouvee export back into the games logs.
# Each entry in ./data/games/{log}.json is matched against the export by
# GiantBomb ID (falling back to a title match when the ID field is empty),
# and the date the game was first shelved is copied onto the entry.
#
# NOTE(review): indentation below is reconstructed from the control-flow
# keywords; confirm nesting against the original script.

# Load the Grouvee export once; it is the sole source of date_added data.
with open("./scripts/grouvee.json", "r", encoding="utf-8") as log_file:
    orig_log_items = json.load(log_file)

for log in ["log", "current", "wishlist"]:
    print(f"Processing {log}")
    with open(f"./data/games/{log}.json", "r", encoding="utf-8") as log_file:
        log_items = json.load(log_file)

    for i, item in enumerate(log_items):
        print(f"Processing {item['Title']}...")
        if "GiantBomb ID" in item:
            # Sentinel one-element list so "lookup never ran" is
            # distinguishable from "lookup ran and found nothing" ([]).
            orig_item = [""]
            if "" != item["GiantBomb ID"]:
                # Prefer an exact GiantBomb ID match.
                orig_item = [
                    orig_item
                    for orig_item in orig_log_items
                    if orig_item["giantbomb_id"] == int(item["GiantBomb ID"])
                ]
            elif "" == item["GiantBomb ID"]:
                # No ID recorded: fall back to matching on the title.
                orig_item = [
                    orig_item
                    for orig_item in orig_log_items
                    if orig_item["name"] == item["Title"]
                ]
            if [] == orig_item:
                # NOTE(review): `break` abandons the rest of this log after
                # the first unmatched item — confirm `continue` wasn't meant.
                print(f"No item {item['Title']} found in original log!")
                log_items[i] = item
                break
            elif 1 < len(orig_item):
                raise Exception(f"Multiple items returned for {item['Title']}!")
            else:
                orig_item = orig_item[0]
                # Take date_added from the highest-priority shelf present;
                # "Played" only counts for the main log.
                if "Wish List" in orig_item["shelves"]:
                    item["date_added"] = orig_item["shelves"]["Wish List"]["date_added"]
                elif "Backlog" in orig_item["shelves"]:
                    item["date_added"] = orig_item["shelves"]["Backlog"]["date_added"]
                elif "Played" in orig_item["shelves"] and "log" == log:
                    item["date_added"] = orig_item["shelves"]["Played"]["date_added"]
                else:
                    print(f"No date_added for {item['Title']}!")
        log_items[i] = item
        print(f"Finished processing {item['Title']}.")

    with open(f"./data/games/{log}.json", "w", encoding="utf-8") as log_file:
        json.dump(log_items, log_file, indent=4)
    print(f"Finished processing {log}.")

View File

@ -13,6 +13,7 @@ from dotenv import load_dotenv
authors = []
def setup_logger(name="add_item"):
"""Set up the logger for console and file"""
@ -50,14 +51,18 @@ if "" == TVDB_API_KEY:
logger.error("TVDB API key not found")
def return_if_exists(item_id, media_type, log) -> dict|None:
def return_if_exists(item_id, media_type, log) -> dict | None:
"""Returns an item if it exists in the requested log"""
logger.info(f"Checking for '{item_id}' in '{log}'")
with open(f"./data/{media_type}/{log}.json", "r", encoding='utf-8') as log_file:
with open(f"./data/{media_type}/{log}.json", "r", encoding="utf-8") as log_file:
log_items = json.load(log_file)
existing_items = [log_item for log_item in log_items if "id" in log_item and log_item['id'] == int(item_id)]
existing_items = [
log_item
for log_item in log_items
if "id" in log_item and log_item["id"] == int(item_id)
]
if len(existing_items) > 0:
logger.info(f"Found item in '{log}'")
return existing_items[-1]
@ -68,15 +73,19 @@ def delete_existing(item_id, media_type, log) -> None:
"""Deletes an item from a log if it matches the ID"""
logger.info(f"Deleting '{item_id}' from '{log}'")
with open(f"./data/{media_type}/{log}.json", "r", encoding='utf-8') as log_file:
with open(f"./data/{media_type}/{log}.json", "r", encoding="utf-8") as log_file:
log_items = json.load(log_file)
old_len = len(log_items)
log_items = [log_item for log_item in log_items if "id" not in log_item or ("id" in log_item and log_item['id'] != int(item_id))]
log_items = [
log_item
for log_item in log_items
if "id" not in log_item or ("id" in log_item and log_item["id"] != int(item_id))
]
if len(log_items) < (old_len - 1):
raise Exception("More than one deletion made, discarding…")
with open(f"./data/{media_type}/{log}.json", "w", encoding='utf-8') as log_file:
with open(f"./data/{media_type}/{log}.json", "w", encoding="utf-8") as log_file:
json.dump(log_items, log_file, indent=4)
logger.info(f"'{item_id}' deleted from '{log}'")
@ -94,8 +103,15 @@ def check_for_existing(item_id, media_type, log) -> dict[dict, str]:
existing_item["is_repeat"] = True
return existing_item, None
for log_to_check in [p_log for p_log in ["log", "current", "wishlist"] if p_log != log]:
if ("current" == log_to_check and media_type in ["books", "games", "tv-series"]) or ("wishlist" == log_to_check and media_type in ["books", "games", "films", "tv-series"]):
for log_to_check in [
p_log for p_log in ["log", "current", "wishlist"] if p_log != log
]:
if (
"current" == log_to_check and media_type in ["books", "games", "tv-series"]
) or (
"wishlist" == log_to_check
and media_type in ["books", "games", "films", "tv-series"]
):
existing_item = return_if_exists(item_id, media_type, log_to_check)
if existing_item is not None:
return existing_item, log_to_check
@ -156,12 +172,12 @@ def add_item_to_log(item_id, media_type, log) -> None:
# Save changes
logger.info(f"Adding {media_type} to {log}")
with open(f"./data/{media_type}/{log}.json", "r", encoding='utf-8') as log_file:
with open(f"./data/{media_type}/{log}.json", "r", encoding="utf-8") as log_file:
log_items = json.load(log_file)
log_items.insert(0, item)
with open(f"./data/{media_type}/{log}.json", "w", encoding='utf-8') as log_file:
with open(f"./data/{media_type}/{log}.json", "w", encoding="utf-8") as log_file:
json.dump(log_items, log_file, indent=4)
logger.info(f"Added {media_type} {item_id} to {log}")
@ -177,10 +193,12 @@ def import_by_id(import_id, media_type) -> dict:
return import_from_tmdb_by_id(import_id, media_type)
if media_type in ["tv-episodes"]:
return #import_from_tvdb_by_id(import_id, media_type)
return # import_from_tvdb_by_id(import_id, media_type)
if media_type in ["books"]:
return import_from_openlibrary_by_id(import_id, media_type)
return import_from_openlibrary_by_id(
"".join(re.findall(r"\d+", import_id)), media_type
)
def import_from_tmdb_by_id(tmdb_id, media_type) -> dict:
@ -191,9 +209,7 @@ def import_from_tmdb_by_id(tmdb_id, media_type) -> dict:
# Sending API request
response = requests.get(
api_url,
headers={"Authorization": f"Bearer {TMDB_API_KEY}"},
timeout=15
api_url, headers={"Authorization": f"Bearer {TMDB_API_KEY}"}, timeout=15
)
# Process the response
@ -212,14 +228,8 @@ def import_from_tmdb_by_id(tmdb_id, media_type) -> dict:
response_data = json.loads(response.text)
if 1 == len(response_data):
item = response_data[0]
elif 0 == len(response_data):
raise Exception(f"Returned no results for {tmdb_id}")
# Modify the returned result to add additional data
return cleanup_result(item, media_type)
return cleanup_result(response_data, media_type)
def import_from_openlibrary_by_id(isbn, media_type) -> dict:
@ -253,10 +263,12 @@ def import_from_openlibrary_by_id(isbn, media_type) -> dict:
for i, sub_item in enumerate(item[key]):
item[key][i] = import_from_openlibrary_by_ol_key(sub_item["key"])
if "works" in item:
if len(item["works"]) > 1:
raise Exception(f"Multiple works found for {isbn}")
print(f"Multiple works found for {isbn}:")
print(item["works"])
idx = input(f"Select ID to use [0-{len(item['works'])-1}]: ")
item["works"][0] = item["works"][int(idx)]
item["work"] = item["works"][0]
del item["works"]
@ -275,20 +287,28 @@ def import_from_openlibrary_by_ol_key(key) -> dict:
_, mode, ol_id = key.split("/")
if "authors" == mode:
with open(f"./scripts/caching/authors.json", "r", encoding='utf-8') as authors_cache:
with open(
f"./scripts/caching/authors.json", "r", encoding="utf-8"
) as authors_cache:
cached_authors = json.load(authors_cache)
if mode in ["works", "authors"]:
if "authors" == mode:
matched_cached_authors = [aut for aut in cached_authors if aut['id'] == ol_id]
matched_cached_authors = [
aut for aut in cached_authors if aut["id"] == ol_id
]
if len(matched_cached_authors) == 1:
logging.info(f"Found cached author '{matched_cached_authors[0]['name']}'")
logging.info(
f"Found cached author '{matched_cached_authors[0]['name']}'"
)
return matched_cached_authors[0]
api_url = f"https://openlibrary.org{key}"
# Sending API request
response = requests.get(api_url, headers={"accept": "application/json"}, timeout=15)
response = requests.get(
api_url, headers={"accept": "application/json"}, timeout=15
)
# Process the response
if 200 == response.status_code:
@ -316,9 +336,7 @@ def import_from_openlibrary_by_ol_key(key) -> dict:
logger.info(f"Caching author '{author['name']}'")
cached_authors.append(author)
with open(
f"./scripts/caching/authors.json",
"w",
encoding='utf-8'
f"./scripts/caching/authors.json", "w", encoding="utf-8"
) as authors_cache:
json.dump(cached_authors, authors_cache, indent=4)
logger.info(f"Author '{author['name']}' cached!")
@ -345,39 +363,45 @@ def cleanup_result(item, media_type) -> dict:
for field_name in [
"adult", # TMDB
"backdrop_path", # TMDB
"budget", # TMDB
"copyright_date", # OpenLibrary
"classifications", # OpenLibrary
"created", # OpenLibrary
"dewey_decimal_class", # OpenLibrary
"dewey_decimal_class", # OpenLibrary
"episode_type", # TMDB
"first_sentence", # OpenLibrary
"genre_ids", # TMDB
"homepage", # TMDB
"identifiers", # OpenLibrary
"media_type", # TMDB
"last_modified", # OpenLibrary
"latest_revision", # OpenLibrary
"lc_classifications", # OpenLibrary
"lccn", # OpenLibrary
"lccn", # OpenLibrary
"local_id", # OpenLibrary
"notes", # OpenLibrary
"notes", # OpenLibrary
"ocaid", # OpenLibrary
"oclc_numbers", # OpenLibrary
"pagination", # OpenLibrary
"physical_dimensions", # OpenLibrary
"pagination", # OpenLibrary
"physical_dimensions", # OpenLibrary
"popularity", # TMDB
"production_code", # TMDB
"production_companies", # TMDB
"revenue", # TMDB
"revision", # OpenLibrary
"runtime", # TMDB
"source_records", # OpenLibrary
"status", # TMDB
"still_path", # TMDB
"table_of_contents", # OpenLibrary
"table_of_contents", # OpenLibrary
"tagline", # TMDB
"type", # OpenLibrary
"uri_descriptions", # OpenLibrary
"url", # OpenLibrary
"uri_descriptions", # OpenLibrary
"url", # OpenLibrary
"video", # TMDB
"vote_average", # TMDB
"vote_count", # TMDB
"weight", # OpenLibrary
"weight", # OpenLibrary
]:
if field_name in item:
del item[field_name]
@ -413,21 +437,28 @@ def cleanup_result(item, media_type) -> dict:
]
if "translation_of" in item:
if item["translation_of"].split(":")[0].lower() == item["work"]["title"].split(":")[0].lower():
del item["translation_of"]
else:
raise Exception(
if not (
item["translation_of"].split(":")[0].lower()
== item["work"]["title"].split(":")[0].lower()
):
logger.warn(
f"translation_of '{item['translation_of']}' \
is different to work title '{item['work']['title']}'"
)
if 'y' != input("Accept change? [y|n]: "):
raise Exception(
f"translation_of '{item['translation_of']}' \
is different to work title '{item['work']['title']}'"
)
del item["translation_of"]
if "translated_from" in item:
if len(item["translated_from"]) > 1:
raise Exception("Multiple translated_from results")
item["work"]["original_language"] = item["translated_from"][0][
"key"
].split("/")[2]
item["work"]["original_language"] = item["translated_from"][0]["key"].split(
"/"
)[2]
del item["translated_from"]
if "date_added" not in item:
@ -459,7 +490,7 @@ def main() -> None:
log = input("Enter log to update [log|current|wishlist]: ")
while re.search("[0-9]+", item_id) is None:
item_id = input("Enter ISBN: ")
item_id = "".join(re.findall(r"\d+", input("Enter ISBN: ")))
elif "tv-episodes" == media_type:
log = "log"