update logs and scripts

This commit is contained in:
Ben Goldsworthy 2024-01-17 20:23:35 +01:00
parent 06957e053f
commit cc6e4de409
6 changed files with 44762 additions and 44602 deletions

View File

@ -1,58 +1,101 @@
[
{
"Title": "Good-Bye to All That: An Autobiography",
"Author": "Robert Graves",
"ISBN": null,
"ISBN13": null,
"Publisher": "Anchor Books",
"Binding": "Paperback",
"Number of Pages": 347,
"Year Published": "1958",
"Original Publication Year": "1929",
"Date Added": "2013-05-07",
"Date Started": "2024-01-01",
"Read Count": 0
},
{
"Title": "The End of Policing",
"Author": "Alex S. Vitale",
"ISBN": null,
"ISBN13": null,
"Publisher": "Verso",
"Binding": "Kindle Edition",
"Number of Pages": 272,
"Year Published": "2017",
"Original Publication Year": "2017",
"Date Added": "2020-06-05",
"Date Started": "2024-01-01",
"Read Count": 0
},
{
"Title": "France",
"Series": "Lonely Planet",
"Author": "Lonely Planet",
"ISBN13": "9781788680513",
"Publisher": "Lonely Planet Global Limited",
"Binding": "Paperback",
"Number of Pages": 1021,
"Year Published": "2021",
"Original Publication Year": "1994",
"Date Added": "2024-01-02",
"Date Started": "2023-12-25",
"Read Count": 0
},
{
"Title": "The Design of Everyday Things",
"Author": "Donald A. Norman",
"ISBN": "0465067107",
"ISBN13": "9780465067107",
"Publisher": "Basic Books",
"Binding": "Paperback",
"Number of Pages": 240,
"Year Published": "2002",
"Original Publication Year": "1988",
"Date Added": "2021-12-01",
"Date Started": "2023-12-24",
"Read Count": 0
}
]
{
"publishers": [
"O'Reilly Media"
],
"title": "Designing Data-Intensive Applications: The Big Ideas Behind Reliable, Scalable, and Maintainable Systems",
"number_of_pages": 624,
"covers": [
8434671
],
"isbn_13": "9781449373320",
"isbn_10": "1449373321",
"publish_date": "Apr 02, 2017",
"authors": [
{
"id": "OL7477772A",
"name": "Martin Kleppmann"
}
],
"oclc_numbers": [
"976434277"
],
"work": {
"id": "OL19293745W",
"title": "Designing Data-Intensive Applications",
"subjects": [
"Development",
"Web site development",
"Application software",
"Database management",
"Databases",
"COMPUTERS",
"Desktop Applications",
"Project Management software",
"Application software--development",
"Tk5105.888 .q44 2017",
"005.276"
]
},
"id": "OL26780701M",
"date_added": "2024-01-17",
"date_started": "2024-01-17",
"added_by_id": "9781449373320"
},
{
"Title": "Good-Bye to All That: An Autobiography",
"Author": "Robert Graves",
"ISBN": null,
"ISBN13": null,
"Publisher": "Anchor Books",
"Binding": "Paperback",
"Number of Pages": 347,
"Year Published": "1958",
"Original Publication Year": "1929",
"Date Added": "2013-05-07",
"Date Started": "2024-01-01",
"Read Count": 0
},
{
"Title": "The End of Policing",
"Author": "Alex S. Vitale",
"ISBN": null,
"ISBN13": null,
"Publisher": "Verso",
"Binding": "Kindle Edition",
"Number of Pages": 272,
"Year Published": "2017",
"Original Publication Year": "2017",
"Date Added": "2020-06-05",
"Date Started": "2024-01-01",
"Read Count": 0
},
{
"Title": "France",
"Series": "Lonely Planet",
"Author": "Lonely Planet",
"ISBN13": "9781788680513",
"Publisher": "Lonely Planet Global Limited",
"Binding": "Paperback",
"Number of Pages": 1021,
"Year Published": "2021",
"Original Publication Year": "1994",
"Date Added": "2024-01-02",
"Date Started": "2023-12-25",
"Read Count": 0
},
{
"Title": "The Design of Everyday Things",
"Author": "Donald A. Norman",
"ISBN": "0465067107",
"ISBN13": "9780465067107",
"Publisher": "Basic Books",
"Binding": "Paperback",
"Number of Pages": 240,
"Year Published": "2002",
"Original Publication Year": "1988",
"Date Added": "2021-12-01",
"Date Started": "2023-12-24",
"Read Count": 0
}
]

View File

@ -1,4 +1,51 @@
[
{
"title": "Le D\u00e9clic - Int\u00e9grale noir et blanc",
"authors": [
{
"id": "OL46833A",
"name": "Milo Manara"
}
],
"publish_date": "Sep 02, 2009",
"number_of_pages": 240,
"publishers": [
"\u00c9ditions Gl\u00e9nat"
],
"physical_format": "hardcover",
"covers": [
13995197
],
"languages": [
"fre"
],
"description": "Le monument de l'\u00e9rotisme en bande dessin\u00e9e.\r\n\r\nDans un nouveau format, dans une traduction r\u00e9vis\u00e9e et pr\u00e9sent\u00e9e avec un nouveau lettrage, retrouvez l'int\u00e9grale des quatre volumes du chef-d'oeuvre \u00e9rotique de Milo Manara. En parall\u00e8le \u00e0 la nouvelle \u00e9dition en couleurs des volumes unitaires grand format, voici en un volume unique, pour bibliophiles avertis, la BD culte dans sa pr\u00e9sentation d'origine en noir et blanc. Plus charnel, sensuel et torride que jamais, laissez-vous happer par le tourbillon des sens, et succombez aux d\u00e9lices du plaisir.",
"isbn_10": "2723472442",
"isbn_13": "9782723472449",
"contributors": [
{
"role": "Translator",
"name": "Aurore Schmid"
}
],
"work": {
"id": "OL608217W",
"title": "Il gioco",
"subjects": [
"Erotic literature",
"Fiction, erotica",
"Comics & graphic novels, general"
],
"original_language": "ita"
},
"id": "OL38885077M",
"published_in": "Grenoble",
"date_added": "2024-01-16",
"date_started": "2024-01-16",
"date_finished": "2024-01-16",
"added_by_id": "9782723472449",
"comments": "Read in French"
},
{
"title": "Le K\u00e2ma S\u00fbtra",
"publishers": [
@ -9570,4 +9617,4 @@
"Date Finished": "",
"Read Count": 1
}
]
]

File diff suppressed because it is too large Load Diff

View File

@ -1,4 +1,15 @@
[
{
"id": 241100,
"name": "Mr Bates vs The Post Office",
"overview": "One of the greatest miscarriages of justice in British legal history where hundreds of innocent sub-postmasters and postmistresses were wrongly accused of theft, fraud and false accounting due to a defective IT system.",
"poster_path": "/cuJy9NE50upYqSqmft3zqleW7eb.jpg",
"first_air_date": "2024-01-01",
"origin_country": [
"GB"
],
"date_added": "2024-01-15"
},
{
"id": 133232,
"name": "Africa",
@ -2749,4 +2760,4 @@
{
"Show Title": "Yu-Gi-Oh: The Abridged Series"
}
]
]

View File

@ -9,20 +9,44 @@ import re
import requests
from urllib.request import urlopen
logging.basicConfig(filename='./logs/run.log', encoding='utf-8', level=logging.DEBUG)
def setup_logger(name=__name__):
    """Return a logger that writes INFO+ to the console and ERROR+ to ./logs/run.log.

    The root logger is opened up to NOTSET so that per-handler levels decide
    what is emitted. Calling this again for the same *name* returns the
    existing logger untouched instead of attaching a second pair of handlers
    (which would duplicate every message — this module's setup is run by more
    than one script).
    """
    import os  # local import: keeps this helper self-contained

    logging.root.setLevel(logging.NOTSET)  # defer filtering to the handlers
    logger = logging.getLogger(name)

    # Idempotence guard: a logger that is already configured keeps its handlers.
    if logger.handlers:
        return logger

    # FileHandler raises FileNotFoundError if the log directory is missing.
    os.makedirs('./logs', exist_ok=True)

    c_handler = logging.StreamHandler()
    c_handler.setLevel(logging.INFO)
    c_handler.setFormatter(logging.Formatter('%(name)s - %(levelname)s - %(message)s'))

    f_handler = logging.FileHandler('./logs/run.log')
    f_handler.setLevel(logging.ERROR)
    f_handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))

    logger.addHandler(c_handler)
    logger.addHandler(f_handler)
    return logger
logger = setup_logger()
load_dotenv()
TMDB_API_KEY = os.getenv('TMDB_API_KEY')
TVDB_API_KEY = os.getenv('TVDB_API_KEY')
if "" == TMDB_API_KEY: logging.error("TMDB API key not found")
if "" == TVDB_API_KEY: logging.error("TVDB API key not found")
if "" == TMDB_API_KEY: logger.error("TMDB API key not found")
if "" == TVDB_API_KEY: logger.error("TVDB API key not found")
def add_item_to_log(item_id, media_type, log):
"""Add a film, book, TV series or TV episode to a log"""
logging.info(f"Processing {item_id}")
logger.info(f"Processing {item_id}")
item = import_by_id(item_id, media_type)
@ -60,7 +84,7 @@ def add_item_to_log(item_id, media_type, log):
if 'y' != input("\nDoes this look correct? [y]: "): return
# Save changes
logging.info(f"Adding {media_type} to {log}")
logger.info(f"Adding {media_type} to {log}")
with open(f"./data/{media_type}/{log}.json", "r") as log_file:
log_items = json.load(log_file)
@ -70,7 +94,7 @@ def add_item_to_log(item_id, media_type, log):
with open(f"./data/{media_type}/{log}.json", "w") as log_file:
json.dump(log_items, log_file, indent=4)
logging.info(f"Added {media_type} {item_id} to {log}")
logger.info(f"Added {media_type} {item_id} to {log}")
def import_by_id(import_id, media_type):
@ -100,7 +124,7 @@ def import_from_imdb_by_id(imdb_id, media_type):
# Process the response
if (200 == response.status_code):
logging.info(response.status_code)
logger.info(response.status_code)
elif (429 == response.status_code):
time.sleep(2)
@ -123,7 +147,7 @@ def import_from_imdb_by_id(imdb_id, media_type):
raise Exception(f"Returned no results for {imdb_id}")
elif 1 < len(response_data):
logging.warning(f"Returned more than one {media_type} for ID '{imdb_id}'")
logger.warning(f"Returned more than one {media_type} for ID '{imdb_id}'")
print(f"Returned more than one {media_type} for ID '{imdb_id}':\n")
print(json.dumps(response_data, indent=4))
idx = input("\nEnter the index of the result to use: ")
@ -150,7 +174,7 @@ def import_from_openlibrary_by_id(isbn, media_type):
# Process the response
if (200 == response.status_code):
logging.info(response.status_code)
logger.info(response.status_code)
elif (429 == response.status_code):
time.sleep(2)
@ -158,7 +182,7 @@ def import_from_openlibrary_by_id(isbn, media_type):
return
else:
raise Exception(f"Error {reponse.status_code}: {response.text}")
raise Exception(f"Error {response.status_code}: {response.text}")
item = json.loads(response.text)
@ -195,7 +219,7 @@ def import_from_openlibrary_by_ol_key(key):
# Process the response
if (200 == response.status_code):
logging.info(response.status_code)
logger.info(response.status_code)
elif (429 == response.status_code):
time.sleep(2)
@ -203,7 +227,7 @@ def import_from_openlibrary_by_ol_key(key):
return
else:
raise Exception(f"Error {reponse.status_code}: {response.text}")
raise Exception(f"Error {response.status_code}: {response.text}")
item = json.loads(response.text)
@ -252,6 +276,7 @@ def cleanup_result(item, media_type):
'lc_classifications', # OpenLibrary
'local_id', # OpenLibrary
'ocaid', # OpenLibrary
'oclc_numbers', # OpenLibrary
'popularity', # TMDB
'production_code', # TMDB
'revision', # OpenLibrary
@ -362,7 +387,7 @@ def main():
add_item_to_log(imdb_id, media_type, log)
except Exception as error:
logging.error(repr(error))
logger.exception("Exception occurred")
print(error)

View File

@ -1,25 +1,26 @@
from dotenv import load_dotenv
import json
import logging
import os
import re
import requests
import time
from urllib.request import urlopen
from add_item import cleanup_result, import_by_id
from add_item import cleanup_result, import_by_id, setup_logger
logging.basicConfig(filename='./logs/run.log', encoding='utf-8', level=logging.DEBUG)
logger = setup_logger(__name__)
load_dotenv()
TMDB_API_KEY = os.getenv('TMDB_API_KEY')
TVDB_API_KEY = os.getenv('TVDB_API_KEY')
if "" == TMDB_API_KEY: logging.error("TMDB API key not found")
if "" == TVDB_API_KEY: logging.error("TVDB API key not found")
if "" == TMDB_API_KEY: logger.warning("TMDB API key not found")
if "" == TVDB_API_KEY: logger.warning("TVDB API key not found")
def process_log(media_type, log):
logging.info(f"Processing {media_type}/{log}")
"""Run through a log and call the appropriate API for each item found"""
logger.info(f"Processing {media_type}/{log}")
with open(f"./data/{media_type}/{log}.json", "r") as log_file:
log_items = json.load(log_file)
@ -33,7 +34,7 @@ def process_log(media_type, log):
elif 'tv-episodes' == media_type: item_title = item['Episode Title']
elif 'tv-series' == media_type: item_title = item['Show Title']
logging.debug(f"Processing {item_title}")
logger.debug(f"Processing {item_title}")
# Rename pre-existing fields
if 'Date Added' in item:
@ -71,7 +72,7 @@ def process_log(media_type, log):
season_no = None
episode_no = item['episode_number'][1:]
else:
logging.error(f"Invalid episode number format '{item['Episode Number']}'")
logger.error(f"Invalid episode number format '{item['Episode Number']}'")
return
log_item_values['season_number'] = season_no
@ -94,7 +95,7 @@ def process_log(media_type, log):
json.dump(log_items, log_file, indent=4)
else:
logging.warning(f"Skipped {item_title}")
logger.warning(f"Skipped {item_title}")
if log_items[i] is not None: log_items[i] |= log_item_values
@ -104,10 +105,12 @@ def process_log(media_type, log):
with open(f"./data/{media_type}/{log}.json", "w") as log_file:
json.dump(log_items, log_file, indent=4)
logging.info(f"Finished processing {media_type}/{log}")
logger.info(f"Finished processing {media_type}/{log}")
def import_by_details(item, item_title, media_type):
"""Import an item when lacking a unique identifier"""
if media_type in ['films', 'tv-series']:
return import_from_tmdb_by_details(item, item_title, media_type)
@ -124,7 +127,7 @@ def import_by_details(item, item_title, media_type):
def import_from_tmdb_by_details(item, item_title, media_type):
"""Retrieve a film or TV series from TMDB using its title"""
logging.info(f"Processing {item_title}")
logger.info(f"Processing {item_title}")
api_url = f"https://api.themoviedb.org/3/search/{'movie' if 'films' == media_type else 'tv'}"
@ -141,12 +144,12 @@ def import_from_tmdb_by_details(item, item_title, media_type):
# Process the response
if (200 == response.status_code):
logging.info(response.status_code)
logger.info(response.status_code)
elif (429 == response.status_code):
time.sleep(2)
import_from_tmdb_by_details(item)
else:
logging.error(response.text)
logger.error(response.text)
response_data = json.loads(response.text)['results']
@ -154,7 +157,7 @@ def import_from_tmdb_by_details(item, item_title, media_type):
return cleanup_result(response_data[0], media_type)
elif 0 == len(response_data):
logging.warning(f"Returned no {media_type} for {item_title}")
logger.warning(f"Returned no {media_type} for {item_title}")
elif 1 < len(response_data):
if 'films' == media_type: title_key = 'title'
@ -166,7 +169,7 @@ def import_from_tmdb_by_details(item, item_title, media_type):
return cleanup_result(response_data[0], media_type)
else:
logging.warning(f"Returned more than one {media_type} for '{item_title}':\n")
logger.warning(f"Returned more than one {media_type} for '{item_title}':\n")
print(json.dumps(response_data, indent=4))
idx = input("\nEnter the index of the result to use: ")
@ -175,7 +178,7 @@ def import_from_tmdb_by_details(item, item_title, media_type):
return cleanup_result(response_data[int(idx)], media_type)
except:
logging.error("Index invalid!")
logger.error("Index invalid!")
print("Index invalid!")
item['IMDB ID'] = input(f"Enter IMDB ID for {item_title}: ")
@ -183,38 +186,46 @@ def import_from_tmdb_by_details(item, item_title, media_type):
if '' != item['IMDB ID']:
return import_by_id(item['IMDB ID'], media_type)
else:
logging.warning(f"Skipped {item_title}")
logger.warning(f"Skipped {item_title}")
return item
media_type = ''
while media_type not in ['films', 'tv-episodes', 'tv-series', 'books']:
media_type = input("Select media type [films|tv-episodes|tv-series|books]: ")
def main():
media_type = ''
while media_type not in ['films', 'tv-episodes', 'tv-series', 'books']:
media_type = input("Select media type [films|tv-episodes|tv-series|books]: ")
if 'films' == media_type:
log = ''
while log not in ['log', 'wishlist']:
log = input ("Enter log to process [log|wishlist]:")
try:
if 'films' == media_type:
log = ''
while log not in ['log', 'wishlist']:
log = input ("Enter log to process [log|wishlist]:")
process_log(media_type, log)
process_log(media_type, log)
elif 'books' == media_type:
log = ''
while log not in ['log', 'current', 'wishlist']:
log = input ("Enter log to process [log|current|wishlist]:")
elif 'books' == media_type:
log = ''
while log not in ['log', 'current', 'wishlist']:
log = input ("Enter log to process [log|current|wishlist]:")
# TODO
# TODO
elif 'tv-episodes' == media_type:
process_log(media_type, 'log')
elif 'tv-episodes' == media_type:
process_log(media_type, 'log')
# TODO
# TODO
elif 'tv-series' == media_type:
log = ''
while log not in ['log', 'current', 'wishlist']:
log = input ("Enter log to process [log|current|wishlist]:")
elif 'tv-series' == media_type:
log = ''
while log not in ['log', 'current', 'wishlist']:
log = input ("Enter log to process [log|current|wishlist]:")
process_log(media_type, log)
process_log(media_type, log)
except Exception as error:
logger.exception("Exception occurred")
print(error)
if __name__ == "__main__":
main()