add book adding to script, update tv series

Ben Goldsworthy 2024-01-15 22:43:23 +01:00
parent 5b92efcc98
commit 06957e053f
11 changed files with 15654 additions and 14769 deletions

View file

@@ -1,3 +1,3 @@
 TMDB_API_KEY=
 TVDB_API_KEY=
-OPENLIBRARY_API_KEY=

File diff suppressed because it is too large

View file

@@ -1,4 +1,9 @@
 [
+    {
+        "Title": "Bottoms Up and the Devil Laughs",
+        "Author": "Kerry Howley",
+        "Date Added": "2024-01-14"
+    },
     {
         "Title": "Jonathan Abernathy You Are Kind",
         "Author": "Molly McGhee",

File diff suppressed because it is too large

View file

@@ -1,4 +1,14 @@
 [
+    {
+        "id": 93304,
+        "title": "Alternative 3",
+        "original_language": "en",
+        "original_title": "Alternative 3",
+        "overview": "Purporting to be an investigation into the UK's contemporary \"brain drain\", Alternative 3 uncovered a plan to make the Moon and Mars habitable in the event of climate change and a terminal environmental catastrophe on Earth.",
+        "poster_path": "/tcVu3RX3ZycwAlQhudKwvIguilM.jpg",
+        "release_date": "1977-06-20",
+        "date_added": "2024-01-14"
+    },
     {
         "id": 786892,
         "title": "Furiosa: A Mad Max Saga",

View file

@@ -1,4 +1,12 @@
 [
+    {
+        "Title": "That's Pretty Clever",
+        "Developers": "Wolfgang Warsch",
+        "Platform": "Board",
+        "Date Started": "2024-01-13",
+        "Date Finished": "2024-01-13",
+        "BGG ID": "244522"
+    },
     {
         "Title": "Cacho Alalay",
         "Platform": "Board",
@@ -9,7 +17,7 @@
         "Title": "Cheating Moth",
         "Developers": "Emely Brand and Lukas Brand",
         "Platform": "Board",
-        "BBG ID": "105593",
+        "BGG ID": "105593",
         "Date Released": "2011",
         "Date Started": "2024-01-06",
         "Date Finished": "2024-01-06"

View file

@@ -1,4 +1,16 @@
 [
+    {
+        "id": 2316,
+        "name": "The Office",
+        "overview": "The everyday lives of office employees in the Scranton, Pennsylvania branch of the fictional Dunder Mifflin Paper Company.",
+        "poster_path": "/7DJKHzAi83BmQrWLrYYOqcoKfhR.jpg",
+        "first_air_date": "2005-03-24",
+        "origin_country": [
+            "US"
+        ],
+        "date_added": "2024-01-14",
+        "added_by_id": "tt0386676"
+    },
     {
         "id": 44045,
         "origin_country": [

File diff suppressed because it is too large

View file

@@ -2,7 +2,7 @@
 <tr>
     <td colspan=4><h3>Diary</h3></td>
 </tr>
-{{ range ( sort ( where $.Site.Data.films.log "date_watched" "!=" "" ) "date_watched" "desc" ) }}
+{{ range ( sort ( where $.Site.Data.films.log "date_completed" "!=" "" ) "date_completed" "desc" ) }}
 <tr>
     <td>{{ with .date_added }}{{ time.Format "Jan 2, 2006" . }}{{ end }}</td>
     <td>
@@ -10,9 +10,9 @@
     </td>
     <td>{{ with .release_date }}{{ time.Format "Jan 2, 2006" . }}{{ end }}</td>
     <td>
-        {{- if .date_watched -}}
-            {{- time.Format "Jan 2, 2006" .date_watched -}}
-            {{- if .is_rewatch }} &#x21BB;{{ end -}}
+        {{- if .date_completed -}}
+            {{- time.Format "Jan 2, 2006" .date_completed -}}
+            {{- if .is_repeat }} &#x21BB;{{ end -}}
         {{- else -}}
             n/a
         {{- end -}}
@@ -22,7 +22,7 @@
 <tr>
     <td colspan=4><h3>Assorted</h3></td>
 </tr>
-{{ range ( sort ( where $.Site.Data.films.log "date_watched" "" ) "title" "asc" ) }}
+{{ range ( sort ( where $.Site.Data.films.log "date_completed" "" ) "title" "asc" ) }}
 <tr>
     <td>{{ with .date_added }}{{ time.Format "Jan 2, 2006" . }}{{ end }}</td>
     <td>
@@ -38,9 +38,9 @@
     </td>
     <td>{{ with .release_date }}{{ time.Format "Jan 2, 2006" . }}{{ end }}</td>
     <td>
-        {{- if .date_watched -}}
-            {{- time.Format "Jan 2, 2006" .date_watched -}}
-            {{- if .is_rewatch }} &#x21BB;{{ end -}}
+        {{- if .date_completed -}}
+            {{- time.Format "Jan 2, 2006" .date_completed -}}
+            {{- if .is_repeat }} &#x21BB;{{ end -}}
         {{- else -}}
             n/a
         {{- end -}}
@@ -64,9 +64,9 @@
     </td>
     <td>{{ with .release_date }}{{ time.Format "Jan 2, 2006" . }}{{ end }}</td>
     <td>
-        {{- if .date_watched -}}
-            {{- time.Format "Jan 2, 2006" .date_watched -}}
-            {{- if .is_rewatch }} &#x21BB;{{ end -}}
+        {{- if .date_completed -}}
+            {{- time.Format "Jan 2, 2006" .date_completed -}}
+            {{- if .is_repeat }} &#x21BB;{{ end -}}
         {{- else -}}
             n/a
         {{- end -}}

View file

@@ -15,11 +15,9 @@ load_dotenv()
 
 TMDB_API_KEY = os.getenv('TMDB_API_KEY')
 TVDB_API_KEY = os.getenv('TVDB_API_KEY')
-OPENLIBRARY_API_KEY = os.getenv('OPENLIBRARY_API_KEY')
 
 if "" == TMDB_API_KEY: logging.error("TMDB API key not found")
 if "" == TVDB_API_KEY: logging.error("TVDB API key not found")
-if "" == OPENLIBRARY_API_KEY: logging.error("OpenLibrary API key not found")
 
 
 def add_item_to_log(item_id, media_type, log):
@@ -29,27 +27,30 @@ def add_item_to_log(item_id, media_type, log):
     item = import_by_id(item_id, media_type)
 
     if log in ['log', 'current']:
-        if 'log' == log:
-            date_watched = ''
-            while re.search('[0-9]{4}-[0-9]{2}-[0-9]{2}', date_watched) is None:
-                date_watched = input("Enter date watched [YYYY-MM-DD, t for today]:")
-                if 't' == date_watched: date_watched = datetime.today().strftime('%Y-%m-%d')
-            item['date_watched'] = date_watched
-
-        elif 'current' == log: # TODO - review this when moving from one log to another
+        if media_type in ['books', 'tv-series', 'games']:
             date_started = ''
             while re.search('[0-9]{4}-[0-9]{2}-[0-9]{2}', date_started) is None:
-                date_started = input("Enter date started [YYYY-MM-DD, t for today]:")
+                date_started = input("Enter date started [YYYY-MM-DD, t for today]: ")
                 if 't' == date_started: date_started = datetime.today().strftime('%Y-%m-%d')
             item['date_started'] = date_started
 
-        is_rewatch = ''
-        while is_rewatch not in ['y', 'n']:
-            is_rewatch = input("Is this a rewatch? [y/n]:")
-        if 'y' == is_rewatch: item['is_rewatch'] = True
+        if 'log' == log:
+            date_finished = ''
+            while re.search('[0-9]{4}-[0-9]{2}-[0-9]{2}', date_finished) is None:
+                date_finished = input("Enter date finished [YYYY-MM-DD, t for today]: ")
+                if 't' == date_finished: date_finished = datetime.today().strftime('%Y-%m-%d')
+            item['date_finished'] = date_finished
+
+            # TODO - do this automatically
+            is_repeat = ''
+            while is_repeat not in ['y', 'n']:
+                is_repeat = input(f"Is this a repeat entry? [y/n]: ")
+            if 'y' == is_repeat: item['is_repeat'] = True
 
         item['added_by_id'] = item_id
 
-        comments = input("Enter comments (optional):")
+        comments = input("Enter comments (optional): ")
         if '' != comments: item['comments'] = comments
 
     # Validation step
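
A side note on the block above: the same prompt-validate-default loop now appears twice (once for date_started, once for date_finished). If more date fields are added later, a helper along these lines could factor it out; prompt_date() is only a sketch, not something this commit introduces.

    import re
    from datetime import datetime

    def prompt_date(label):
        """Prompt until a YYYY-MM-DD date is given; 't' substitutes today's date."""
        value = ''
        while re.search('[0-9]{4}-[0-9]{2}-[0-9]{2}', value) is None:
            value = input(f"Enter date {label} [YYYY-MM-DD, t for today]: ")
            if 't' == value: value = datetime.today().strftime('%Y-%m-%d')
        return value

    # e.g. item['date_started'] = prompt_date('started')
    #      item['date_finished'] = prompt_date('finished')
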
@@ -75,10 +76,12 @@ def add_item_to_log(item_id, media_type, log):
 
 def import_by_id(import_id, media_type):
     if media_type in ['films', 'tv-series']:
         return import_from_imdb_by_id(import_id, media_type)
+
     elif media_type in ['tv-episodes']:
         return #import_from_tvdb_by_id(import_id, media_type)
+
     elif media_type in ['books']:
-        return #import_from_openlibrary_by_id(import_id, media_type)
+        return import_from_openlibrary_by_id(import_id, media_type)
 
 def import_from_imdb_by_id(imdb_id, media_type):
@@ -98,12 +101,14 @@ def import_from_imdb_by_id(imdb_id, media_type):
     # Process the response
     if (200 == response.status_code):
         logging.info(response.status_code)
+
     elif (429 == response.status_code):
         time.sleep(2)
         import_from_imdb_by_id(imdb_id, media_type)
         return
+
     else:
-        logging.error(response.text)
+        raise Exception(f"Error {response.status_code}: {response.text}")
 
     if ('films' == media_type): results_key = 'movie_results'
     elif ('tv-episodes' == media_type): results_key = 'TODO'
@@ -113,36 +118,197 @@ def import_from_imdb_by_id(imdb_id, media_type):
     if 1 == len(response_data):
         item = response_data[0]
 
     elif 0 == len(response_data):
-        logging.error(f"Returned no results for {imdb_id}")
-        return
+        raise Exception(f"Returned no results for {imdb_id}")
 
     elif 1 < len(response_data):
-        logging.warning(f"Returned more than one {media_type} for ID {imdb_id}")
-        print(f"Returned more than one {media_type} for ID {imdb_id}:\n")
+        logging.warning(f"Returned more than one {media_type} for ID '{imdb_id}'")
+        print(f"Returned more than one {media_type} for ID '{imdb_id}':\n")
         print(json.dumps(response_data, indent=4))
 
         idx = input("\nEnter the index of the result to use: ")
         try:
             item = response_data[int(idx)]
         except:
-            logging.error("Index invalid!")
-            print("Index invalid!")
+            raise Exception(f"Index {idx} is invalid")
 
     # Modify the returned result to add additional data
-    return cleanup_result(item)
+    return cleanup_result(item, media_type)
 
 
-def cleanup_result(item):
-    """Process a film or TV episode returned by the TMDB API by removing unnecessary fields and adding others"""
-
-    for field_name in ['adult', 'backdrop_path', 'episode_type', 'genre_ids', 'media_type', 'origin_country', 'popularity', 'production_code', 'runtime', 'still_path', 'video', 'vote_average', 'vote_count']:
+def import_from_openlibrary_by_id(isbn, media_type):
+    """Retrieve a book from OpenLibrary using an ISBN"""
+
+    api_url = f"https://openlibrary.org/isbn/{isbn}"
+
+    # Sending API request
+    response = requests.get(
+        api_url,
+        headers={'accept': 'application/json'}
+    )
+
+    # Process the response
+    if (200 == response.status_code):
+        logging.info(response.status_code)
+
+    elif (429 == response.status_code):
+        time.sleep(2)
+        import_from_openlibrary_by_id(isbn, media_type)
+        return
+
+    else:
+        raise Exception(f"Error {response.status_code}: {response.text}")
+
+    item = json.loads(response.text)
+
+    for key in ['authors', 'works']:
+        if key in item:
+            for i, sub_item in enumerate(item[key]):
+                item[key][i] = import_from_openlibrary_by_ol_key(sub_item['key'])
+
+    if 'works' in item:
+        if len(item['works']) > 1:
+            raise Exception(f"Multiple works found for {isbn}")
+        else:
+            item['work'] = item['works'][0]
+            del item['works']
+
+    # Modify the returned result to add additional data
+    return cleanup_result(item, media_type)
+
+
+def import_from_openlibrary_by_ol_key(key):
+    """Retrieves an item (author or work) from OpenLibrary using an OL key"""
+
+    _, mode, ol_id = key.split('/')
+
+    if mode in ['works', 'authors']:
+        api_url = f"https://openlibrary.org{key}"
+
+        # Sending API request
+        response = requests.get(
+            api_url,
+            headers={'accept': 'application/json'}
+        )
+
+        # Process the response
+        if (200 == response.status_code):
+            logging.info(response.status_code)
+
+        elif (429 == response.status_code):
+            time.sleep(2)
+            import_from_openlibrary_by_ol_key(key)
+            return
+
+        else:
+            raise Exception(f"Error {response.status_code}: {response.text}")
+
+        item = json.loads(response.text)
+
+        if 'authors' == mode:
+            author = {
+                'id': ol_id,
+                'name': item['name']
+            }
+
+            if 'personal_name' in item:
+                if item['name'] != item['personal_name']: author['personal_name'] = item['personal_name']
+
+            return author
+
+        elif 'works' == mode:
+            work = {
+                'id': ol_id,
+                'title': item['title']
+            }
+
+            for key in ['first_publish_date', 'subjects']:
+                if key in item: work[key] = item[key]
+
+            return work
+
+    else:
+        raise Exception(f"Unknown OpenLibrary key '{mode}'")
+
+
+def cleanup_result(item, media_type):
+    """Process a film, TV series, TV episode or book returned by their respective APIs by removing unnecessary fields and adding others"""
+
+    for field_name in [
+        'adult', # TMDB
+        'backdrop_path', # TMDB
+        'copyright_date', # OpenLibrary
+        'classifications', # OpenLibrary
+        'created', # OpenLibrary
+        'episode_type', # TMDB
+        'first_sentence', # OpenLibrary
+        'genre_ids', # TMDB
+        'identifiers', # OpenLibrary
+        'media_type', # TMDB
+        'last_modified', # OpenLibrary
+        'latest_revision', # OpenLibrary
+        'lc_classifications', # OpenLibrary
+        'local_id', # OpenLibrary
+        'ocaid', # OpenLibrary
+        'popularity', # TMDB
+        'production_code', # TMDB
+        'revision', # OpenLibrary
+        'runtime', # TMDB
+        'source_records', # OpenLibrary
+        'still_path', # TMDB
+        'type', # OpenLibrary
+        'video', # TMDB
+        'vote_average', # TMDB
+        'vote_count' # TMDB
+    ]:
         if field_name in item: del item[field_name]
 
-    # TODO - select automatically
-    title_key = 'name'
-    if f"original_{title_key}" in item and 'original_language' in item:
-        if item[f"original_{title_key}"] == item[title_key] and item['original_language'] == 'en':
-            del item[f"original_{title_key}"], item['original_language']
+    if media_type in ['films', 'tv-series']:
+        title_key = 'name' if 'tv-series' == media_type else 'title'
 
+    if 'books' == media_type:
+        _, _, item['id'] = item['key'].split('/')
+        del item['key']
+
+        for key in ['isbn_10', 'isbn_13']:
+            if key in item:
+                if len(item[key]) > 1:
+                    raise Exception("Multiple ISBN results")
+                else:
+                    item[key] = item[key][0]
+
+        if 'publish_places' in item:
+            if len(item['publish_places']) > 1:
+                raise Exception("Multiple publish_places")
+            else:
+                item['published_in'] = item['publish_places'][0]
+                del item['publish_places']
+
+        if 'languages' in item:
+            item['languages'] = [lang['key'].split('/')[2] for lang in item['languages']]
+
+        if 'translation_of' in item:
+            if item['translation_of'] == item['work']['title']:
+                del item['translation_of']
+            else:
+                raise Exception(f"translation_of '{item['translation_of']}' is different to work title '{item['work']['title']}'")
+
+        if 'translated_from' in item:
+            if len(item['translated_from']) > 1:
+                raise Exception("Multiple translated_from results")
+            else:
+                item['work']['original_language'] = item['translated_from'][0]['key'].split('/')[2]
+                del item['translated_from']
+
+        if f"original_{title_key}" in item and 'original_language' in item:
+            if item[f"original_{title_key}"] == item[title_key] and item['original_language'] == 'en':
+                del item[f"original_{title_key}"], item['original_language']
 
     if 'date_added' not in item: item['date_added'] = datetime.today().strftime('%Y-%m-%d')
@@ -152,45 +318,52 @@ def cleanup_result(item):
 
 def main():
     media_type = ''
     while media_type not in ['films', 'tv-episodes', 'tv-series', 'books']:
-        media_type = input("Select media type [films|tv-episodes|tv-series|books]:")
+        media_type = input("Select media type [films|tv-episodes|tv-series|books]: ")
 
-    if 'films' == media_type:
-        log = ''
-        while log not in ['log', 'wishlist']:
-            log = input ("Enter log to update [log|wishlist]:")
-
-        imdb_id = ''
-        while re.search("tt[0-9]+", imdb_id) is None:
-            imdb_id = input("Enter IMDB ID:")
-
-        add_item_to_log(imdb_id, media_type, log)
-
-    elif 'books' == media_type:
-        log = ''
-        while log not in ['log', 'current', 'wishlist']:
-            log = input ("Enter log to update [log|current|wishlist]:")
-
-        isbn = ''
-        while re.search("[0-9]+", isbn) is None:
-            isbn = input("Enter ISBN:")
-
-    elif 'tv-episodes' == media_type:
-        imdb_id = ''
-        while re.search("tt[0-9]+", imdb_id) is None:
-            imdb_id = input("Enter IMDB ID:")
-
-        add_item_to_log(imdb_id, media_type, 'log')
-
-    elif 'tv-series' == media_type:
-        log = ''
-        while log not in ['log', 'current', 'wishlist']:
-            log = input ("Enter log to update [log|current|wishlist]:")
-
-        imdb_id = ''
-        while re.search("tt[0-9]+", imdb_id) is None:
-            imdb_id = input("Enter IMDB ID:")
-
-        add_item_to_log(imdb_id, media_type, log)
+    try:
+        if 'films' == media_type:
+            log = ''
+            while log not in ['log', 'wishlist']:
+                log = input ("Enter log to update [log|wishlist]: ")
+
+            imdb_id = ''
+            while re.search("tt[0-9]+", imdb_id) is None:
+                imdb_id = input("Enter IMDB ID: ")
+
+            add_item_to_log(imdb_id, media_type, log)
+
+        elif 'books' == media_type:
+            log = ''
+            while log not in ['log', 'current', 'wishlist']:
+                log = input ("Enter log to update [log|current|wishlist]: ")
+
+            isbn = ''
+            while re.search("[0-9]+", isbn) is None:
+                isbn = input("Enter ISBN: ")
+
+            add_item_to_log(isbn, media_type, log)
+
+        elif 'tv-episodes' == media_type:
+            imdb_id = ''
+            while re.search("tt[0-9]+", imdb_id) is None:
+                imdb_id = input("Enter IMDB ID: ")
+
+            add_item_to_log(imdb_id, media_type, 'log')
+
+        elif 'tv-series' == media_type:
+            log = ''
+            while log not in ['log', 'current', 'wishlist']:
+                log = input ("Enter log to update [log|current|wishlist]: ")
+
+            imdb_id = ''
+            while re.search("tt[0-9]+", imdb_id) is None:
+                imdb_id = input("Enter IMDB ID: ")
+
+            add_item_to_log(imdb_id, media_type, log)
+
+    except Exception as error:
+        logging.error(repr(error))
+        print(error)
 
 
 if __name__ == "__main__":
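
For context on the new import path: the OpenLibrary endpoints used above (https://openlibrary.org/isbn/{isbn} plus the /authors/... and /works/... keys it returns) need no API key, which is why the OPENLIBRARY_API_KEY lines are dropped. Below is a condensed, standalone sketch of that lookup flow; fetch_book() and its trimmed return value are illustrative and not part of the script.

    import requests

    def fetch_book(isbn):
        """Fetch an edition by ISBN, then resolve its author keys (no API key required)."""
        edition = requests.get(
            f"https://openlibrary.org/isbn/{isbn}",
            headers={'accept': 'application/json'}
        ).json()

        # Author entries only carry an OL key such as '/authors/OL34184A';
        # a second request per key resolves it to a full record.
        authors = [
            requests.get(f"https://openlibrary.org{author['key']}.json").json()['name']
            for author in edition.get('authors', [])
        ]

        return {'title': edition.get('title'), 'authors': authors}

    # e.g. fetch_book('9780140328721')
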

View file

@@ -14,11 +14,9 @@ load_dotenv()
 
 TMDB_API_KEY = os.getenv('TMDB_API_KEY')
 TVDB_API_KEY = os.getenv('TVDB_API_KEY')
-OPENLIBRARY_API_KEY = os.getenv('OPENLIBRARY_API_KEY')
 
 if "" == TMDB_API_KEY: logging.error("TMDB API key not found")
 if "" == TVDB_API_KEY: logging.error("TVDB API key not found")
-if "" == OPENLIBRARY_API_KEY: logging.error("OpenLibrary API key not found")
 
 def process_log(media_type, log):
     logging.info(f"Processing {media_type}/{log}")
@@ -41,29 +39,48 @@ def process_log(media_type, log):
         if 'Date Added' in item:
             log_item_values['date_added'] = item['Date Added']
             del item['Date Added']
+
         if 'Date Watched' in item:
-            log_item_values['date_watched'] = item['Date Watched']
+            log_item_values['date_finished'] = item['Date Watched']
             del item['Date Watched']
+
         if 'Rewatch' in item:
-            log_item_values['is_rewatch'] = item['Rewatch']
+            log_item_values['is_repeat'] = item['Rewatch']
             del item['Rewatch']
+
         if 'Comments' in item:
             log_item_values['comments'] = item['Comments']
             del item['Comments']
+
         if 'Series Title' in item:
             log_item_values['series_title'] = item['Series Title']
             del item['Series Title']
+
         if 'Episode Title' in item:
             log_item_values['name'] = item['Episode Title']
             del item['Episode Title']
+
         if 'Episode Number' in item:
-            split_num = log_item_values['episode_number'].split("E")
-            log_item_values['episode_number'] = split_num[1]
-            log_item_values['season_number'] = split_num[0] or None
+            if re.search("[0-9]+x[0-9]+", item['Episode Number']) is not None:
+                season_no, _, episode_no = log_item_values['episode_number'].split("x")
+
+            elif re.search("S[0-9]+E[0-9]+", item['Episode Number']) is not None:
+                season_no, _, episode_no = log_item_values['episode_number'].split("E")
+
+            elif re.search("E[0-9]+", item['Episode Number']) is not None:
+                season_no = None
+                episode_no = item['episode_number'][1:]
+
+            else:
+                logging.error(f"Invalid episode number format '{item['Episode Number']}'")
+                return
+
+            log_item_values['season_number'] = season_no
+            log_item_values['episode_number'] = episode_no
+
             del item['Episode Number']
 
         if 'IMDB ID' in item:
             log_items[i] = import_by_id(item['IMDB ID'], media_type)
         else:
             log_items[i] = import_by_details(item, item_title, media_type)
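
The new branch above recognises three episode-number spellings ('1x02', 'S01E02' and 'E02'). Note that it unpacks str.split() into three names, which raises unless the separator appears exactly twice; the standalone sketch below uses str.partition() instead. parse_episode_number() is illustrative, not a helper in this repo.

    import re

    def parse_episode_number(value):
        """Return (season_no, episode_no) for '1x02', 'S01E02' or 'E02' style strings."""
        if re.fullmatch(r"[0-9]+x[0-9]+", value):
            season_no, _, episode_no = value.partition("x")
        elif re.fullmatch(r"S[0-9]+E[0-9]+", value):
            season_no, _, episode_no = value[1:].partition("E")
        elif re.fullmatch(r"E[0-9]+", value):
            season_no, episode_no = None, value[1:]
        else:
            raise ValueError(f"Invalid episode number format '{value}'")
        return season_no, episode_no

    # parse_episode_number('S01E02') -> ('01', '02')
    # parse_episode_number('1x02')   -> ('1', '02')
    # parse_episode_number('E02')    -> (None, '02')
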
@@ -75,6 +92,7 @@ def process_log(media_type, log):
 
             with open(f"./data/{media_type}/{log}.json", "w") as log_file:
                 json.dump(log_items, log_file, indent=4)
+
         else:
             logging.warning(f"Skipped {item_title}")
 
@@ -92,11 +110,16 @@ def process_log(media_type, log):
 
 def import_by_details(item, item_title, media_type):
     if media_type in ['films', 'tv-series']:
         return import_from_tmdb_by_details(item, item_title, media_type)
+
     elif media_type in ['tv-episodes']:
         return #import_from_tvdb_by_details(item, item_title, media_type)
+
     elif media_type in ['books']:
         return #import_from_openlibrary_by_details(item, item_title, media_type)
+
+    elif media_type in ['games']:
+        return #import_from_igdb_by_details(item, item_title, media_type)
 
 def import_from_tmdb_by_details(item, item_title, media_type):
     """Retrieve a film or TV series from TMDB using its title"""
@@ -128,7 +151,7 @@ def import_from_tmdb_by_details(item, item_title, media_type):
     response_data = json.loads(response.text)['results']
 
     if 1 == len(response_data):
-        return cleanup_result(response_data[0])
+        return cleanup_result(response_data[0], media_type)
 
     elif 0 == len(response_data):
         logging.warning(f"Returned no {media_type} for {item_title}")
@@ -140,7 +163,7 @@ def import_from_tmdb_by_details(item, item_title, media_type):
         response_data = [result for result in response_data if result[title_key] == item_title]
 
         if 1 == len(response_data):
-            return cleanup_result(response_data[0])
+            return cleanup_result(response_data[0], media_type)
 
         else:
             logging.warning(f"Returned more than one {media_type} for '{item_title}':\n")
@@ -149,7 +172,8 @@ def import_from_tmdb_by_details(item, item_title, media_type):
 
             if "" != idx:
                 try:
-                    return cleanup_result(response_data[int(idx)])
+                    return cleanup_result(response_data[int(idx)], media_type)
                 except:
                     logging.error("Index invalid!")
                     print("Index invalid!")
+
@@ -179,9 +203,13 @@ while media_type not in ['films', 'tv-episodes', 'tv-series', 'books']:
     while log not in ['log', 'current', 'wishlist']:
         log = input ("Enter log to process [log|current|wishlist]:")
 
+    # TODO
+
 elif 'tv-episodes' == media_type:
     process_log(media_type, 'log')
 
+    # TODO
+
 elif 'tv-series' == media_type:
     log = ''
     while log not in ['log', 'current', 'wishlist']: