Compare commits


No commits in common. "5b92efcc98aef05117b710e29194332e5588d48b" and "3baaf67f510e90743ef566918df2def9ea920efe" have entirely different histories.

11 changed files with 780 additions and 1706 deletions


@@ -1,3 +0,0 @@
TMDB_API_KEY=
TVDB_API_KEY=
OPENLIBRARY_API_KEY=

.gitignore (vendored, 2 changes)

@@ -1,8 +1,6 @@
 cgi-bin/
-__pycache__/
 .well-known/
 resources/
 node_modules/
 public/
 logs/
-.env


@@ -1,27 +1,4 @@
 [
-{
-"id": 596179,
-"title": "Climate Change: The Facts",
-"original_language": "en",
-"original_title": "Climate Change: The Facts",
-"overview": "After one of the hottest years on record, Sir David Attenborough looks at the science of climate change and potential solutions to this global threat. Interviews with some of the world\u2019s leading climate scientists explore recent extreme weather conditions such as unprecedented storms and catastrophic wildfires. They also reveal what dangerous levels of climate change could mean for both human populations and the natural world in the future.",
-"poster_path": "/dLsPnpMoj6CtBw8OHIrCm3QSGsL.jpg",
-"release_date": "2019-04-18",
-"date_added": "2024-01-14",
-"date_watched": "2019-09-11",
-"imdb_id": "tt10095266"
-},
-{
-"id": 18491,
-"title": "Neon Genesis Evangelion: The End of Evangelion",
-"original_language": "ja",
-"original_title": "\u65b0\u4e16\u7d00\u30a8\u30f4\u30a1\u30f3\u30b2\u30ea\u30aa\u30f3\u5287\u5834\u7248 Air\uff0f\u307e\u3054\u3053\u308d\u3092\u3001\u541b\u306b",
-"overview": "The second of two theatrically released follow-ups to the Neon Genesis Evangelion series. Comprising of two alternate episodes which were first intended to take the place of episodes 25 and 26, this finale answers many of the questions surrounding the series, while also opening up some new possibilities.",
-"poster_path": "/j6G24dqI4WgUtChhWjfnI4lnmiK.jpg",
-"release_date": "1997-07-19",
-"date_added": "2024-01-14",
-"date_watched": "2024-01-14"
-},
 {
 "id": 613,
 "title": "Downfall",


@@ -1,12 +0,0 @@
[
{
"id": 242807,
"name": "Skibidi Toilet",
"overview": "Skibidi Toilet is a apocalyptic series where camera-mans fight with the skibidi toilets.",
"poster_path": "/4YtVG3wrFYwt4JjQKiasqWdweLV.jpg",
"first_air_date": "2023-02-07",
"date_added": "2024-01-14",
"date_started": "2024-01-12",
"added_by_id": "tt27814427"
}
]

File diff suppressed because it is too large.

data/tv/log-shows.json (new file, 455 additions)

@@ -0,0 +1,455 @@
[
{
"Show Title": "All Watched Over By Machines of Loving Grace"
},
{
"Show Title": "The Trap: What Happend to Our Dream of Freedom"
},
{
"Show Title": "Louis Theroux: Miami Mega Jail"
},
{
"Show Title": "The Power of Nightmares: The Rise of the Politics of Fear"
},
{
"Show Title": "Pandora's Box: A Fable from the Age of Science"
},
{
"Show Title": "Century of the Self"
},
{
"Show Title": "Scavengers Reign"
},
{
"Show Title": "Once Upon a Time in Northern Ireland"
},
{
"Show Title": "A Touch of Cloth"
},
{
"Show Title": "Adventure Time"
},
{
"Show Title": "Agents of Cracked"
},
{
"Show Title": "Ambulance"
},
{
"Show Title": "American Experience"
},
{
"Show Title": "Archer"
},
{
"Show Title": "Arrested Development"
},
{
"Show Title": "Band of Brothers"
},
{
"Show Title": "Battlestar Galactica"
},
{
"Show Title": "Battlestar Galactica: Blood & Chrome"
},
{
"Show Title": "BBS: The Documentary"
},
{
"Show Title": "Black Books"
},
{
"Show Title": "Black Dynamite"
},
{
"Show Title": "Black Mirror"
},
{
"Show Title": "Blackadder"
},
{
"Show Title": "Blue Planet II"
},
{
"Show Title": "Bluestone 42"
},
{
"Show Title": "Boardwalk Empire"
},
{
"Show Title": "Brass Eye"
},
{
"Show Title": "Bravest Warriors"
},
{
"Show Title": "Britz"
},
{
"Show Title": "Broadchurch"
},
{
"Show Title": "Broken Trail"
},
{
"Show Title": "Brooklyn Nine-Nine"
},
{
"Show Title": "Caprica"
},
{
"Show Title": "Cardiac Arrest"
},
{
"Show Title": "Charlie Brooker's Gameswipe"
},
{
"Show Title": "Charlie Brooker's Screen Wipe"
},
{
"Show Title": "Charlie Brooker's Weekly Wipe"
},
{
"Show Title": "Chernobyl"
},
{
"Show Title": "China, IL"
},
{
"Show Title": "Climate Change - The Facts"
},
{
"Show Title": "Community"
},
{
"Show Title": "Continuum"
},
{
"Show Title": "Cowboy Bebop"
},
{
"Show Title": "Dark Net"
},
{
"Show Title": "Dead Set"
},
{
"Show Title": "Deadwood"
},
{
"Show Title": "Death Note"
},
{
"Show Title": "Departures"
},
{
"Show Title": "Doctor Who"
},
{
"Show Title": "Dr. Horrible's Sing-Along Blog"
},
{
"Show Title": "Enlisted"
},
{
"Show Title": "Fargo"
},
{
"Show Title": "Firefly"
},
{
"Show Title": "Fist of Fun"
},
{
"Show Title": "Frisky Dingo"
},
{
"Show Title": "Futurama"
},
{
"Show Title": "Generation Kill"
},
{
"Show Title": "Ghost in the Shell: Stand Alone Complex"
},
{
"Show Title": "Gilmore Girls"
},
{
"Show Title": "Gilmore Girls: A Year in the Life"
},
{
"Show Title": "Halo 4: Forward Unto Dawn"
},
{
"Show Title": "Halt and Catch Fire"
},
{
"Show Title": "Hannibal"
},
{
"Show Title": "Happy Valley"
},
{
"Show Title": "Have I Got News for You"
},
{
"Show Title": "Horrifying Planet"
},
{
"Show Title": "How TV Ruined Your Life"
},
{
"Show Title": "Human Planet"
},
{
"Show Title": "Jam"
},
{
"Show Title": "John Adams"
},
{
"Show Title": "Justified"
},
{
"Show Title": "Lake Dredge Appraisal"
},
{
"Show Title": "Les Revenants"
},
{
"Show Title": "Life"
},
{
"Show Title": "Line Of Duty"
},
{
"Show Title": "Look Around You"
},
{
"Show Title": "Louie"
},
{
"Show Title": "Mad Men"
},
{
"Show Title": "Million Dollar Extreme Presents: World Peace"
},
{
"Show Title": "Monkey Dust"
},
{
"Show Title": "Mr. Robot"
},
{
"Show Title": "Mr. Show"
},
{
"Show Title": "My Name Is Earl"
},
{
"Show Title": "Nathan Barley"
},
{
"Show Title": "Never Mind the Buzzcocks"
},
{
"Show Title": "Newswipe With Charlie Brooker"
},
{
"Show Title": "No Gods, No Masters: A History of Anarchism"
},
{
"Show Title": "Now and Then, Here and There"
},
{
"Show Title": "Once Upon a Time in Iraq"
},
{
"Show Title": "Orange is the New Black"
},
{
"Show Title": "Orphan Black"
},
{
"Show Title": "Our Planet (2019)"
},
{
"Show Title": "Our War"
},
{
"Show Title": "Pandemic: How to Prevent an Outbreak"
},
{
"Show Title": "Peaky Blinders"
},
{
"Show Title": "Peep Show"
},
{
"Show Title": "Penn & Teller: Bullshit!"
},
{
"Show Title": "Person of Interest"
},
{
"Show Title": "Planet Earth II"
},
{
"Show Title": "Planetes"
},
{
"Show Title": "Pushing Daisies"
},
{
"Show Title": "Reaper"
},
{
"Show Title": "Red Dwarf"
},
{
"Show Title": "Reporters At War"
},
{
"Show Title": "Rick and Morty"
},
{
"Show Title": "Rome"
},
{
"Show Title": "Search Party"
},
{
"Show Title": "Serial Experiments Lain"
},
{
"Show Title": "Sex Education"
},
{
"Show Title": "Sex House"
},
{
"Show Title": "Silicon Valley"
},
{
"Show Title": "Small Axe"
},
{
"Show Title": "South Park"
},
{
"Show Title": "Spaced"
},
{
"Show Title": "SpongeBob SquarePants"
},
{
"Show Title": "Star Trek"
},
{
"Show Title": "Stephen Fry: Out There"
},
{
"Show Title": "Stewart Lee's Comedy Vehicle"
},
{
"Show Title": "Ted Lasso"
},
{
"Show Title": "Tenacious D"
},
{
"Show Title": "Texhnolyze"
},
{
"Show Title": "That Mitchell & Webb Look"
},
{
"Show Title": "The Abolitionists"
},
{
"Show Title": "The Boondocks"
},
{
"Show Title": "The Chosen"
},
{
"Show Title": "The Crown"
},
{
"Show Title": "The Day Today"
},
{
"Show Title": "The End of the F***ing World"
},
{
"Show Title": "The Fall"
},
{
"Show Title": "The Good Place"
},
{
"Show Title": "The Inbetweeners"
},
{
"Show Title": "The IT Crowd"
},
{
"Show Title": "The Office"
},
{
"Show Title": "The Pacific"
},
{
"Show Title": "The Prisoner"
},
{
"Show Title": "The Promise"
},
{
"Show Title": "The Simpsons"
},
{
"Show Title": "The Sopranos"
},
{
"Show Title": "The State (2017)"
},
{
"Show Title": "The Thick of It"
},
{
"Show Title": "The Whitest Kids U Know"
},
{
"Show Title": "The Windsors"
},
{
"Show Title": "The Wire"
},
{
"Show Title": "The Witcher"
},
{
"Show Title": "True Detective"
},
{
"Show Title": "Unsere Mütter, unsere Väter"
},
{
"Show Title": "Utopia"
},
{
"Show Title": "Warriors (1999)"
},
{
"Show Title": "Ways of Seeing"
},
{
"Show Title": "Wolf Hall"
},
{
"Show Title": "Yonderland"
}
]


@@ -9845,6 +9845,12 @@
 "Date Watched": "2019-09-15",
 "Episode Number": "1x01"
 },
+{
+"Series Title": "Climate Change - The Facts",
+"Episode Title": "Climate Change - The Facts",
+"Date Watched": "2019-09-11",
+"Episode Number": "1x01"
+},
 {
 "Series Title": "Last Week Tonight With John Oliver",
 "Episode Title": "Episode 171",


@@ -1,88 +1,15 @@
 # Script to add a new item to the log
 from datetime import datetime
-from dotenv import load_dotenv
 import json
 import logging
-import os
 import re
 import requests
 from urllib.request import urlopen
-logging.basicConfig(filename='./logs/run.log', encoding='utf-8', level=logging.DEBUG)
+def import_film(imdb_id, log):
+"""Import a film via the TMDB API, given an IMDB ID"""
-load_dotenv()
+logging.info(f"Processing {imdb_id}")
-TMDB_API_KEY = os.getenv('TMDB_API_KEY')
-TVDB_API_KEY = os.getenv('TVDB_API_KEY')
-OPENLIBRARY_API_KEY = os.getenv('OPENLIBRARY_API_KEY')
-if "" == TMDB_API_KEY: logging.error("TMDB API key not found")
-if "" == TVDB_API_KEY: logging.error("TVDB API key not found")
-if "" == OPENLIBRARY_API_KEY: logging.error("OpenLibrary API key not found")
-def add_item_to_log(item_id, media_type, log):
-"""Add a film, book, TV series or TV episode to a log"""
-logging.info(f"Processing {item_id}")
-item = import_by_id(item_id, media_type)
-if log in ['log', 'current']:
-if 'log' == log:
-date_watched = ''
-while re.search('[0-9]{4}-[0-9]{2}-[0-9]{2}', date_watched) is None:
-date_watched = input("Enter date watched [YYYY-MM-DD, t for today]:")
-if 't' == date_watched: date_watched = datetime.today().strftime('%Y-%m-%d')
-item['date_watched'] = date_watched
-elif 'current' == log:
-date_started = ''
-while re.search('[0-9]{4}-[0-9]{2}-[0-9]{2}', date_started) is None:
-date_started = input("Enter date started [YYYY-MM-DD, t for today]:")
-if 't' == date_started: date_started = datetime.today().strftime('%Y-%m-%d')
-item['date_started'] = date_started
-is_rewatch = ''
-while is_rewatch not in ['y', 'n']:
-is_rewatch = input("Is this a rewatch? [y/n]:")
-if 'y' == is_rewatch: item['is_rewatch'] = True
-item['added_by_id'] = item_id
-comments = input("Enter comments (optional):")
-if '' != comments: item['comments'] = comments
-# Validation step
-correct = ''
-print(f"{media_type} data to add:\n")
-print(json.dumps(item, indent=4))
-if 'y' != input("\nDoes this look correct? [y]: "): return
-# Save changes
-logging.info(f"Adding {media_type} to {log}")
-with open(f"./data/{media_type}/{log}.json", "r") as log_file:
-log_items = json.load(log_file)
-log_items.insert(0, item)
-with open(f"./data/{media_type}/{log}.json", "w") as log_file:
-json.dump(log_items, log_file, indent=4)
-logging.info(f"Added {media_type} {item_id} to {log}")
-def import_by_id(import_id, media_type):
-if media_type in ['films', 'tv-series']:
-return import_from_imdb_by_id(import_id, media_type)
-elif media_type in ['tv-episodes']:
-return #import_from_tvdb_by_id(import_id, media_type)
-elif media_type in ['books']:
-return #import_from_openlibrary_by_id(import_id, media_type)
-def import_from_imdb_by_id(imdb_id, media_type):
-"""Retrieve a film, TV show or TV episode from TMDB using an IMDB ID"""
 api_url = f"https://api.themoviedb.org/3/find/{imdb_id}"
@@ -92,7 +19,7 @@ def import_from_imdb_by_id(imdb_id, media_type):
 params={
 'external_source': 'imdb_id'
 },
-headers={'Authorization': f"Bearer {TMDB_API_KEY}"}
+headers={'Authorization': 'Bearer eyJhbGciOiJIUzI1NiJ9.eyJhdWQiOiI1NWQ2ZjY3YzJlOTQwMDI1NTFmN2VkNmEyZWVjM2E3NyIsInN1YiI6IjUxNWMyNzkxMTljMjk1MTQ0ZDAzZDM0NCIsInNjb3BlcyI6WyJhcGlfcmVhZCJdLCJ2ZXJzaW9uIjoxfQ.92eNKubJ_CORCIIlta30P9Qjg_Q9gPRFDTfG4gyz9kY'}
 )
 # Process the response
@@ -100,61 +27,179 @@ def import_from_imdb_by_id(imdb_id, media_type):
 logging.info(response.status_code)
 elif (429 == response.status_code):
 time.sleep(2)
-import_from_imdb_by_id(imdb_id, media_type)
+import_film(imdb_id)
 return
 else:
 logging.error(response.text)
-if ('films' == media_type): results_key = 'movie_results'
+response_data = json.loads(response.text)
-elif ('tv-episodes' == media_type): results_key = 'TODO'
+if 1 == len(response_data['movie_results']):
-elif ('tv-series' == media_type): results_key = 'tv_results'
+film = response_data['movie_results'][0]
+elif 0 == len(response_data['movie_results']):
-response_data = json.loads(response.text)[results_key]
-if 1 == len(response_data):
-item = response_data[0]
-elif 0 == len(response_data):
 logging.error(f"Returned no results for {imdb_id}")
 return
-elif 1 < len(response_data):
+elif 1 < len(response_data['movie_results']):
-logging.warning(f"Returned more than one {media_type} for ID {imdb_id}")
+logging.warning(f"Returned more than one film for ID {imdb_id}")
-print(f"Returned more than one {media_type} for ID {imdb_id}:\n")
+print(f"Returned more than one film for ID {imdb_id}:")
-print(json.dumps(response_data, indent=4))
+print(json.dumps(response_data['movie_results'], indent=4))
-idx = input("\nEnter the index of the result to use: ")
+id = input("Enter the index of the result to use:")
 try:
-item = response_data[int(idx)]
+film = response_data['movie_results'][id]
 except:
 logging.error("Index invalid!")
 print("Index invalid!")
 # Modify the returned result to add additional data
-return cleanup_result(item)
+film = cleanup_film(film)
+if 'log' == log:
+date_watched = ''
+while re.search('[0-9]{4}-[0-9]{2}-[0-9]{2}', date_watched) is None:
+date_watched = input("Enter date watched [YYYY-MM-DD, t for today]:")
+if 't' == date_watched: date_watched = datetime.today().strftime('%Y-%m-%d')
+film['date_watched'] = date_watched
+is_rewatch = ''
+while is_rewatch not in ['y', 'n']:
+is_rewatch = input("Is this a rewatch? [y/n]:")
+if 'y' == is_rewatch: film['is_rewatch'] = True
+comments = input("Enter comments (optional):")
+if '' != comments: film['comments'] = comments
+# Validation step
+correct = ''
+print("Film data to add:")
+print(json.dumps(film, indent=4))
+if 'y' != input("Does this look correct? [y]:"): return
+# Save changes
+logging.info('Adding film to log…')
+with open(f"./data/films/{log}.json", "r") as films_log:
+films = json.load(films_log)
+films.insert(0, film)
+with open(f"./data/films/{log}.json", "w") as films_log:
+json.dump(films, films_log, indent=4)
+logging.info(f"Added film {film['title']} ({film['release_date']}) to log {log}")
-def cleanup_result(item):
+def import_tv_episode(imdb_id, log):
-"""Process a film or TV episode returned by the TMDB API by removing unnecessary fields and adding others"""
+"""Import a TV episode via the TMDB API, given an IMDB ID"""
+logging.info(f"Processing {imdb_id}")
-for field_name in ['adult', 'backdrop_path', 'episode_type', 'genre_ids', 'media_type', 'origin_country', 'popularity', 'production_code', 'runtime', 'still_path', 'video', 'vote_average', 'vote_count']:
+api_url = f"https://api.themoviedb.org/3/find/{imdb_id}"
-if field_name in item: del item[field_name]
-# TODO - select automatically
+# Sending API request
-title_key = 'name'
+response = requests.get(
+api_url,
+params={
+'external_source': 'imdb_id'
+},
+headers={'Authorization': 'Bearer eyJhbGciOiJIUzI1NiJ9.eyJhdWQiOiI1NWQ2ZjY3YzJlOTQwMDI1NTFmN2VkNmEyZWVjM2E3NyIsInN1YiI6IjUxNWMyNzkxMTljMjk1MTQ0ZDAzZDM0NCIsInNjb3BlcyI6WyJhcGlfcmVhZCJdLCJ2ZXJzaW9uIjoxfQ.92eNKubJ_CORCIIlta30P9Qjg_Q9gPRFDTfG4gyz9kY'}
+)
-if f"original_{title_key}" in item and 'original_language' in item:
+# Process the response
-if item[f"original_{title_key}"] == item[title_key] and item['original_language'] == 'en':
+if (200 == response.status_code):
-del item[f"original_{title_key}"], item['original_language']
+logging.info(response.status_code)
+elif (429 == response.status_code):
+time.sleep(2)
+import_film(imdb_id)
+return
+else:
+logging.error(response.text)
-if 'date_added' not in item: item['date_added'] = datetime.today().strftime('%Y-%m-%d')
+response_data = json.loads(response.text)
+if 1 == len(response_data['tv_episode_results']):
-return item
+tv_episode = response_data['tv_episode_results'][0]
+elif 0 == len(response_data['tv_episode_results']):
+logging.error(f"Returned no results for {imdb_id}")
+return
+elif 1 < len(response_data['tv_episode_results']):
+logging.warning(f"Returned more than one TV episode for ID {imdb_id}")
+print(f"Returned more than one TV episode for ID {imdb_id}:")
+print(json.dumps(response_data['tv_episode_results'], indent=4))
+id = input("Enter the index of the result to use:")
+try:
+tv_episode = response_data['tv_episode_results'][id]
+except:
+logging.error("Index invalid!")
+print("Index invalid!")
-def main():
+# Modify the returned result to add additional data
+tv_episode = cleanup_tv_episode(tv_episode)
+if 'log' == log:
+date_watched = ''
+while re.search('[0-9]{4}-[0-9]{2}-[0-9]{2}', date_watched) is None:
+date_watched = input("Enter date watched [YYYY-MM-DD, t for today]:")
+if 't' == date_watched: date_watched = datetime.today().strftime('%Y-%m-%d')
+tv_episode['date_watched'] = date_watched
+is_rewatch = ''
+while is_rewatch not in ['y', 'n']:
+is_rewatch = input("Is this a rewatch? [y/n]:")
+if 'y' == is_rewatch: tv_episode['is_rewatch'] = True
+comments = input("Enter comments (optional):")
+if '' != comments: tv_episode['comments'] = comments
+# Validation step
+correct = ''
+print("TV episode data to add:")
+print(json.dumps(tv_episode, indent=4))
+if 'y' != input("Does this look correct? [y]:"): return
+# Save changes
+logging.info('Adding TV episode to log…')
+with open(f"./data/tv/{log}.json", "r") as tv_episodes_log:
+tv_episodes = json.load(tv_episodes_log)
+tv_episodes.insert(0, tv_episode)
+with open(f"./data/tv/{log}.json", "w") as tv_episodes_log:
+json.dump(tv_episodes, tv_episodes_log, indent=4)
+logging.info(f"Added TV episode {tv_episode['name']} ({tv_episode['air_date']}) to log {log}")
+def cleanup_film(film):
+"""Process a film returned by the TMDB API by removing unnecessary fields and adding others"""
+del film['adult'], film['backdrop_path'], film['genre_ids'], film['popularity'], film['video'], film['vote_average'], film['vote_count']
+if 'media_type' in film: del film['media_type']
+if film['original_title'] == film['title'] and film['original_language'] == 'en':
+del film['original_title'], film['original_language']
+film['date_added'] = datetime.today().strftime('%Y-%m-%d')
+return film
+def cleanup_tv_episode(tv_episode):
+"""Process a TV episode returned by the TMDB API by removing unnecessary fields and adding others"""
# eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJhZ2UiOiIiLCJhcGlrZXkiOiJlNGRiYmZhYi0wZmM3LTRkMmEtYjgyZi0wZmRmMjAwOTcwOGYiLCJjb21tdW5pdHlfc3VwcG9ydGVkIjpmYWxzZSwiZXhwIjoxNzA3NTQ1NzQ5LCJnZW5kZXIiOiIiLCJoaXRzX3Blcl9kYXkiOjEwMDAwMDAwMCwiaGl0c19wZXJfbW9udGgiOjEwMDAwMDAwMCwiaWQiOiIxNTE5NDMiLCJpc19tb2QiOmZhbHNlLCJpc19zeXN0ZW1fa2V5IjpmYWxzZSwiaXNfdHJ1c3RlZCI6ZmFsc2UsInBpbiI6bnVsbCwicm9sZXMiOltdLCJ0ZW5hbnQiOiJ0dmRiIiwidXVpZCI6IiJ9.gdBQ7q3GKhl-Sr5GjjKQ4X2lJEuS5povPkYciYTY4kr_NMy_w_qBUV1lAjR-OVOyh3EB_zjroT08JiUUOUJbRGGNBpr7ct1gJgaiqKOncwawZZHoQOZMUw-wX77rdAmW93XusX9vF3HyQGp6982E6AdUhfsdx4be8DWDtG3roKnxXiwD5dC_0_V7eB-fYdk3xWSkAjJ4u7JxZTvsKuCpKFJu5ag4HB13tEgo2wB6PR4Bea1ocv2n9BJLbJevUvz4GmS8zNMMLvOTg9kbxr_BGX77XT0UU8L3Nxr21RblHkFfiR3DrqAp-DdKBNa_r7W0-fa7LrZqHFRq8FlSfjDqp29-uS4zOPYx6DxiBOCO30h0mOEncwnjiWRKEbPHMO9i53J8rbyOykwLhx6O6q431BTNpB8RFhhk5_RxGZfYNwXNl0XgSQSxeJgM9z19G5ADOCr4fvyTAu3KvKbmMFqNRxblHWOLiqGQjMZpjwOizVLMcTxICEv4HY6Sf9hM_deETWERmagmChsj1VACLa7Yar8wABuoQDFV3dMbDijDeEZgBc7CZ9NmAlYFW2YlARlqzI3lyIAJz_WKpmxZM400gNlDICPVqhT4VNq8ZYA2_bfu8yJxbm6BLpgqw_IPP2VLzKoGN8dCmavU_QeET21GNeDXuad9XcqxmZl9K1wPJCA
+del tv_episode['still_path'], tv_episode['vote_average'], tv_episode['vote_count'], tv_episode['episode_type'], tv_episode['production_code'], tv_episode['runtime']
+if 'media_type' in tv_episode: del tv_episode['media_type']
+tv_episode['date_added'] = datetime.today().strftime('%Y-%m-%d')
+return tv_episode
+logging.basicConfig(filename='./logs/run.log', encoding='utf-8', level=logging.DEBUG)
 media_type = ''
-while media_type not in ['films', 'tv-episodes', 'tv-series', 'books']:
+while media_type not in ['film', 'tv', 'book']:
-media_type = input("Select media type [films|tv-episodes|tv-series|books]:")
+media_type = input("Select media type [film|tv|book]:")
-if 'films' == media_type:
+if 'film' == media_type:
 log = ''
 while log not in ['log', 'wishlist']:
 log = input ("Enter log to update [log|wishlist]:")
@@ -163,9 +208,9 @@ def main():
 while re.search("tt[0-9]+", imdb_id) is None:
 imdb_id = input("Enter IMDB ID:")
-add_item_to_log(imdb_id, media_type, log)
+import_film(imdb_id, log)
-elif 'books' == media_type:
+elif 'book' == media_type:
 log = ''
 while log not in ['log', 'current', 'wishlist']:
 log = input ("Enter log to update [log|current|wishlist]:")
@@ -174,24 +219,13 @@ def main():
 while re.search("[0-9]+", isbn) is None:
 isbn = input("Enter ISBN:")
-elif 'tv-episodes' == media_type:
+elif 'tv' == media_type:
-imdb_id = ''
-while re.search("tt[0-9]+", imdb_id) is None:
-imdb_id = input("Enter IMDB ID:")
-add_item_to_log(imdb_id, media_type, 'log')
-elif 'tv-series' == media_type:
 log = ''
-while log not in ['log', 'current', 'wishlist']:
+while log not in ['log', 'wishlist']:
-log = input ("Enter log to update [log|current|wishlist]:")
+log = input ("Enter log to update [log|wishlist]:")
 imdb_id = ''
 while re.search("tt[0-9]+", imdb_id) is None:
 imdb_id = input("Enter IMDB ID:")
-add_item_to_log(imdb_id, media_type, log)
+import_tv_episode(imdb_id, log)
-if __name__ == "__main__":
-main()


@@ -0,0 +1,143 @@
import json
import logging
import requests
import time
from urllib.request import urlopen
def process_items(items):
logging.info("Processing items…")
item_values = {}
for i, item in enumerate(items):
if 'id' not in item:
if 'Date Added' in item:
item_values['date_added'] = item['Date Added']
del item['Date Added']
if 'Date Watched' in item:
item_values['date_watched'] = item['Date Watched']
del item['Date Watched']
if 'Rewatch' in item:
item_values['is_rewatch'] = item['Rewatch']
del item['Rewatch']
if 'Comments' in item:
item_values['comments'] = item['Comments']
del item['Comments']
if 'IMDB ID' in item:
items[i] = populate_from_id(item)
else:
items[i] = populate_from_details(item)
items[i] |= item_values
with open("../data/films/wishlist.json", "w") as films_log:
json.dump(items, films_log, indent=4)
logging.info("Finished processing items")
def populate_from_details(item):
logging.info(f"Processing {item['Title']}")
api_url = f"https://api.themoviedb.org/3/search/movie"
# Sending API request
response = requests.get(
api_url,
params={
'query': item['Title'],
'include_adult': True,
'year': item['Release Year']
},
headers={'Authorization': 'Bearer eyJhbGciOiJIUzI1NiJ9.eyJhdWQiOiI1NWQ2ZjY3YzJlOTQwMDI1NTFmN2VkNmEyZWVjM2E3NyIsInN1YiI6IjUxNWMyNzkxMTljMjk1MTQ0ZDAzZDM0NCIsInNjb3BlcyI6WyJhcGlfcmVhZCJdLCJ2ZXJzaW9uIjoxfQ.92eNKubJ_CORCIIlta30P9Qjg_Q9gPRFDTfG4gyz9kY'}
)
# Process the response
if (200 == response.status_code):
logging.info(response.status_code)
elif (429 == response.status_code):
time.sleep(2)
populate_from_details(item)
else:
logging.error(response.text)
response_data = json.loads(response.text)
if 1 == len(response_data['results']):
film = response_data['results'][0]
return cleanup_film(film)
elif 0 == len(response_data['results']):
logging.warning(f"Returned no results for {item['Title']} ({item['Release Year']})")
elif 1 < len(response_data['results']):
response_data['results'] = [film for film in response_data['results'] if film['title'] == item['Title']]
if 1 < len(response_data['results']):
logging.warning(f"Returned more than one film for {item['Title']} ({item['Release Year']})")
item['IMDB ID'] = input(f"Enter IMDB ID for {item['Title']} ({item['Release Year']}):")
if item['IMDB ID'] != '':
return populate_from_id(item)
else:
logging.warning(f"Skipped {item['Title']} ({item['Release Year']})")
return item
def populate_from_id(item):
logging.info(f"Processing ID {item['IMDB ID']} ({item['Title']})…")
api_url = f"https://api.themoviedb.org/3/find/{item['IMDB ID']}"
# Sending API request
response = requests.get(
api_url,
params={
'external_source': 'imdb_id'
},
headers={'Authorization': 'Bearer eyJhbGciOiJIUzI1NiJ9.eyJhdWQiOiI1NWQ2ZjY3YzJlOTQwMDI1NTFmN2VkNmEyZWVjM2E3NyIsInN1YiI6IjUxNWMyNzkxMTljMjk1MTQ0ZDAzZDM0NCIsInNjb3BlcyI6WyJhcGlfcmVhZCJdLCJ2ZXJzaW9uIjoxfQ.92eNKubJ_CORCIIlta30P9Qjg_Q9gPRFDTfG4gyz9kY'}
)
# Process the response
if (200 == response.status_code):
logging.info(response.status_code)
elif (429 == response.status_code):
time.sleep(2)
populate_from_id(item)
else:
logging.error(response.text)
response_data = json.loads(response.text)
if len(response_data['movie_results']) > 1:
logging.warning(f"Returned more than one film for ID {item['IMDB ID']}")
return item
if len(response_data['movie_results']) > 0:
film = response_data['movie_results'][0]
return cleanup_film(film)
else:
logging.warning(f"Returning no results for {item['Title']}")
return item
return cleanup_film(film)
def cleanup_film(film):
del film['adult'], film['backdrop_path'], film['genre_ids'], film['popularity'], film['video'], film['vote_average'], film['vote_count']
if 'media_type' in film: del film['media_type']
if film['original_title'] == film['title'] and film['original_language'] == 'en':
del film['original_title'], film['original_language']
film['poster_path'] = f"https://www.themoviedb.org/t/p/original/{film['poster_path']}"
return film
logging.basicConfig(filename='../logs/run.log', encoding='utf-8', level=logging.DEBUG)
with open("../data/films/wishlist.json", "r") as films_log:
films = json.load(films_log)
process_items(films)


@@ -1,192 +0,0 @@
from dotenv import load_dotenv
import json
import logging
import os
import re
import requests
import time
from urllib.request import urlopen
from add_item import cleanup_result, import_by_id
logging.basicConfig(filename='./logs/run.log', encoding='utf-8', level=logging.DEBUG)
load_dotenv()
TMDB_API_KEY = os.getenv('TMDB_API_KEY')
TVDB_API_KEY = os.getenv('TVDB_API_KEY')
OPENLIBRARY_API_KEY = os.getenv('OPENLIBRARY_API_KEY')
if "" == TMDB_API_KEY: logging.error("TMDB API key not found")
if "" == TVDB_API_KEY: logging.error("TVDB API key not found")
if "" == OPENLIBRARY_API_KEY: logging.error("OpenLibrary API key not found")
def process_log(media_type, log):
logging.info(f"Processing {media_type}/{log}")
with open(f"./data/{media_type}/{log}.json", "r") as log_file:
log_items = json.load(log_file)
log_item_values = {}
for i, item in enumerate(log_items):
try:
if 'id' not in item:
if 'films' == media_type: item_title = item['Title']
elif 'tv-episodes' == media_type: item_title = item['Episode Title']
elif 'tv-series' == media_type: item_title = item['Show Title']
logging.debug(f"Processing {item_title}")
# Rename pre-existing fields
if 'Date Added' in item:
log_item_values['date_added'] = item['Date Added']
del item['Date Added']
if 'Date Watched' in item:
log_item_values['date_watched'] = item['Date Watched']
del item['Date Watched']
if 'Rewatch' in item:
log_item_values['is_rewatch'] = item['Rewatch']
del item['Rewatch']
if 'Comments' in item:
log_item_values['comments'] = item['Comments']
del item['Comments']
if 'Series Title' in item:
log_item_values['series_title'] = item['Series Title']
del item['Series Title']
if 'Episode Title' in item:
log_item_values['name'] = item['Episode Title']
del item['Episode Title']
if 'Episode Number' in item:
split_num = log_item_values['episode_number'].split("E")
log_item_values['episode_number'] = split_num[1]
log_item_values['season_number'] = split_num[0] or None
del item['Episode Number']
if 'IMDB ID' in item:
log_items[i] = import_by_id(item['IMDB ID'], media_type)
else:
log_items[i] = import_by_details(item, item_title, media_type)
if log_items[i] is None:
item['imdb_id'] = input(f"Enter IMDB ID for {item_title}: ")
if re.search("tt[0-9]+", item['imdb_id']) is not None:
log_items[i] = import_by_id(item['imdb_id'], media_type)
with open(f"./data/{media_type}/{log}.json", "w") as log_file:
json.dump(log_items, log_file, indent=4)
else:
logging.warning(f"Skipped {item_title}")
if log_items[i] is not None: log_items[i] |= log_item_values
except KeyError:
print(json.dumps(item, indent=4))
with open(f"./data/{media_type}/{log}.json", "w") as log_file:
json.dump(log_items, log_file, indent=4)
logging.info(f"Finished processing {media_type}/{log}")
def import_by_details(item, item_title, media_type):
if media_type in ['films', 'tv-series']:
return import_from_tmdb_by_details(item, item_title, media_type)
elif media_type in ['tv-episodes']:
return #import_from_tvdb_by_details(item, item_title, media_type)
elif media_type in ['books']:
return #import_from_openlibrary_by_details(item, item_title, media_type)
def import_from_tmdb_by_details(item, item_title, media_type):
"""Retrieve a film or TV series from TMDB using its title"""
logging.info(f"Processing {item_title}")
api_url = f"https://api.themoviedb.org/3/search/{'movie' if 'films' == media_type else 'tv'}"
# Sending API request
response = requests.get(
api_url,
params={
'query': item_title,
'include_adult': True,
'year': item['Release Year'] if 'Release Year' in item else None
},
headers={'Authorization': f"Bearer {TMDB_API_KEY}"}
)
# Process the response
if (200 == response.status_code):
logging.info(response.status_code)
elif (429 == response.status_code):
time.sleep(2)
import_from_tmdb_by_details(item)
else:
logging.error(response.text)
response_data = json.loads(response.text)['results']
if 1 == len(response_data):
return cleanup_result(response_data[0])
elif 0 == len(response_data):
logging.warning(f"Returned no {media_type} for {item_title}")
elif 1 < len(response_data):
if 'films' == media_type: title_key = 'title'
elif 'tv-series' == media_type: title_key = 'name'
response_data = [result for result in response_data if result[title_key] == item_title]
if 1 == len(response_data):
return cleanup_result(response_data[0])
else:
logging.warning(f"Returned more than one {media_type} for '{item_title}':\n")
print(json.dumps(response_data, indent=4))
idx = input("\nEnter the index of the result to use: ")
if "" != idx:
try:
return cleanup_result(response_data[int(idx)])
except:
logging.error("Index invalid!")
print("Index invalid!")
item['IMDB ID'] = input(f"Enter IMDB ID for {item_title}: ")
if '' != item['IMDB ID']:
return import_by_id(item['IMDB ID'], media_type)
else:
logging.warning(f"Skipped {item_title}")
return item
media_type = ''
while media_type not in ['films', 'tv-episodes', 'tv-series', 'books']:
media_type = input("Select media type [films|tv-episodes|tv-series|books]: ")
if 'films' == media_type:
log = ''
while log not in ['log', 'wishlist']:
log = input ("Enter log to process [log|wishlist]:")
process_log(media_type, log)
elif 'books' == media_type:
log = ''
while log not in ['log', 'current', 'wishlist']:
log = input ("Enter log to process [log|current|wishlist]:")
elif 'tv-episodes' == media_type:
process_log(media_type, 'log')
elif 'tv-series' == media_type:
log = ''
while log not in ['log', 'current', 'wishlist']:
log = input ("Enter log to process [log|current|wishlist]:")
process_log(media_type, log)