Compare commits

3 commits

SHA1        Message                 Date
5b92efcc98  process tv series'      2024-01-14 16:11:01 +01:00
d3dd9a1051  update scripts          2024-01-14 15:00:07 +01:00
8281a25562  reorg and update logs   2024-01-14 14:59:36 +01:00
11 changed files with 1743 additions and 817 deletions

.env.example (Normal file, 3 changes)

@@ -0,0 +1,3 @@
TMDB_API_KEY=
TVDB_API_KEY=
OPENLIBRARY_API_KEY=
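The keys above are left blank on purpose: the scripts in this change load them from a local .env file via python-dotenv. A minimal sketch of that pattern (mirroring scripts/process_logs.py below; the SystemExit guard is illustrative, not part of the repo):

# Sketch: load the keys defined in .env.example from a local .env file.
# Copy .env.example to .env and fill in real values first.
from dotenv import load_dotenv
import os

load_dotenv()
TMDB_API_KEY = os.getenv('TMDB_API_KEY')
if not TMDB_API_KEY:
    raise SystemExit("TMDB_API_KEY is not set")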

.gitignore (vendored, 2 changes)

@@ -1,6 +1,8 @@
cgi-bin/
__pycache__/
.well-known/
resources/
node_modules/
public/
logs/
.env

Changed file (name not shown)

@@ -1,4 +1,27 @@
[
{
"id": 596179,
"title": "Climate Change: The Facts",
"original_language": "en",
"original_title": "Climate Change: The Facts",
"overview": "After one of the hottest years on record, Sir David Attenborough looks at the science of climate change and potential solutions to this global threat. Interviews with some of the world\u2019s leading climate scientists explore recent extreme weather conditions such as unprecedented storms and catastrophic wildfires. They also reveal what dangerous levels of climate change could mean for both human populations and the natural world in the future.",
"poster_path": "/dLsPnpMoj6CtBw8OHIrCm3QSGsL.jpg",
"release_date": "2019-04-18",
"date_added": "2024-01-14",
"date_watched": "2019-09-11",
"imdb_id": "tt10095266"
},
{
"id": 18491,
"title": "Neon Genesis Evangelion: The End of Evangelion",
"original_language": "ja",
"original_title": "\u65b0\u4e16\u7d00\u30a8\u30f4\u30a1\u30f3\u30b2\u30ea\u30aa\u30f3\u5287\u5834\u7248 Air\uff0f\u307e\u3054\u3053\u308d\u3092\u3001\u541b\u306b",
"overview": "The second of two theatrically released follow-ups to the Neon Genesis Evangelion series. Comprising of two alternate episodes which were first intended to take the place of episodes 25 and 26, this finale answers many of the questions surrounding the series, while also opening up some new possibilities.",
"poster_path": "/j6G24dqI4WgUtChhWjfnI4lnmiK.jpg",
"release_date": "1997-07-19",
"date_added": "2024-01-14",
"date_watched": "2024-01-14"
},
{
"id": 613,
"title": "Downfall",

Changed file (name not shown)

@@ -9845,12 +9845,6 @@
"Date Watched": "2019-09-15",
"Episode Number": "1x01"
},
{
"Series Title": "Climate Change - The Facts",
"Episode Title": "Climate Change - The Facts",
"Date Watched": "2019-09-11",
"Episode Number": "1x01"
},
{
"Series Title": "Last Week Tonight With John Oliver",
"Episode Title": "Episode 171",

New file (name not shown)

@@ -0,0 +1,12 @@
[
{
"id": 242807,
"name": "Skibidi Toilet",
"overview": "Skibidi Toilet is a apocalyptic series where camera-mans fight with the skibidi toilets.",
"poster_path": "/4YtVG3wrFYwt4JjQKiasqWdweLV.jpg",
"first_air_date": "2023-02-07",
"date_added": "2024-01-14",
"date_started": "2024-01-12",
"added_by_id": "tt27814427"
}
]
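The entry above shows the shape written by add_item_to_log further down for a currently-watched series (TMDB fields plus date_added, date_started and added_by_id). A hypothetical quick check that reads it back; the path is inferred from the scripts' f"./data/{media_type}/{log}.json" pattern, since the file name is not shown in this view:

# Hypothetical helper: list the series currently being watched.
import json

with open("data/tv-series/current.json") as f:  # inferred path
    current = json.load(f)

for show in current:
    print(f"{show['name']} (started {show['date_started']})")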

data/tv-series/log.json (Normal file, 1332 changes)

File diff suppressed because it is too large

Deleted file (name not shown)

@@ -1,455 +0,0 @@
[
{
"Show Title": "All Watched Over By Machines of Loving Grace"
},
{
"Show Title": "The Trap: What Happend to Our Dream of Freedom"
},
{
"Show Title": "Louis Theroux: Miami Mega Jail"
},
{
"Show Title": "The Power of Nightmares: The Rise of the Politics of Fear"
},
{
"Show Title": "Pandora's Box: A Fable from the Age of Science"
},
{
"Show Title": "Century of the Self"
},
{
"Show Title": "Scavengers Reign"
},
{
"Show Title": "Once Upon a Time in Northern Ireland"
},
{
"Show Title": "A Touch of Cloth"
},
{
"Show Title": "Adventure Time"
},
{
"Show Title": "Agents of Cracked"
},
{
"Show Title": "Ambulance"
},
{
"Show Title": "American Experience"
},
{
"Show Title": "Archer"
},
{
"Show Title": "Arrested Development"
},
{
"Show Title": "Band of Brothers"
},
{
"Show Title": "Battlestar Galactica"
},
{
"Show Title": "Battlestar Galactica: Blood & Chrome"
},
{
"Show Title": "BBS: The Documentary"
},
{
"Show Title": "Black Books"
},
{
"Show Title": "Black Dynamite"
},
{
"Show Title": "Black Mirror"
},
{
"Show Title": "Blackadder"
},
{
"Show Title": "Blue Planet II"
},
{
"Show Title": "Bluestone 42"
},
{
"Show Title": "Boardwalk Empire"
},
{
"Show Title": "Brass Eye"
},
{
"Show Title": "Bravest Warriors"
},
{
"Show Title": "Britz"
},
{
"Show Title": "Broadchurch"
},
{
"Show Title": "Broken Trail"
},
{
"Show Title": "Brooklyn Nine-Nine"
},
{
"Show Title": "Caprica"
},
{
"Show Title": "Cardiac Arrest"
},
{
"Show Title": "Charlie Brooker's Gameswipe"
},
{
"Show Title": "Charlie Brooker's Screen Wipe"
},
{
"Show Title": "Charlie Brooker's Weekly Wipe"
},
{
"Show Title": "Chernobyl"
},
{
"Show Title": "China, IL"
},
{
"Show Title": "Climate Change - The Facts"
},
{
"Show Title": "Community"
},
{
"Show Title": "Continuum"
},
{
"Show Title": "Cowboy Bebop"
},
{
"Show Title": "Dark Net"
},
{
"Show Title": "Dead Set"
},
{
"Show Title": "Deadwood"
},
{
"Show Title": "Death Note"
},
{
"Show Title": "Departures"
},
{
"Show Title": "Doctor Who"
},
{
"Show Title": "Dr. Horrible's Sing-Along Blog"
},
{
"Show Title": "Enlisted"
},
{
"Show Title": "Fargo"
},
{
"Show Title": "Firefly"
},
{
"Show Title": "Fist of Fun"
},
{
"Show Title": "Frisky Dingo"
},
{
"Show Title": "Futurama"
},
{
"Show Title": "Generation Kill"
},
{
"Show Title": "Ghost in the Shell: Stand Alone Complex"
},
{
"Show Title": "Gilmore Girls"
},
{
"Show Title": "Gilmore Girls: A Year in the Life"
},
{
"Show Title": "Halo 4: Forward Unto Dawn"
},
{
"Show Title": "Halt and Catch Fire"
},
{
"Show Title": "Hannibal"
},
{
"Show Title": "Happy Valley"
},
{
"Show Title": "Have I Got News for You"
},
{
"Show Title": "Horrifying Planet"
},
{
"Show Title": "How TV Ruined Your Life"
},
{
"Show Title": "Human Planet"
},
{
"Show Title": "Jam"
},
{
"Show Title": "John Adams"
},
{
"Show Title": "Justified"
},
{
"Show Title": "Lake Dredge Appraisal"
},
{
"Show Title": "Les Revenants"
},
{
"Show Title": "Life"
},
{
"Show Title": "Line Of Duty"
},
{
"Show Title": "Look Around You"
},
{
"Show Title": "Louie"
},
{
"Show Title": "Mad Men"
},
{
"Show Title": "Million Dollar Extreme Presents: World Peace"
},
{
"Show Title": "Monkey Dust"
},
{
"Show Title": "Mr. Robot"
},
{
"Show Title": "Mr. Show"
},
{
"Show Title": "My Name Is Earl"
},
{
"Show Title": "Nathan Barley"
},
{
"Show Title": "Never Mind the Buzzcocks"
},
{
"Show Title": "Newswipe With Charlie Brooker"
},
{
"Show Title": "No Gods, No Masters: A History of Anarchism"
},
{
"Show Title": "Now and Then, Here and There"
},
{
"Show Title": "Once Upon a Time in Iraq"
},
{
"Show Title": "Orange is the New Black"
},
{
"Show Title": "Orphan Black"
},
{
"Show Title": "Our Planet (2019)"
},
{
"Show Title": "Our War"
},
{
"Show Title": "Pandemic: How to Prevent an Outbreak"
},
{
"Show Title": "Peaky Blinders"
},
{
"Show Title": "Peep Show"
},
{
"Show Title": "Penn & Teller: Bullshit!"
},
{
"Show Title": "Person of Interest"
},
{
"Show Title": "Planet Earth II"
},
{
"Show Title": "Planetes"
},
{
"Show Title": "Pushing Daisies"
},
{
"Show Title": "Reaper"
},
{
"Show Title": "Red Dwarf"
},
{
"Show Title": "Reporters At War"
},
{
"Show Title": "Rick and Morty"
},
{
"Show Title": "Rome"
},
{
"Show Title": "Search Party"
},
{
"Show Title": "Serial Experiments Lain"
},
{
"Show Title": "Sex Education"
},
{
"Show Title": "Sex House"
},
{
"Show Title": "Silicon Valley"
},
{
"Show Title": "Small Axe"
},
{
"Show Title": "South Park"
},
{
"Show Title": "Spaced"
},
{
"Show Title": "SpongeBob SquarePants"
},
{
"Show Title": "Star Trek"
},
{
"Show Title": "Stephen Fry: Out There"
},
{
"Show Title": "Stewart Lee's Comedy Vehicle"
},
{
"Show Title": "Ted Lasso"
},
{
"Show Title": "Tenacious D"
},
{
"Show Title": "Texhnolyze"
},
{
"Show Title": "That Mitchell & Webb Look"
},
{
"Show Title": "The Abolitionists"
},
{
"Show Title": "The Boondocks"
},
{
"Show Title": "The Chosen"
},
{
"Show Title": "The Crown"
},
{
"Show Title": "The Day Today"
},
{
"Show Title": "The End of the F***ing World"
},
{
"Show Title": "The Fall"
},
{
"Show Title": "The Good Place"
},
{
"Show Title": "The Inbetweeners"
},
{
"Show Title": "The IT Crowd"
},
{
"Show Title": "The Office"
},
{
"Show Title": "The Pacific"
},
{
"Show Title": "The Prisoner"
},
{
"Show Title": "The Promise"
},
{
"Show Title": "The Simpsons"
},
{
"Show Title": "The Sopranos"
},
{
"Show Title": "The State (2017)"
},
{
"Show Title": "The Thick of It"
},
{
"Show Title": "The Whitest Kids U Know"
},
{
"Show Title": "The Windsors"
},
{
"Show Title": "The Wire"
},
{
"Show Title": "The Witcher"
},
{
"Show Title": "True Detective"
},
{
"Show Title": "Unsere Mütter, unsere Väter"
},
{
"Show Title": "Utopia"
},
{
"Show Title": "Warriors (1999)"
},
{
"Show Title": "Ways of Seeing"
},
{
"Show Title": "Wolf Hall"
},
{
"Show Title": "Yonderland"
}
]

Changed file (name not shown)

@@ -1,205 +1,160 @@
# Script to add a new item to the log
from datetime import datetime
from dotenv import load_dotenv
import json
import logging
import os
import re
import requests
import time  # needed for the time.sleep() retry back-off below
from urllib.request import urlopen
def import_film(imdb_id, log):
    """Import a film via the TMDB API, given an IMDB ID"""
    logging.info(f"Processing {imdb_id}")
    api_url = f"https://api.themoviedb.org/3/find/{imdb_id}"
    # Sending API request
    response = requests.get(
        api_url,
        params={
            'external_source': 'imdb_id'
        },
headers={'Authorization': 'Bearer eyJhbGciOiJIUzI1NiJ9.eyJhdWQiOiI1NWQ2ZjY3YzJlOTQwMDI1NTFmN2VkNmEyZWVjM2E3NyIsInN1YiI6IjUxNWMyNzkxMTljMjk1MTQ0ZDAzZDM0NCIsInNjb3BlcyI6WyJhcGlfcmVhZCJdLCJ2ZXJzaW9uIjoxfQ.92eNKubJ_CORCIIlta30P9Qjg_Q9gPRFDTfG4gyz9kY'}
    )
    # Process the response
    if (200 == response.status_code):
        logging.info(response.status_code)
    elif (429 == response.status_code):
        time.sleep(2)
        import_film(imdb_id)
        return
    else:
        logging.error(response.text)
    response_data = json.loads(response.text)
    if 1 == len(response_data['movie_results']):
        film = response_data['movie_results'][0]
    elif 0 == len(response_data['movie_results']):
        logging.error(f"Returned no results for {imdb_id}")
        return
    elif 1 < len(response_data['movie_results']):
        logging.warning(f"Returned more than one film for ID {imdb_id}")
        print(f"Returned more than one film for ID {imdb_id}:")
        print(json.dumps(response_data['movie_results'], indent=4))
        id = input("Enter the index of the result to use:")
        try:
            film = response_data['movie_results'][id]
        except:
            logging.error("Index invalid!")
            print("Index invalid!")
    # Modify the returned result to add additional data
    film = cleanup_film(film)
    if 'log' == log:
        date_watched = ''
        while re.search('[0-9]{4}-[0-9]{2}-[0-9]{2}', date_watched) is None:
            date_watched = input("Enter date watched [YYYY-MM-DD, t for today]:")
            if 't' == date_watched: date_watched = datetime.today().strftime('%Y-%m-%d')
        film['date_watched'] = date_watched
        is_rewatch = ''
        while is_rewatch not in ['y', 'n']:
            is_rewatch = input("Is this a rewatch? [y/n]:")
        if 'y' == is_rewatch: film['is_rewatch'] = True
        comments = input("Enter comments (optional):")
        if '' != comments: film['comments'] = comments
    # Validation step
    correct = ''
    print("Film data to add:")
    print(json.dumps(film, indent=4))
    if 'y' != input("Does this look correct? [y]:"): return
    # Save changes
    logging.info('Adding film to log…')
    with open(f"./data/films/{log}.json", "r") as films_log:
        films = json.load(films_log)
    films.insert(0, film)
    with open(f"./data/films/{log}.json", "w") as films_log:
        json.dump(films, films_log, indent=4)
    logging.info(f"Added film {film['title']} ({film['release_date']}) to log {log}")
def import_tv_episode(imdb_id, log):
    """Import a TV episode via the TMDB API, given an IMDB ID"""
    logging.info(f"Processing {imdb_id}")
    api_url = f"https://api.themoviedb.org/3/find/{imdb_id}"
    # Sending API request
    response = requests.get(
        api_url,
        params={
            'external_source': 'imdb_id'
        },
headers={'Authorization': 'Bearer eyJhbGciOiJIUzI1NiJ9.eyJhdWQiOiI1NWQ2ZjY3YzJlOTQwMDI1NTFmN2VkNmEyZWVjM2E3NyIsInN1YiI6IjUxNWMyNzkxMTljMjk1MTQ0ZDAzZDM0NCIsInNjb3BlcyI6WyJhcGlfcmVhZCJdLCJ2ZXJzaW9uIjoxfQ.92eNKubJ_CORCIIlta30P9Qjg_Q9gPRFDTfG4gyz9kY'}
    )
    # Process the response
    if (200 == response.status_code):
        logging.info(response.status_code)
    elif (429 == response.status_code):
        time.sleep(2)
        import_film(imdb_id)
        return
    else:
        logging.error(response.text)
    response_data = json.loads(response.text)
    if 1 == len(response_data['tv_episode_results']):
        tv_episode = response_data['tv_episode_results'][0]
    elif 0 == len(response_data['tv_episode_results']):
        logging.error(f"Returned no results for {imdb_id}")
        return
    elif 1 < len(response_data['tv_episode_results']):
        logging.warning(f"Returned more than one TV episode for ID {imdb_id}")
        print(f"Returned more than one TV episode for ID {imdb_id}:")
        print(json.dumps(response_data['tv_episode_results'], indent=4))
        id = input("Enter the index of the result to use:")
        try:
            tv_episode = response_data['tv_episode_results'][id]
        except:
            logging.error("Index invalid!")
            print("Index invalid!")
    # Modify the returned result to add additional data
    tv_episode = cleanup_tv_episode(tv_episode)
    if 'log' == log:
        date_watched = ''
        while re.search('[0-9]{4}-[0-9]{2}-[0-9]{2}', date_watched) is None:
            date_watched = input("Enter date watched [YYYY-MM-DD, t for today]:")
            if 't' == date_watched: date_watched = datetime.today().strftime('%Y-%m-%d')
        tv_episode['date_watched'] = date_watched
        is_rewatch = ''
        while is_rewatch not in ['y', 'n']:
            is_rewatch = input("Is this a rewatch? [y/n]:")
        if 'y' == is_rewatch: tv_episode['is_rewatch'] = True
        comments = input("Enter comments (optional):")
        if '' != comments: tv_episode['comments'] = comments
    # Validation step
    correct = ''
    print("TV episode data to add:")
    print(json.dumps(tv_episode, indent=4))
    if 'y' != input("Does this look correct? [y]:"): return
    # Save changes
    logging.info('Adding TV episode to log…')
    with open(f"./data/tv/{log}.json", "r") as tv_episodes_log:
        tv_episodes = json.load(tv_episodes_log)
    tv_episodes.insert(0, tv_episode)
    with open(f"./data/tv/{log}.json", "w") as tv_episodes_log:
        json.dump(tv_episodes, tv_episodes_log, indent=4)
    logging.info(f"Added TV episode {tv_episode['name']} ({tv_episode['air_date']}) to log {log}")
def cleanup_film(film):
    """Process a film returned by the TMDB API by removing unnecessary fields and adding others"""
    del film['adult'], film['backdrop_path'], film['genre_ids'], film['popularity'], film['video'], film['vote_average'], film['vote_count']
    if 'media_type' in film: del film['media_type']
    if film['original_title'] == film['title'] and film['original_language'] == 'en':
        del film['original_title'], film['original_language']
    film['date_added'] = datetime.today().strftime('%Y-%m-%d')
    return film
def cleanup_tv_episode(tv_episode):
    """Process a TV episode returned by the TMDB API by removing unnecessary fields and adding others"""
# eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJhZ2UiOiIiLCJhcGlrZXkiOiJlNGRiYmZhYi0wZmM3LTRkMmEtYjgyZi0wZmRmMjAwOTcwOGYiLCJjb21tdW5pdHlfc3VwcG9ydGVkIjpmYWxzZSwiZXhwIjoxNzA3NTQ1NzQ5LCJnZW5kZXIiOiIiLCJoaXRzX3Blcl9kYXkiOjEwMDAwMDAwMCwiaGl0c19wZXJfbW9udGgiOjEwMDAwMDAwMCwiaWQiOiIxNTE5NDMiLCJpc19tb2QiOmZhbHNlLCJpc19zeXN0ZW1fa2V5IjpmYWxzZSwiaXNfdHJ1c3RlZCI6ZmFsc2UsInBpbiI6bnVsbCwicm9sZXMiOltdLCJ0ZW5hbnQiOiJ0dmRiIiwidXVpZCI6IiJ9.gdBQ7q3GKhl-Sr5GjjKQ4X2lJEuS5povPkYciYTY4kr_NMy_w_qBUV1lAjR-OVOyh3EB_zjroT08JiUUOUJbRGGNBpr7ct1gJgaiqKOncwawZZHoQOZMUw-wX77rdAmW93XusX9vF3HyQGp6982E6AdUhfsdx4be8DWDtG3roKnxXiwD5dC_0_V7eB-fYdk3xWSkAjJ4u7JxZTvsKuCpKFJu5ag4HB13tEgo2wB6PR4Bea1ocv2n9BJLbJevUvz4GmS8zNMMLvOTg9kbxr_BGX77XT0UU8L3Nxr21RblHkFfiR3DrqAp-DdKBNa_r7W0-fa7LrZqHFRq8FlSfjDqp29-uS4zOPYx6DxiBOCO30h0mOEncwnjiWRKEbPHMO9i53J8rbyOykwLhx6O6q431BTNpB8RFhhk5_RxGZfYNwXNl0XgSQSxeJgM9z19G5ADOCr4fvyTAu3KvKbmMFqNRxblHWOLiqGQjMZpjwOizVLMcTxICEv4HY6Sf9hM_deETWERmagmChsj1VACLa7Yar8wABuoQDFV3dMbDijDeEZgBc7CZ9NmAlYFW2YlARlqzI3lyIAJz_WKpmxZM400gNlDICPVqhT4VNq8ZYA2_bfu8yJxbm6BLpgqw_IPP2VLzKoGN8dCmavU_QeET21GNeDXuad9XcqxmZl9K1wPJCA
    del tv_episode['still_path'], tv_episode['vote_average'], tv_episode['vote_count'], tv_episode['episode_type'], tv_episode['production_code'], tv_episode['runtime']
    if 'media_type' in tv_episode: del tv_episode['media_type']
    tv_episode['date_added'] = datetime.today().strftime('%Y-%m-%d')
    return tv_episode
logging.basicConfig(filename='./logs/run.log', encoding='utf-8', level=logging.DEBUG)
media_type = ''
while media_type not in ['film', 'tv', 'book']:
    media_type = input("Select media type [film|tv|book]:")
load_dotenv()
if 'film' == media_type:
TMDB_API_KEY = os.getenv('TMDB_API_KEY')
TVDB_API_KEY = os.getenv('TVDB_API_KEY')
OPENLIBRARY_API_KEY = os.getenv('OPENLIBRARY_API_KEY')
if "" == TMDB_API_KEY: logging.error("TMDB API key not found")
if "" == TVDB_API_KEY: logging.error("TVDB API key not found")
if "" == OPENLIBRARY_API_KEY: logging.error("OpenLibrary API key not found")
def add_item_to_log(item_id, media_type, log):
    """Add a film, book, TV series or TV episode to a log"""
    logging.info(f"Processing {item_id}")
    item = import_by_id(item_id, media_type)
    if log in ['log', 'current']:
        if 'log' == log:
            date_watched = ''
            while re.search('[0-9]{4}-[0-9]{2}-[0-9]{2}', date_watched) is None:
                date_watched = input("Enter date watched [YYYY-MM-DD, t for today]:")
                if 't' == date_watched: date_watched = datetime.today().strftime('%Y-%m-%d')
            item['date_watched'] = date_watched
        elif 'current' == log:
            date_started = ''
            while re.search('[0-9]{4}-[0-9]{2}-[0-9]{2}', date_started) is None:
                date_started = input("Enter date started [YYYY-MM-DD, t for today]:")
                if 't' == date_started: date_started = datetime.today().strftime('%Y-%m-%d')
            item['date_started'] = date_started
        is_rewatch = ''
        while is_rewatch not in ['y', 'n']:
            is_rewatch = input("Is this a rewatch? [y/n]:")
        if 'y' == is_rewatch: item['is_rewatch'] = True
    item['added_by_id'] = item_id
    comments = input("Enter comments (optional):")
    if '' != comments: item['comments'] = comments
    # Validation step
    correct = ''
    print(f"{media_type} data to add:\n")
    print(json.dumps(item, indent=4))
    if 'y' != input("\nDoes this look correct? [y]: "): return
    # Save changes
    logging.info(f"Adding {media_type} to {log}")
    with open(f"./data/{media_type}/{log}.json", "r") as log_file:
        log_items = json.load(log_file)
    log_items.insert(0, item)
    with open(f"./data/{media_type}/{log}.json", "w") as log_file:
        json.dump(log_items, log_file, indent=4)
    logging.info(f"Added {media_type} {item_id} to {log}")
def import_by_id(import_id, media_type):
    if media_type in ['films', 'tv-series']:
        return import_from_imdb_by_id(import_id, media_type)
    elif media_type in ['tv-episodes']:
        return #import_from_tvdb_by_id(import_id, media_type)
    elif media_type in ['books']:
        return #import_from_openlibrary_by_id(import_id, media_type)
def import_from_imdb_by_id(imdb_id, media_type):
    """Retrieve a film, TV show or TV episode from TMDB using an IMDB ID"""
    api_url = f"https://api.themoviedb.org/3/find/{imdb_id}"
    # Sending API request
    response = requests.get(
        api_url,
        params={
            'external_source': 'imdb_id'
        },
        headers={'Authorization': f"Bearer {TMDB_API_KEY}"}
    )
    # Process the response
    if (200 == response.status_code):
        logging.info(response.status_code)
    elif (429 == response.status_code):
        time.sleep(2)
        return import_from_imdb_by_id(imdb_id, media_type)
    else:
        logging.error(response.text)
    if ('films' == media_type): results_key = 'movie_results'
    elif ('tv-episodes' == media_type): results_key = 'TODO'
    elif ('tv-series' == media_type): results_key = 'tv_results'
    response_data = json.loads(response.text)[results_key]
    if 1 == len(response_data):
        item = response_data[0]
    elif 0 == len(response_data):
        logging.error(f"Returned no results for {imdb_id}")
        return
    elif 1 < len(response_data):
        logging.warning(f"Returned more than one {media_type} for ID {imdb_id}")
        print(f"Returned more than one {media_type} for ID {imdb_id}:\n")
        print(json.dumps(response_data, indent=4))
        idx = input("\nEnter the index of the result to use: ")
        try:
            item = response_data[int(idx)]
        except:
            logging.error("Index invalid!")
            print("Index invalid!")
    # Modify the returned result to add additional data
    return cleanup_result(item)
def cleanup_result(item):
    """Process a film or TV episode returned by the TMDB API by removing unnecessary fields and adding others"""
    for field_name in ['adult', 'backdrop_path', 'episode_type', 'genre_ids', 'media_type', 'origin_country', 'popularity', 'production_code', 'runtime', 'still_path', 'video', 'vote_average', 'vote_count']:
        if field_name in item: del item[field_name]
    # TODO - select automatically
    title_key = 'name'
    if f"original_{title_key}" in item and 'original_language' in item:
        if item[f"original_{title_key}"] == item[title_key] and item['original_language'] == 'en':
            del item[f"original_{title_key}"], item['original_language']
    if 'date_added' not in item: item['date_added'] = datetime.today().strftime('%Y-%m-%d')
    return item
def main():
    media_type = ''
    while media_type not in ['films', 'tv-episodes', 'tv-series', 'books']:
        media_type = input("Select media type [films|tv-episodes|tv-series|books]:")
    if 'films' == media_type:
        log = ''
        while log not in ['log', 'wishlist']:
            log = input ("Enter log to update [log|wishlist]:")
@@ -208,9 +163,9 @@ while media_type not in ['film', 'tv', 'book']:
        while re.search("tt[0-9]+", imdb_id) is None:
            imdb_id = input("Enter IMDB ID:")
        import_film(imdb_id, log)
        add_item_to_log(imdb_id, media_type, log)
    elif 'book' == media_type:
    elif 'books' == media_type:
        log = ''
        while log not in ['log', 'current', 'wishlist']:
            log = input ("Enter log to update [log|current|wishlist]:")
@@ -219,13 +174,24 @@ while media_type not in ['film', 'tv', 'book']:
        while re.search("[0-9]+", isbn) is None:
            isbn = input("Enter ISBN:")
    elif 'tv' == media_type:
    elif 'tv-episodes' == media_type:
        imdb_id = ''
        while re.search("tt[0-9]+", imdb_id) is None:
            imdb_id = input("Enter IMDB ID:")
        add_item_to_log(imdb_id, media_type, 'log')
    elif 'tv-series' == media_type:
        log = ''
        while log not in ['log', 'wishlist']:
            log = input ("Enter log to update [log|wishlist]:")
        while log not in ['log', 'current', 'wishlist']:
            log = input ("Enter log to update [log|current|wishlist]:")
        imdb_id = ''
        while re.search("tt[0-9]+", imdb_id) is None:
            imdb_id = input("Enter IMDB ID:")
        import_tv_episode(imdb_id, log)
        add_item_to_log(imdb_id, media_type, log)
if __name__ == "__main__":
    main()
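The importer above is built around TMDB's "find by external ID" endpoint. A standalone sketch of that single call, assuming a valid TMDB read access token in the TMDB_API_KEY environment variable (the IMDB ID is just an example, not taken from the logs):

# Sketch: look up a film on TMDB by IMDB ID, as import_from_imdb_by_id does.
import os
import requests

imdb_id = "tt0137523"  # example ID for illustration only
response = requests.get(
    f"https://api.themoviedb.org/3/find/{imdb_id}",
    params={"external_source": "imdb_id"},
    headers={"Authorization": f"Bearer {os.environ['TMDB_API_KEY']}"},
)
response.raise_for_status()
results = response.json().get("movie_results", [])
print(results[0]["title"] if results else "no match")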

Deleted file (name not shown)

@@ -1,143 +0,0 @@
import json
import logging
import requests
import time
from urllib.request import urlopen
def process_items(items):
    logging.info("Processing items…")
    item_values = {}
    for i, item in enumerate(items):
        if 'id' not in item:
            if 'Date Added' in item:
                item_values['date_added'] = item['Date Added']
                del item['Date Added']
            if 'Date Watched' in item:
                item_values['date_watched'] = item['Date Watched']
                del item['Date Watched']
            if 'Rewatch' in item:
                item_values['is_rewatch'] = item['Rewatch']
                del item['Rewatch']
            if 'Comments' in item:
                item_values['comments'] = item['Comments']
                del item['Comments']
            if 'IMDB ID' in item:
                items[i] = populate_from_id(item)
            else:
                items[i] = populate_from_details(item)
            items[i] |= item_values
    with open("../data/films/wishlist.json", "w") as films_log:
        json.dump(items, films_log, indent=4)
    logging.info("Finished processing items")
def populate_from_details(item):
    logging.info(f"Processing {item['Title']}")
    api_url = f"https://api.themoviedb.org/3/search/movie"
    # Sending API request
    response = requests.get(
        api_url,
        params={
            'query': item['Title'],
            'include_adult': True,
            'year': item['Release Year']
        },
headers={'Authorization': 'Bearer eyJhbGciOiJIUzI1NiJ9.eyJhdWQiOiI1NWQ2ZjY3YzJlOTQwMDI1NTFmN2VkNmEyZWVjM2E3NyIsInN1YiI6IjUxNWMyNzkxMTljMjk1MTQ0ZDAzZDM0NCIsInNjb3BlcyI6WyJhcGlfcmVhZCJdLCJ2ZXJzaW9uIjoxfQ.92eNKubJ_CORCIIlta30P9Qjg_Q9gPRFDTfG4gyz9kY'}
    )
    # Process the response
    if (200 == response.status_code):
        logging.info(response.status_code)
    elif (429 == response.status_code):
        time.sleep(2)
        populate_from_details(item)
    else:
        logging.error(response.text)
    response_data = json.loads(response.text)
    if 1 == len(response_data['results']):
        film = response_data['results'][0]
        return cleanup_film(film)
    elif 0 == len(response_data['results']):
        logging.warning(f"Returned no results for {item['Title']} ({item['Release Year']})")
    elif 1 < len(response_data['results']):
        response_data['results'] = [film for film in response_data['results'] if film['title'] == item['Title']]
        if 1 < len(response_data['results']):
            logging.warning(f"Returned more than one film for {item['Title']} ({item['Release Year']})")
            item['IMDB ID'] = input(f"Enter IMDB ID for {item['Title']} ({item['Release Year']}):")
            if item['IMDB ID'] != '':
                return populate_from_id(item)
            else:
                logging.warning(f"Skipped {item['Title']} ({item['Release Year']})")
    return item
def populate_from_id(item):
    logging.info(f"Processing ID {item['IMDB ID']} ({item['Title']})…")
    api_url = f"https://api.themoviedb.org/3/find/{item['IMDB ID']}"
    # Sending API request
    response = requests.get(
        api_url,
        params={
            'external_source': 'imdb_id'
        },
headers={'Authorization': 'Bearer eyJhbGciOiJIUzI1NiJ9.eyJhdWQiOiI1NWQ2ZjY3YzJlOTQwMDI1NTFmN2VkNmEyZWVjM2E3NyIsInN1YiI6IjUxNWMyNzkxMTljMjk1MTQ0ZDAzZDM0NCIsInNjb3BlcyI6WyJhcGlfcmVhZCJdLCJ2ZXJzaW9uIjoxfQ.92eNKubJ_CORCIIlta30P9Qjg_Q9gPRFDTfG4gyz9kY'}
    )
    # Process the response
    if (200 == response.status_code):
        logging.info(response.status_code)
    elif (429 == response.status_code):
        time.sleep(2)
        populate_from_id(item)
    else:
        logging.error(response.text)
    response_data = json.loads(response.text)
    if len(response_data['movie_results']) > 1:
        logging.warning(f"Returned more than one film for ID {item['IMDB ID']}")
        return item
    if len(response_data['movie_results']) > 0:
        film = response_data['movie_results'][0]
        return cleanup_film(film)
    else:
        logging.warning(f"Returning no results for {item['Title']}")
        return item
    return cleanup_film(film)
def cleanup_film(film):
    del film['adult'], film['backdrop_path'], film['genre_ids'], film['popularity'], film['video'], film['vote_average'], film['vote_count']
    if 'media_type' in film: del film['media_type']
    if film['original_title'] == film['title'] and film['original_language'] == 'en':
        del film['original_title'], film['original_language']
    film['poster_path'] = f"https://www.themoviedb.org/t/p/original/{film['poster_path']}"
    return film
logging.basicConfig(filename='../logs/run.log', encoding='utf-8', level=logging.DEBUG)
with open("../data/films/wishlist.json", "r") as films_log:
    films = json.load(films_log)
process_items(films)

scripts/process_logs.py (Normal file, 192 changes)

@@ -0,0 +1,192 @@
from dotenv import load_dotenv
import json
import logging
import os
import re
import requests
import time
from urllib.request import urlopen
from add_item import cleanup_result, import_by_id
logging.basicConfig(filename='./logs/run.log', encoding='utf-8', level=logging.DEBUG)
load_dotenv()
TMDB_API_KEY = os.getenv('TMDB_API_KEY')
TVDB_API_KEY = os.getenv('TVDB_API_KEY')
OPENLIBRARY_API_KEY = os.getenv('OPENLIBRARY_API_KEY')
if "" == TMDB_API_KEY: logging.error("TMDB API key not found")
if "" == TVDB_API_KEY: logging.error("TVDB API key not found")
if "" == OPENLIBRARY_API_KEY: logging.error("OpenLibrary API key not found")
def process_log(media_type, log):
    logging.info(f"Processing {media_type}/{log}")
    with open(f"./data/{media_type}/{log}.json", "r") as log_file:
        log_items = json.load(log_file)
    log_item_values = {}
    for i, item in enumerate(log_items):
        try:
            if 'id' not in item:
                if 'films' == media_type: item_title = item['Title']
                elif 'tv-episodes' == media_type: item_title = item['Episode Title']
                elif 'tv-series' == media_type: item_title = item['Show Title']
                logging.debug(f"Processing {item_title}")
                # Rename pre-existing fields
                if 'Date Added' in item:
                    log_item_values['date_added'] = item['Date Added']
                    del item['Date Added']
                if 'Date Watched' in item:
                    log_item_values['date_watched'] = item['Date Watched']
                    del item['Date Watched']
                if 'Rewatch' in item:
                    log_item_values['is_rewatch'] = item['Rewatch']
                    del item['Rewatch']
                if 'Comments' in item:
                    log_item_values['comments'] = item['Comments']
                    del item['Comments']
                if 'Series Title' in item:
                    log_item_values['series_title'] = item['Series Title']
                    del item['Series Title']
                if 'Episode Title' in item:
                    log_item_values['name'] = item['Episode Title']
                    del item['Episode Title']
                if 'Episode Number' in item:
                    split_num = item['Episode Number'].split("E")
                    log_item_values['episode_number'] = split_num[1]
                    log_item_values['season_number'] = split_num[0] or None
                    del item['Episode Number']
                if 'IMDB ID' in item:
                    log_items[i] = import_by_id(item['IMDB ID'], media_type)
                else:
                    log_items[i] = import_by_details(item, item_title, media_type)
                if log_items[i] is None:
                    item['imdb_id'] = input(f"Enter IMDB ID for {item_title}: ")
                    if re.search("tt[0-9]+", item['imdb_id']) is not None:
                        log_items[i] = import_by_id(item['imdb_id'], media_type)
                        with open(f"./data/{media_type}/{log}.json", "w") as log_file:
                            json.dump(log_items, log_file, indent=4)
                    else:
                        logging.warning(f"Skipped {item_title}")
                if log_items[i] is not None: log_items[i] |= log_item_values
        except KeyError:
            print(json.dumps(item, indent=4))
    with open(f"./data/{media_type}/{log}.json", "w") as log_file:
        json.dump(log_items, log_file, indent=4)
    logging.info(f"Finished processing {media_type}/{log}")
def import_by_details(item, item_title, media_type):
    if media_type in ['films', 'tv-series']:
        return import_from_tmdb_by_details(item, item_title, media_type)
    elif media_type in ['tv-episodes']:
        return #import_from_tvdb_by_details(item, item_title, media_type)
    elif media_type in ['books']:
        return #import_from_openlibrary_by_details(item, item_title, media_type)
def import_from_tmdb_by_details(item, item_title, media_type):
    """Retrieve a film or TV series from TMDB using its title"""
    logging.info(f"Processing {item_title}")
    api_url = f"https://api.themoviedb.org/3/search/{'movie' if 'films' == media_type else 'tv'}"
    # Sending API request
    response = requests.get(
        api_url,
        params={
            'query': item_title,
            'include_adult': True,
            'year': item['Release Year'] if 'Release Year' in item else None
        },
        headers={'Authorization': f"Bearer {TMDB_API_KEY}"}
    )
    # Process the response
    if (200 == response.status_code):
        logging.info(response.status_code)
    elif (429 == response.status_code):
        time.sleep(2)
        return import_from_tmdb_by_details(item, item_title, media_type)
    else:
        logging.error(response.text)
    response_data = json.loads(response.text)['results']
    if 1 == len(response_data):
        return cleanup_result(response_data[0])
    elif 0 == len(response_data):
        logging.warning(f"Returned no {media_type} for {item_title}")
    elif 1 < len(response_data):
        if 'films' == media_type: title_key = 'title'
        elif 'tv-series' == media_type: title_key = 'name'
        response_data = [result for result in response_data if result[title_key] == item_title]
        if 1 == len(response_data):
            return cleanup_result(response_data[0])
        else:
            logging.warning(f"Returned more than one {media_type} for '{item_title}':\n")
            print(json.dumps(response_data, indent=4))
            idx = input("\nEnter the index of the result to use: ")
            if "" != idx:
                try:
                    return cleanup_result(response_data[int(idx)])
                except:
                    logging.error("Index invalid!")
                    print("Index invalid!")
            item['IMDB ID'] = input(f"Enter IMDB ID for {item_title}: ")
            if '' != item['IMDB ID']:
                return import_by_id(item['IMDB ID'], media_type)
            else:
                logging.warning(f"Skipped {item_title}")
                return item
media_type = ''
while media_type not in ['films', 'tv-episodes', 'tv-series', 'books']:
    media_type = input("Select media type [films|tv-episodes|tv-series|books]: ")
if 'films' == media_type:
    log = ''
    while log not in ['log', 'wishlist']:
        log = input ("Enter log to process [log|wishlist]:")
    process_log(media_type, log)
elif 'books' == media_type:
    log = ''
    while log not in ['log', 'current', 'wishlist']:
        log = input ("Enter log to process [log|current|wishlist]:")
elif 'tv-episodes' == media_type:
    process_log(media_type, 'log')
elif 'tv-series' == media_type:
    log = ''
    while log not in ['log', 'current', 'wishlist']:
        log = input ("Enter log to process [log|current|wishlist]:")
    process_log(media_type, log)
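The merge step in process_log (log_items[i] |= log_item_values) uses the Python 3.9+ in-place dict union to copy the fields preserved from the old hand-written entry onto the freshly imported TMDB record. A small illustration with values shaped like the data above (the entries themselves are made up):

# Illustration of the "|=" merge used in process_log (requires Python 3.9+).
fresh = {"id": 613, "title": "Downfall", "date_added": "2024-01-14"}
preserved = {"date_watched": "2019-09-15", "is_rewatch": False}
fresh |= preserved  # right-hand values win on key clashes
print(fresh["title"], fresh["date_watched"])  # Downfall 2019-09-15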