Revisions to ZIM parsing and Netflix parsing, and updates to HTML scraping to include the synopsis

prabhaavp
2026-03-19 01:56:14 -04:00
parent 0a70920ba9
commit 492160c3a3
13 changed files with 252 additions and 63 deletions

.gitignore (vendored)

@@ -217,4 +217,6 @@ __marimo__/
 .streamlit/secrets.toml
 # Data Folder
 *.tsv
+data/

.idea/.gitignore (generated, vendored, new file)

@@ -0,0 +1,8 @@
+# Default ignored files
+/shelf/
+/workspace.xml
+# Editor-based HTTP Client requests
+/httpRequests/
+# Datasource local storage ignored files
+/dataSources/
+/dataSources.local.xml

.idea/datamining_881_new.iml (generated, new file)

@@ -0,0 +1,11 @@
<?xml version="1.0" encoding="UTF-8"?>
<module type="PYTHON_MODULE" version="4">
<component name="NewModuleRootManager">
<content url="file://$MODULE_DIR$">
<excludeFolder url="file://$MODULE_DIR$/.venv" />
<excludeFolder url="file://$MODULE_DIR$/data/processed/wikipedia_html" />
</content>
<orderEntry type="jdk" jdkName="Python 3.13 (datamining_881_new)" jdkType="Python SDK" />
<orderEntry type="sourceFolder" forTests="false" />
</component>
</module>

.idea/inspectionProfiles/Project_Default.xml (new file)

@@ -0,0 +1,13 @@
+<component name="InspectionProjectProfileManager">
+  <profile version="1.0">
+    <option name="myName" value="Project Default" />
+    <inspection_tool class="Eslint" enabled="true" level="WARNING" enabled_by_default="true" />
+    <inspection_tool class="PyUnresolvedReferencesInspection" enabled="true" level="WARNING" enabled_by_default="true">
+      <option name="ignoredIdentifiers">
+        <list>
+          <option value="dict.*" />
+        </list>
+      </option>
+    </inspection_tool>
+  </profile>
+</component>

.idea/inspectionProfiles/profiles_settings.xml (new file)

@@ -0,0 +1,6 @@
+<component name="InspectionProjectProfileManager">
+  <settings>
+    <option name="USE_PROJECT_PROFILE" value="false" />
+    <version value="1.0" />
+  </settings>
+</component>

.idea/misc.xml (generated, new file)

@@ -0,0 +1,7 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="Black">
+    <option name="sdkName" value="Python 3.13 (datamining_881_new)" />
+  </component>
+  <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.13 (datamining_881_new)" project-jdk-type="Python SDK" />
+</project>

.idea/modules.xml (generated, new file)

@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="ProjectModuleManager">
+    <modules>
+      <module fileurl="file://$PROJECT_DIR$/.idea/datamining_881_new.iml" filepath="$PROJECT_DIR$/.idea/datamining_881_new.iml" />
+    </modules>
+  </component>
+</project>

.idea/vcs.xml (generated, new file)

@@ -0,0 +1,7 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="VcsDirectoryMappings">
+    <mapping directory="" vcs="Git" />
+    <mapping directory="$PROJECT_DIR$" vcs="Git" />
+  </component>
+</project>


@@ -2,45 +2,42 @@ import pandas as pd
 import os
 from scrape import extract_movie_info
-script_dir = os.path.dirname(os.path.abspath(__file__))
-# file_path = os.path.join(script_dir, "..", "sample_data.xlsx")
-# movie_data = pd.read_excel(file_path)
-# print(movie_data.columns)
 BASE_DIR = os.path.dirname(os.path.abspath(__file__))
-INPUT_DIR = r'C:\Users\Prabhaav\Projects\PyCharm\datamining_881\data\processed\wikipedia_html'
+INPUT_DIR = os.path.join(BASE_DIR, "../data/processed/wikipedia_html_test/")
 SPREADSHEET_DIR = os.path.join(BASE_DIR, "../data/processed/spreadsheets/")
-movie_data = pd.DataFrame(columns=['Title', 'Director', 'Cast', 'Genre', 'Plot', 'Release Date', 'Slug', 'Poster Filename'])
+rows = []
 for folder in os.listdir(INPUT_DIR):
     path = os.path.join(INPUT_DIR, folder)
-    script_dir = os.path.join(path, next((f for f in os.listdir(path) if f.endswith(".html")), None))
+    script_dir = next((f for f in os.listdir(path) if f.endswith(".html")), None)
     if not script_dir:
         continue
+    full_path = os.path.join(path, script_dir)
+    slug = os.path.splitext(script_dir)[0]
     try:
-        print(script_dir)
-        title, directed_by, cast, genre, plot, year, poster_filename = extract_movie_info(script_dir)
-        new_row = {
+        print(full_path)
+        title, directed_by, cast, genre, plot, year, poster_filename = extract_movie_info(full_path)
+        rows.append({
             "Title": title,
             "Director": directed_by,
             "Cast": ", ".join(cast),
             "Genre": genre,
             "Plot": plot,
             "Release Date": year,
-            "Slug": script_dir,
+            "Slug": slug,
             "Poster Filename": poster_filename
-        }
-        movie_data.loc[len(movie_data)] = new_row
-    except Exception as e:
-        print("error:", e)
+        })
     except KeyboardInterrupt:
-        output_path = os.path.join(SPREADSHEET_DIR, "updated_data.xlsx")
-        print(output_path)
+        movie_data = pd.DataFrame(rows)
+        output_path = os.path.join(SPREADSHEET_DIR, "updated_datav_test.xlsx")
         movie_data.to_excel(output_path, index=False)
         quit()
+    except Exception as e:
+        print("error:", e)
-output_path = os.path.join(SPREADSHEET_DIR, "updated_data.xlsx")
+movie_data = pd.DataFrame(rows)
+output_path = os.path.join(SPREADSHEET_DIR, "updated_data_test.xlsx")
 print(output_path)
 movie_data.to_excel(output_path, index=False)
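
The switch above from movie_data.loc[len(movie_data)] = new_row to a plain rows list avoids copying and re-indexing the frame on every insert; the DataFrame is built once at the end. A minimal standalone sketch of the pattern (the row fields here are illustrative):

    import pandas as pd

    rows = []
    for i in range(3):
        # plain dicts are cheap to append; the frame is built once at the end
        rows.append({"Title": f"Movie {i}", "Release Date": str(1990 + i)})

    movie_data = pd.DataFrame(rows)  # single construction instead of N row inserts
    print(movie_data)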


@@ -8,8 +8,8 @@ import csv
 from slugify import slugify
 BASE_DIR = os.path.dirname(os.path.abspath(__file__))
-INPUT_TSV = os.path.abspath(os.path.join(BASE_DIR, "../data/raw/imdb_datasets/title.basics.tsv"))
-OUTPUT_DIR = os.path.abspath(os.path.join(BASE_DIR, "../data/processed/wikipedia_html"))
+INPUT_TSV = os.path.abspath(os.path.join(BASE_DIR, "../data/raw/imdb_datasets/title.basics.test.tsv"))
+OUTPUT_DIR = os.path.abspath(os.path.join(BASE_DIR, "../data/processed/wikipedia_html_test"))
 ZIM_PATH = os.path.abspath(os.path.join(BASE_DIR, "../data/raw/wikipedia/wikipedia_en_all_maxi_2025-08.zim"))
 os.makedirs(OUTPUT_DIR, exist_ok=True)
@@ -21,39 +21,80 @@ print("The Zim file is now opened")
 def sanitize_slug(slug):
     return slugify(slug, separator="_", max_length=200) or "_unknown"
-#Fetch the html AND the images and put them in a folder
-def fetch_wikipedia_html_with_images(query, save_dir):
+def is_movie_page(html_content, primary_title, original_title, year):
+    soup = BeautifulSoup(html_content, "html.parser")
+    page_title = soup.find("h1", {"id": "firstHeading"})
+    if not page_title:
+        return False
+    page_title_text = page_title.get_text().lower()
+    if primary_title.lower() not in page_title_text and original_title.lower() not in page_title_text:
+        return False
+    infobox = soup.find("table", {"class": "infobox"})
+    if not infobox:
+        return False
+    infobox_text = infobox.get_text()
+    if "Directed by" not in infobox_text or ("Produced by" not in infobox_text and "Written by" not in infobox_text):
+        return False
+    # Also verify the year appears in the infobox
+    if year and year != "\\N" and year not in infobox_text:
+        return False
+    return True
+
+# Fetch the html AND the images and put them in a folder
+def fetch_wikipedia_html_with_images(query, save_dir, primary_title, original_title, year):
     q = Query().set_query(query)
     search = searcher.search(q)
     if search.getEstimatedMatches() == 0:
         return None
     results = list(search.getResults(0, 5))
-    best_path = results[0]
-    try:
-        entry = zim.get_entry_by_path(best_path)
-        item = entry.get_item()
-        html_content = bytes(item.content).decode("UTF-8")
-    except Exception:
-        return None
-    soup = BeautifulSoup(html_content, "html.parser")
-    for img in soup.find_all("img"):
-        src = img.get("src")
-        if not src:
-            continue
-        img_path = src.lstrip("/")
-        try:
-            img_entry = zim.get_entry_by_path(img_path)
-            img_bytes = bytes(img_entry.get_item().content)
-        except Exception:
-            continue
-        img_name = os.path.basename(img_path)
-        img_file_path = os.path.join(save_dir, img_name)
-        with open(img_file_path, "wb") as f:
-            f.write(img_bytes)
-        img["src"] = img_name
-    return str(soup), best_path
-#Go through each row of the tsv file and try to get the movie on wiki
+    for best_path in results:
+        try:
+            entry = zim.get_entry_by_path(best_path)
+            item = entry.get_item()
+            html_content = bytes(item.content).decode("UTF-8")
+        except Exception:
+            continue
+        if not is_movie_page(html_content, primary_title, original_title, year):
+            continue
+        soup = BeautifulSoup(html_content, "html.parser")
+        poster_img = None
+        infobox = soup.find("table", class_="infobox")
+        if infobox:
+            poster_img = infobox.select_one("img")
+        if poster_img and poster_img.get("src"):
+            img_path = poster_img["src"].lstrip("/")
+            try:
+                img_entry = zim.get_entry_by_path(img_path)
+                img_bytes = bytes(img_entry.get_item().content)
+                img_name = os.path.basename(img_path)
+                with open(os.path.join(save_dir, img_name), "wb") as f:
+                    f.write(img_bytes)
+                poster_img["src"] = img_name
+            except Exception:
+                pass
+        for img in soup.find_all("img"):
+            if img is not poster_img:
+                img["src"] = ""
+        return str(soup), best_path
+    return None
+
+done_set = {
+    fname[:-5]
+    for d in os.listdir(OUTPUT_DIR)
+    if not d.startswith("_tmp_")
+    for fname in os.listdir(os.path.join(OUTPUT_DIR, d))
+    if fname.endswith(".html")
+}
+print(f"Found {len(done_set)} already processed")
+
+# Go through each row of the tsv file and try to get the movie on wiki
 with open(INPUT_TSV, encoding="utf-8") as f:
     reader = csv.DictReader(f, delimiter="\t")
     for row in reader:
@@ -64,20 +105,15 @@ with open(INPUT_TSV, encoding="utf-8") as f:
         if year is None or titleType != "movie":
             print("Skipping from TSV: ", title)
             continue
-        already_done = False
-        for d in os.listdir(OUTPUT_DIR):
-            if os.path.exists(os.path.join(OUTPUT_DIR, d, f"{tconst}.html")):
-                already_done = True
-                break
-        if already_done:
+        if tconst in done_set:
             print(f"Skipping already processed: {tconst}")
             continue
         # folder for each movie
         movie_dir = os.path.join(OUTPUT_DIR, f"_tmp_{tconst}")
         os.makedirs(movie_dir, exist_ok=True)
-        query = f"{title} ({year} film)" if year != "\\N" else title #if year not empty
+        query = f"{title} ({year} film)" if year != "\\N" else title  # if year not empty
         print(f"fetching Wikipedia HTML + images for {tconst}: {query}")
-        result = fetch_wikipedia_html_with_images(query, movie_dir)
+        result = fetch_wikipedia_html_with_images(query, movie_dir, title, row["originalTitle"], row["startYear"])
         if result is None:
             print("Wikipedia fetch failed")
             shutil.rmtree(movie_dir, ignore_errors=True)
@@ -86,9 +122,6 @@ with open(INPUT_TSV, encoding="utf-8") as f:
         html_with_images, slug = result
         slug_dir = os.path.join(OUTPUT_DIR, sanitize_slug(slug))
         if html_with_images:
-            if "Directed by" not in html_with_images:
-                shutil.rmtree(movie_dir, ignore_errors=True)
-                continue
             if os.path.exists(slug_dir):
                 shutil.rmtree(movie_dir, ignore_errors=True)
             else:
@@ -98,6 +131,7 @@ with open(INPUT_TSV, encoding="utf-8") as f:
                 continue
             with open(outfile, "w", encoding="utf-8") as out:
                 out.write(html_with_images)
+            done_set.add(tconst)
        else:
             shutil.rmtree(movie_dir, ignore_errors=True)
             print(f"no Wikipedia page found for {query}")


@@ -0,0 +1,91 @@
+import pandas as pd
+import os
+
+from rapidfuzz import fuzz
+
+BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+NETFLIX_DIR = os.path.join(BASE_DIR, "../data/raw/netflix/")
+MOVIE_EXCEL = os.path.join(BASE_DIR, "../data/processed/spreadsheets/updated_data_test.xlsx")
+MOVIE_TITLES = os.path.join(NETFLIX_DIR, "movie_titles.csv")
+COMBINED_FILES = [os.path.join(NETFLIX_DIR, f"combined_data_{i}.txt") for i in range(1, 5)]
+OUTPUT = os.path.join(BASE_DIR, "../data/processed/spreadsheets/fused_gtruth_test.csv")
+TITLE_THRESHOLD = 85  # fuzzy search
+
+main_data = pd.read_excel(MOVIE_EXCEL)
+main_data["title_lower"] = main_data["Title"].str.lower().str.strip()
+main_data["director_lower"] = main_data["Director"].fillna("").str.lower().str.strip()
+
+records = []
+with open(MOVIE_TITLES, encoding="latin-1") as f:
+    for line in f:
+        line = line.strip()
+        parts = line.split(",", 2)
+        if len(parts) == 3:
+            records.append({"netflix_id": int(parts[0]), "year": parts[1], "title": parts[2].strip()})
+titles_df = pd.DataFrame(records)
+titles_df["title_lower"] = titles_df["title"].str.lower().str.strip()
+
+netflix_id_to_tt = {}  # netflix_id -> tt_id
+for _, nrow in titles_df.iterrows():
+    best_score = 0
+    best_meta = None
+    # https://github.com/rapidfuzz/RapidFuzz docs
+    for _, mrow in main_data.iterrows():
+        score = fuzz.ratio(nrow["title_lower"], mrow["title_lower"])
+        if score > best_score:
+            best_score = score
+            best_meta = mrow
+    if best_score < TITLE_THRESHOLD or best_meta is None:
+        continue
+    # Director match
+    confirmed = best_score >= TITLE_THRESHOLD
+    print(best_score)
+    if best_meta["director_lower"] and best_score >= 70:
+        # release year match
+        try:
+            meta_year = str(best_meta["Release Date"])
+            nf_year = str(int(nrow["year"])) if pd.notna(nrow["year"]) else ""
+            if nf_year and nf_year in meta_year:
+                confirmed = True
+        except Exception:
+            pass
+    if confirmed:
+        netflix_id_to_tt[int(nrow["netflix_id"])] = best_meta["Slug"]
+
+print(f"Matched {len(netflix_id_to_tt)} Netflix movies to tt IDs")
+valid_netflix_ids = set(netflix_id_to_tt.keys())
+
+rows = []
+current_movie_id = None
+for filepath in COMBINED_FILES:
+    print(f"Reading {os.path.basename(filepath)}...")
+    with open(filepath, encoding="latin-1") as f:
+        for line in f:
+            line = line.strip()
+            if line.endswith(":"):
+                current_movie_id = int(line[:-1])
+            elif current_movie_id in valid_netflix_ids:
+                parts = line.split(",")
+                if len(parts) == 3:
+                    customer_id, rating, date = parts
+                    rows.append({
+                        "customer_id": int(customer_id),
+                        "tt_id": netflix_id_to_tt[current_movie_id],
+                        "rating": int(rating),
+                        "date": date,
+                    })
+
+print(f"Found {len(rows):,} ratings")
+print(f"Found {len(valid_netflix_ids):,} movies in the ground truth")
+df = pd.DataFrame(rows)
+df.to_csv(OUTPUT, index=False)
+print(f"Written to {OUTPUT}")


@@ -1,11 +1,8 @@
 from bs4 import BeautifulSoup
 import os
-# script_dir = os.path.dirname(os.path.abspath(__file__))
-# file_path = os.path.join(script_dir, "..", "data", "tt0074888.html")
 def extract_movie_info(file_path):
     with open(file_path, "r", encoding="utf-8") as f:
         html = f.read()
@@ -66,17 +63,25 @@ def extract_movie_info(file_path):
     # -----------------------------
     plot = ""
-    plot_header = soup.find(id="Plot")
+    plot_header = soup.find(id="Plot") or soup.find(id="Synopsis")
     if plot_header:
         current = plot_header.parent
         for sibling in current.find_next_siblings():
             if sibling.name == "div" and "mw-heading2" in sibling.get("class", []):
                 break
             if sibling.name == "p":
                 plot += sibling.get_text(" ", strip=True) + " "
+    if not plot and content:
+        for el in content.find_all(["p", "div"], recursive=False):
+            if el.name == "div" and el.find(["h2"]):
+                break
+            if el.name == "p":
+                text = el.get_text(" ", strip=True)
+                if text:
+                    plot += text + " "
     plot = plot.strip()
     return title, directed_by, cast, genre, plot, year, poster_filename
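
With the Synopsis fallback above, extract_movie_info can recover a plot even when a page labels the section "Synopsis" instead of "Plot". A hypothetical call, following the OUTPUT_DIR/<slug>/<tconst>.html layout the fetch script writes (the path below is illustrative):

    from scrape import extract_movie_info

    # illustrative path; any folder produced by the fetch script works
    path = "data/processed/wikipedia_html_test/taxi_driver/tt0075314.html"
    title, directed_by, cast, genre, plot, year, poster_filename = extract_movie_info(path)
    print(title, year)
    print(plot[:200])  # beginning of the Plot (or Synopsis) text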