- Fix directories

This commit is contained in:
prabhaavp
2026-03-10 13:10:25 -04:00
parent 401e7e5497
commit 0ac1234afa
4 changed files with 78 additions and 3 deletions

View File

View File

@@ -6,7 +6,7 @@ import csv
BASE_DIR = os.path.dirname(os.path.abspath(__file__)) BASE_DIR = os.path.dirname(os.path.abspath(__file__))
INPUT_TSV = os.path.abspath(os.path.join(BASE_DIR, "../data/raw/imdb_datasets/title.basics.test.tsv")) INPUT_TSV = os.path.abspath(os.path.join(BASE_DIR, "../data/raw/imdb_datasets/title.basics.test.tsv"))
OUTPUT_DIR = os.path.abspath(os.path.join(BASE_DIR, "../data/raw/wikipedia/wikipedia_html")) OUTPUT_DIR = os.path.abspath(os.path.join(BASE_DIR, "../data/processed/wikipedia_html"))
ZIM_PATH = os.path.abspath(os.path.join(BASE_DIR, "../data/raw/wikipedia/wikipedia_en_all_maxi_2025-08.zim")) ZIM_PATH = os.path.abspath(os.path.join(BASE_DIR, "../data/raw/wikipedia/wikipedia_en_all_maxi_2025-08.zim"))
os.makedirs(OUTPUT_DIR, exist_ok=True) os.makedirs(OUTPUT_DIR, exist_ok=True)
@@ -53,16 +53,22 @@ with open(INPUT_TSV, encoding="utf-8") as f:
tconst = row["tconst"] tconst = row["tconst"]
title = row["primaryTitle"] title = row["primaryTitle"]
year = row["startYear"] year = row["startYear"]
titleType = row["titleType"]
if year is None or titleType != "movie":
print("Skipping from TSV: ", title)
continue
# folder for each movie # folder for each movie
movie_dir = os.path.join(OUTPUT_DIR, tconst) movie_dir = os.path.join(OUTPUT_DIR, tconst)
os.makedirs(movie_dir, exist_ok=True) os.makedirs(movie_dir, exist_ok=True)
outfile = os.path.join(movie_dir, f"{tconst}.html") outfile = os.path.join(movie_dir, f"{tconst}.html")
if os.path.exists(outfile): if os.path.exists(outfile):
continue continue
query = f"{title} {year}" if year != "\\N" else title #if year not empty query = f"{title} ({year} film)" if year != "\\N" else title #if year not empty
print(f"fetching Wikipedia HTML + images for {tconst}: {query}") print(f"fetching Wikipedia HTML + images for {tconst}: {query}")
html_with_images = fetch_wikipedia_html_with_images(query, movie_dir) html_with_images = fetch_wikipedia_html_with_images(query, movie_dir)
if html_with_images: if html_with_images:
if "Directed by" not in html_with_images:
continue
with open(outfile, "w", encoding="utf-8") as out: with open(outfile, "w", encoding="utf-8") as out:
out.write(html_with_images) out.write(html_with_images)
else: else:

View File

@@ -19,7 +19,7 @@ response = requests.get(url, headers=headers, params=params)
print("Status:", response.status_code) print("Status:", response.status_code)
print("Content-Type:", response.headers.get("content-type")) print("Content-Type:", response.headers.get("content-type"))
print("First 200 chars:\n", response.text[:200]) print("First 200 chars:\n", response.text)
data = response.json() data = response.json()

69
scripts/scrape_wiki.py Normal file
View File

@@ -0,0 +1,69 @@
import csv
import os
import requests
from time import sleep
# Custom User-Agent sent with every Wikipedia request so the client is identifiable.
HEADERS = {"User-Agent": "cse881"}
# MediaWiki Action API endpoint, used here for full-text title search.
SEARCH_URL = "https://en.wikipedia.org/w/api.php"
# Wikimedia REST API base, used to fetch the rendered HTML of a page.
BASE_URL = "https://en.wikipedia.org/api/rest_v1"
# Resolve all paths relative to this script's directory so the script
# works regardless of the current working directory.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
INPUT_TSV = os.path.abspath(os.path.join(BASE_DIR, "../data/raw/imdb_datasets/title.basics.test.tsv"))
OUTPUT_DIR = os.path.abspath(os.path.join(BASE_DIR, "../data/raw/wikipedia/wikipedia_html"))
# Ensure the output directory exists before any writes below.
os.makedirs(OUTPUT_DIR, exist_ok=True)
def fetch_wikipedia_html(query):
    """Search Wikipedia for *query* and return the HTML of the top search hit.

    Returns None when the search yields no results, or when fetching the
    page HTML does not return HTTP 200.
    """
    search_params = {
        "action": "query",
        "list": "search",
        "srsearch": query,
        "format": "json"
    }
    payload = requests.get(SEARCH_URL, params=search_params, headers=HEADERS).json()
    hits = payload.get("query", {}).get("search", [])
    if not hits:
        return None
    # The REST API expects underscores in place of spaces in page titles.
    page_title = hits[0]["title"].replace(" ", "_")
    page_resp = requests.get(f"{BASE_URL}/page/html/{page_title}", headers=HEADERS)
    return page_resp.text if page_resp.status_code == 200 else None
# Stream the IMDb basics TSV and save one Wikipedia HTML file per title.
with open(INPUT_TSV, encoding="utf-8") as f:
    print("Opened file:", INPUT_TSV)
    print("First 500 chars:")
    print(f.read(500))
    f.seek(0)  # rewind after the preview read so DictReader still sees the header row
    reader = csv.DictReader(f, delimiter="\t")
    for row in reader:
        tconst = row["tconst"]
        title = row["primaryTitle"]
        year = row["startYear"]
        # "\N" is IMDb's null marker; only append the year when it is known.
        # BUG FIX: this assignment previously happened AFTER the skip branch
        # below, so the "Skipping" print raised NameError on the first
        # already-downloaded title. Build the query before it is used.
        query = f"{title} {year}" if year != "\\N" else title
        outfile = os.path.join(OUTPUT_DIR, f"{tconst}.html")
        print(outfile)
        if os.path.exists(outfile):
            print(f"Skipping {tconst}: {query}")
            continue  # already downloaded, skip
        print(f"Fetching Wikipedia for {tconst}: {query}")
        html = fetch_wikipedia_html(query)
        if html:
            with open(outfile, "w", encoding="utf-8") as out:
                out.write(html)
        else:
            print("No Wikipedia page found")
        sleep(0.5)  # throttle requests to be polite to the Wikipedia APIs
print("Completed")
#https://en.wikipedia.org/w/index.php?api=wmf-restbase&title=Special%3ARestSandbox#/Page%20content/get_page_summary__title_