some changes

IshaAtteri
2026-03-19 12:45:32 -04:00
41 changed files with 6751 additions and 739 deletions


@@ -0,0 +1,115 @@
import os
import re
import csv
import pandas as pd
from bs4 import BeautifulSoup
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
INPUT_DIR = os.path.join(BASE_DIR, "../data/processed/wikipedia_html")
OUTPUT_TSV = os.path.join(BASE_DIR, "../data/processed/spreadsheet/wikipedia_metadata4.tsv")
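# only these infobox labels / section titles survive into the output TSV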
WHITELIST = {
"slug",
"title",
"poster_filename",
"Directed by",
"Produced by",
"Written by",
"Starring",
"Release date",
"Running time",
"Country",
"Language",
"Budget",
"Box office",
"Plot"
}
def clean(el):
if not el:
return ""
for br in el.find_all("br"):
br.replace_with(" | ")
return re.sub(r"\s+", " ", el.get_text(" ", strip=True)).strip()
def parse_html(path, slug):
with open(path, encoding="utf-8") as f:
soup = BeautifulSoup(f, "html.parser")
row = {"slug": slug}
h1 = soup.select_one("h1.firstHeading")
if h1:
row["title"] = h1.get_text(strip=True)
else:
row["title"] = ""
# infobox
infobox = soup.select_one("table.infobox")
if infobox:
img = infobox.select_one("img")
if img and img.get("src"):
row["poster_filename"] = os.path.basename(img["src"])
else:
row["poster_filename"] = ""
for tr in infobox.select("tr"):
th = tr.select_one(".infobox-label")
td = tr.select_one(".infobox-data")
if th and td:
row[clean(th)] = clean(td)
# sections
content = soup.select_one(".mw-parser-output")
if not content:
return {k: v for k, v in row.items() if k in WHITELIST}
skip = {"references", "external links", "see also"}
current = None
lead = []
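    # walk top-level nodes: heading divs switch the current section; other block elements append to it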
for el in content.children:
if getattr(el, "name", None) == "div" and "mw-heading" in el.get("class", []):
            h = el.find(["h2", "h3", "h4", "h5", "h6"])  # section headings are h2-h6 (h1 is the page title)
if h:
title = clean(h)
if title.lower() in skip:
current = None
else:
current = title
if current:
row[current] = ""
continue
if not current:
if getattr(el, "name", None) == "p":
text = clean(el)
if text:
lead.append(text)
continue
        if getattr(el, "name", None) in ("p", "ul", "ol", "table"):
            text = clean(el)
            if text:
                row[current] += (" | " if row[current] else "") + text
if lead:
if row.get("Plot"):
row["Plot"] = " | ".join(lead) + " | " + row["Plot"]
else:
row["Plot"] = " | ".join(lead)
return {k: v for k, v in row.items() if k in WHITELIST}
def main():
rows = []
    for folder in os.listdir(INPUT_DIR):
        path = os.path.join(INPUT_DIR, folder)
        if not os.path.isdir(path):
            continue
        html = next((f for f in os.listdir(path) if f.endswith(".html")), None)
        if not html:
            continue
try:
rows.append(parse_html(os.path.join(path, html), folder))
except Exception as e:
print("error:", html, e)
df = pd.DataFrame(rows).fillna("")
if df.empty:
print("The folder was empty / None parsed")
return
cols = ["slug", "poster_filename"] + [c for c in df.columns if c not in ("slug", "poster_filename")]
df = df[cols]
os.makedirs(os.path.dirname(OUTPUT_TSV), exist_ok=True)
df.to_csv(OUTPUT_TSV, sep="\t", index=False, quoting=csv.QUOTE_NONE, escapechar="\\")
print(f"Wrote {len(df)} rows -> {OUTPUT_TSV}")
if __name__ == "__main__":
main()
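For context, a minimal standalone check of the clean() helper above (a hypothetical snippet, not part of this commit): each <br> becomes a " | " separator before whitespace is collapsed.

import re
from bs4 import BeautifulSoup

def clean(el):  # duplicated from the script above so this check runs standalone
    if not el:
        return ""
    for br in el.find_all("br"):
        br.replace_with(" | ")
    return re.sub(r"\s+", " ", el.get_text(" ", strip=True)).strip()

cell = BeautifulSoup("<td>John Doe<br/>Jane Roe</td>", "html.parser").td
assert clean(cell) == "John Doe | Jane Roe"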

Not_used/load.py Normal file

@@ -0,0 +1,12 @@
import pandas as pd
import dtale
file_path = '../data/raw/imdb_datasets/title.basics.tsv'
pd.set_option('display.max_columns', None) # show all columns
pd.set_option('display.width', 1000) # prevent columns from wrapping
df = pd.read_csv(file_path, sep='\t', nrows=1)
print(df)
d = dtale.show(df, subprocess=False)
d.open_browser()
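A hedged variation on the same preview (assuming the standard IMDb title.basics layout): sample a few thousand rows instead of one, mapping IMDb's literal \N markers to NaN so dtale renders missing values sensibly.

import pandas as pd
import dtale

file_path = '../data/raw/imdb_datasets/title.basics.tsv'
df = pd.read_csv(file_path, sep='\t', nrows=5000, na_values='\\N', low_memory=False)  # '\N' is IMDb's null marker
dtale.show(df, subprocess=False).open_browser()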

Not_used/preprocessing.py Normal file

@@ -0,0 +1,62 @@
import pandas as pd
import string, re
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
import nltk
from nltk.stem import PorterStemmer, WordNetLemmatizer
from sentence_transformers import SentenceTransformer
import pkg_resources
from symspellpy.symspellpy import SymSpell, Verbosity
nltk.download('wordnet')
nltk.download('punkt_tab')
nltk.download('stopwords')
stop_words = set(stopwords.words('english'))
sym_spell = SymSpell(max_dictionary_edit_distance=2, prefix_length=7)
dictionary_path = pkg_resources.resource_filename("symspellpy", "frequency_dictionary_en_82_765.txt")
sym_spell.load_dictionary(dictionary_path, term_index=0, count_index=1)  # without a dictionary, lookup_compound has nothing to correct against
stemmer = PorterStemmer()
lemmatizer = WordNetLemmatizer()
# model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')
# df = pd.read_excel('C:\\Users\\ishaa\\OneDrive\\Documents\\MSU\\Spring 2026\\Data mining\\Project\\sample_data.xlsx', engine='openpyxl')
def clean_plot(text):
text = text.lower()
text = text.translate(str.maketrans('', '', string.punctuation)) # Remove punctuation
text = re.sub(r'\W', ' ', text)
suggestions = sym_spell.lookup_compound(text, max_edit_distance=2)
if suggestions:
text = suggestions[0].term
    text = [word for word in word_tokenize(text) if word not in stop_words]
text = [stemmer.stem(word) for word in text]
text = ' '.join(lemmatizer.lemmatize(word) for word in text)
return text
def get_genre(row):
movie = row['Title']
print(movie)
text = row['Genre']
text = text.split(".")[0]
text = text.replace(movie, "")
text = text.lower()
match = re.search(r'is a ((?:\S+\s+){4}\S+)', text)
if match:
words = match.group(1).split()
text = ' '.join(words[1:])
text = text.translate(str.maketrans('', '', string.punctuation)) # Remove punctuation
text = re.sub(r'\W', ' ', text) # Remove special characters
    text = [word for word in word_tokenize(text) if word not in stop_words]
text = ' '.join(text)
return text
# print(df.columns)
# df['preprocessed'] = df['Plot'].apply(clean_text)
# sample_plot = df['preprocessed'][0]
# print(sample_plot)
# embeddings = model.encode(sample_plot)
# print(embeddings)
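A minimal usage sketch for clean_plot, placed at the bottom of the module so the function and the dictionary setup are in scope (hypothetical input; exact tokens depend on the SymSpell dictionary, stemmer, and lemmatizer):

if __name__ == '__main__':
    sample = 'A young wizzard discovers his hiden powers.'
    # output is approximate, e.g. something like 'young wizard discov hidden power'
    print(clean_plot(sample))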

Not_used/rank_cols.py Normal file

@@ -0,0 +1,63 @@
import os
import csv
import sys
from collections import defaultdict
from tqdm import tqdm
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
TSV_PATH = os.path.join(BASE_DIR, "../data/processed/spreadsheet/wikipedia_metadata3.tsv")
OUTPUT_PATH = os.path.join(BASE_DIR, "../data/processed/spreadsheet/rank_cols_output.txt")
csv.field_size_limit(min(sys.maxsize, 2**31 - 1))  # raise the per-field size cap; 2**31 - 1 is the largest value a 32-bit C long accepts
#https://stackoverflow.com/questions/53538888/counting-csv-column-occurrences-on-the-fly-in-python
def main():
lines = []
def log(msg=""):
print(msg)
lines.append(str(msg))
log(f"Reading: {TSV_PATH}")
file_size = os.path.getsize(TSV_PATH)
col_filled = defaultdict(int)
row_count = 0
with open(TSV_PATH, encoding="utf-8", buffering=4 * 1024 * 1024) as f:
reader = csv.reader(f, delimiter="\t")
headers = next(reader)
num_cols = len(headers)
with tqdm(total=file_size, unit="B", unit_scale=True, unit_divisor=1024, desc="Processing") as pbar:
for row in reader:
row_count += 1
                for i, val in enumerate(row):
                    if i < num_cols and val.strip():
                        col_filled[headers[i]] += 1
                pbar.update(sum(map(len, row)) + num_cols)  # approximate bytes consumed: field lengths plus delimiters
log(f"\nTotal rows: {row_count:,}")
log(f"Total columns: {num_cols}\n")
ranked = sorted(
headers,
key=lambda c: col_filled.get(c, 0) / row_count,
reverse=True,
)
log(f"{'#':<5} {'Column':<40} {'Filled':>10} {'Total':>10} {'Fill %':>8}")
log("-" * 75)
for i, col in enumerate(ranked, 1):
filled = col_filled.get(col, 0)
pct = filled / row_count * 100
log(f"{i:<5} {col:<40} {filled:>10,} {row_count:>10,} {pct:>7.1f}%")
with open(OUTPUT_PATH, "w", encoding="utf-8") as out:
out.write("\n".join(lines))
print(f"\nOutput written to: {OUTPUT_PATH}")
if __name__ == "__main__":
main()
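As a possible follow-up (a sketch, not in this commit): the fill ranking could drive column pruning with pandas. The 5% cutoff is an assumption, and re-reading the TSV this way trades the streaming pass above for simplicity.

import os
import pandas as pd

BASE_DIR = os.path.dirname(os.path.abspath(__file__))
TSV_PATH = os.path.join(BASE_DIR, "../data/processed/spreadsheet/wikipedia_metadata3.tsv")

df = pd.read_csv(TSV_PATH, sep="\t", dtype=str).fillna("")
fill_rate = (df != "").mean()  # fraction of non-empty cells per column
df = df[fill_rate[fill_rate >= 0.05].index]  # keep columns filled in at least 5% of rows (assumed cutoff)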

Not_used/scrape_wiki.py Normal file

@@ -0,0 +1,69 @@
import csv
import os
import requests
from time import sleep
HEADERS = {"User-Agent": "cse881"}
SEARCH_URL = "https://en.wikipedia.org/w/api.php"
BASE_URL = "https://en.wikipedia.org/api/rest_v1"
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
INPUT_TSV = os.path.abspath(os.path.join(BASE_DIR, "../data/raw/imdb_datasets/title.basics.test.tsv"))
OUTPUT_DIR = os.path.abspath(os.path.join(BASE_DIR, "../data/raw/wikipedia/wikipedia_html"))
os.makedirs(OUTPUT_DIR, exist_ok=True)
def fetch_wikipedia_html(query):
params = {
"action": "query",
"list": "search",
"srsearch": query,
"format": "json"
}
    resp = requests.get(SEARCH_URL, params=params, headers=HEADERS, timeout=10).json()
results = resp.get("query", {}).get("search", [])
if not results:
return None
best_title = results[0]["title"]
wiki_title = best_title.replace(" ", "_")
html_url = f"{BASE_URL}/page/html/{wiki_title}"
    r = requests.get(html_url, headers=HEADERS, timeout=10)
if r.status_code != 200:
return None
return r.text
with open(INPUT_TSV, encoding="utf-8") as f:
print("Opened file:", INPUT_TSV)
print("First 500 chars:")
print(f.read(500))
f.seek(0)
reader = csv.DictReader(f, delimiter="\t")
    for row in reader:
        tconst = row["tconst"]
        title = row["primaryTitle"]
        year = row["startYear"]
        query = f"{title} {year}" if year != "\\N" else title  # IMDb marks missing years as \N; define query before it's printed below
        outfile = os.path.join(OUTPUT_DIR, f"{tconst}.html")
        if os.path.exists(outfile):
            print(f"Skipping {tconst}: {query}")
            continue  # already downloaded, skip
print(f"Fetching Wikipedia for {tconst}: {query}")
html = fetch_wikipedia_html(query)
if html:
with open(outfile, "w", encoding="utf-8") as out:
out.write(html)
else:
print(f"No Wikipedia page found")
sleep(0.5)
print("Completed")
#https://en.wikipedia.org/w/index.php?api=wmf-restbase&title=Special%3ARestSandbox#/Page%20content/get_page_summary__title_
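One robustness tweak worth considering (a sketch, not part of this commit): retry transient HTTP failures with a timeout and backoff instead of giving up on the first non-200 response.

import requests
from time import sleep

def get_with_retry(url, headers, retries=3, backoff=1.0):
    # hypothetical helper: linear backoff between attempts
    for attempt in range(retries):
        try:
            r = requests.get(url, headers=headers, timeout=10)
            if r.status_code == 200:
                return r
        except requests.RequestException:
            pass
        sleep(backoff * (attempt + 1))
    return None

fetch_wikipedia_html above could then call get_with_retry(html_url, HEADERS) in place of the bare requests.get.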