preprocessing script

This commit is contained in:
IshaAtteri
2026-03-10 14:14:59 -04:00
parent cb2fcd19eb
commit 8fa2cdba3c
3 changed files with 42 additions and 3 deletions

BIN
sample_data.xlsx Normal file

Binary file not shown.

37
scripts/preprocessing.py Normal file
View File

@@ -0,0 +1,37 @@
import pandas as pd
import string, re
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
import nltk
from nltk.stem import PorterStemmer, WordNetLemmatizer
from sentence_transformers import SentenceTransformer
# One-time NLTK resource downloads (no-ops if the corpora are already cached).
nltk.download('wordnet')
nltk.download('punkt_tab')
nltk.download('stopwords')
# Shared preprocessing resources, built once at import time and reused by clean_text.
stop_words = set(stopwords.words('english'))
stemmer = PorterStemmer()
lemmatizer = WordNetLemmatizer()
# Sentence-embedding model used to encode the preprocessed plot text.
model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')
# NOTE(review): hard-coded absolute Windows path — breaks on any other machine.
# Consider a path relative to the repo (e.g. '../sample_data.xlsx') or a CLI argument.
df = pd.read_excel('C:\\Users\\ishaa\\OneDrive\\Documents\\MSU\\Spring 2026\\Data mining\\Project\\sample_data.xlsx', engine='openpyxl')
def clean_text(text):
    """Normalize raw plot text into a cleaned, space-joined token string.

    Pipeline: lowercase -> strip punctuation -> replace remaining
    non-word characters with spaces -> tokenize -> drop English
    stopwords -> stem -> lemmatize -> re-join with single spaces.

    Parameters
    ----------
    text : str
        Raw plot text. Non-string values (e.g. NaN produced by
        pd.read_excel for empty cells) are treated as empty text.

    Returns
    -------
    str
        Space-separated, fully processed tokens ('' for non-string input).
    """
    # Guard: empty Excel cells arrive as float NaN; calling .lower() on a
    # float raises AttributeError and aborts the whole .apply() pass.
    if not isinstance(text, str):
        return ''
    text = text.lower()
    # Strip ASCII punctuation in one C-level pass.
    text = text.translate(str.maketrans('', '', string.punctuation))
    # Replace any remaining non-word characters (unicode symbols etc.) with spaces.
    text = re.sub(r'\W', ' ', text)
    tokens = [word for word in word_tokenize(text) if word not in stop_words]
    tokens = [stemmer.stem(word) for word in tokens]
    # NOTE(review): lemmatizing already-stemmed tokens is usually a no-op,
    # because Porter stems are often not dictionary words — confirm whether
    # stemming OR lemmatization alone was intended here.
    return ' '.join(lemmatizer.lemmatize(word) for word in tokens)
# print(df.columns)  # debug helper: inspect which columns the sheet actually has
# Run the cleaning pipeline over every plot and store it in a new column.
df['preprocessed'] = df['Plot'].apply(clean_text)
# Sanity check: print the first preprocessed plot and its sentence embedding.
sample_plot = df['preprocessed'][0]
print(sample_plot)
# encode() on a single string returns one embedding vector for that text.
embeddings = model.encode(sample_plot)
print(embeddings)

View File

@@ -9,8 +9,8 @@ headers = {
params = { params = {
"action": "query", "action": "query",
"format": "json", "format": "json",
"titles": "Godfather",
"prop": "extracts", "prop": "extracts",
"titles": "Interstellar",
"explaintext": True, "explaintext": True,
"redirects": 1 "redirects": 1
} }
@@ -19,12 +19,14 @@ response = requests.get(url, headers=headers, params=params)
print("Status:", response.status_code) print("Status:", response.status_code)
print("Content-Type:", response.headers.get("content-type")) print("Content-Type:", response.headers.get("content-type"))
print("First 200 chars:\n", response.text[:200]) print("First 200 chars:\n", response.text[:1000])
data = response.json() data = response.json()
pages = data["query"]["pages"] pages = data["query"]["pages"]
page = next(iter(pages.values())) page = next(iter(pages.values()))
print("\nTitle:", page["title"]) print("\nTitle:", page["title"])
print("\nPreview:\n", page["extract"][:500]) print("\nPreview:\n", page["extract"])