preprocessing script
This commit is contained in:
BIN
sample_data.xlsx
Normal file
BIN
sample_data.xlsx
Normal file
Binary file not shown.
37
scripts/preprocessing.py
Normal file
37
scripts/preprocessing.py
Normal file
@@ -0,0 +1,37 @@
|
||||
import pandas as pd
|
||||
import string, re
|
||||
from nltk.tokenize import word_tokenize
|
||||
from nltk.corpus import stopwords
|
||||
import nltk
|
||||
from nltk.stem import PorterStemmer, WordNetLemmatizer
|
||||
from sentence_transformers import SentenceTransformer
|
||||
|
||||
# One-time NLTK resource downloads required by the pipeline below
# (tokenizer models, WordNet for lemmatization, stopword lists).
nltk.download('wordnet')
nltk.download('punkt_tab')
nltk.download('stopwords')

# English stopword set used to filter tokens in clean_text().
stop_words = set(stopwords.words('english'))

# Shared text-normalization helpers used by clean_text().
stemmer = PorterStemmer()
lemmatizer = WordNetLemmatizer()

# Sentence-embedding model used to encode cleaned plot text.
model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')

# NOTE(review): machine-specific absolute Windows path — will fail on any
# other machine; consider a path relative to the repo or a CLI argument.
df = pd.read_excel('C:\\Users\\ishaa\\OneDrive\\Documents\\MSU\\Spring 2026\\Data mining\\Project\\sample_data.xlsx', engine='openpyxl')
def clean_text(text):
    """Normalize a raw plot string for downstream embedding.

    Lowercases, strips punctuation and other non-word characters, drops
    English stopwords, then stems and lemmatizes the remaining tokens.

    Parameters
    ----------
    text : str
        Raw plot text. Non-string values (e.g. NaN produced by
        ``pd.read_excel`` for empty cells) are treated as empty text
        instead of raising ``AttributeError``.

    Returns
    -------
    str
        Space-joined normalized tokens (empty string for non-string input).
    """
    # Empty spreadsheet cells arrive as float('nan'); without this guard
    # df['Plot'].apply(clean_text) crashes on the first missing plot.
    if not isinstance(text, str):
        return ''
    text = text.lower()
    text = text.translate(str.maketrans('', '', string.punctuation))  # Remove punctuation
    text = re.sub(r'\W', ' ', text)  # Remove remaining special characters
    tokens = [word for word in word_tokenize(text) if word not in stop_words]
    tokens = [stemmer.stem(word) for word in tokens]
    # NOTE(review): lemmatizing already-stemmed tokens is mostly a no-op,
    # since stems are often not dictionary words — confirm whether stemming
    # OR lemmatization alone was intended.
    return ' '.join(lemmatizer.lemmatize(word) for word in tokens)
|
||||
# print(df.columns)

# Clean every plot, then embed the first one as a smoke test.
df['preprocessed'] = df['Plot'].apply(clean_text)
sample_plot = df['preprocessed'][0]
print(sample_plot)

# Encode one cleaned plot into a sentence embedding and show it.
embeddings = model.encode(sample_plot)
print(embeddings)
|
||||
@@ -9,8 +9,8 @@ headers = {
|
||||
params = {
|
||||
"action": "query",
|
||||
"format": "json",
|
||||
"titles": "Godfather",
|
||||
"prop": "extracts",
|
||||
"titles": "Interstellar",
|
||||
"explaintext": True,
|
||||
"redirects": 1
|
||||
}
|
||||
@@ -19,12 +19,14 @@ response = requests.get(url, headers=headers, params=params)
|
||||
|
||||
print("Status:", response.status_code)
|
||||
print("Content-Type:", response.headers.get("content-type"))
|
||||
print("First 200 chars:\n", response.text[:200])
|
||||
print("First 200 chars:\n", response.text[:1000])
|
||||
|
||||
data = response.json()
|
||||
|
||||
|
||||
|
||||
pages = data["query"]["pages"]
|
||||
page = next(iter(pages.values()))
|
||||
|
||||
print("\nTitle:", page["title"])
|
||||
print("\nPreview:\n", page["extract"][:500])
|
||||
print("\nPreview:\n", page["extract"])
|
||||
|
||||
Reference in New Issue
Block a user