This commit is contained in:
IshaAtteri
2026-03-19 12:32:56 -04:00
parent a435592f75
commit db645f3bbe
5 changed files with 57 additions and 10 deletions

BIN
preprocessed_data.xlsx Normal file

Binary file not shown.

View File

@@ -5,33 +5,58 @@ from nltk.corpus import stopwords
import nltk
from nltk.stem import PorterStemmer, WordNetLemmatizer
from sentence_transformers import SentenceTransformer
import pkg_resources
from symspellpy.symspellpy import SymSpell, Verbosity
# One-time NLTK resource downloads required by the tokenization,
# stopword filtering, and lemmatization steps below.
nltk.download('wordnet')
nltk.download('punkt_tab')
nltk.download('stopwords')
# English stopwords used to filter tokens during preprocessing.
stop_words = set(stopwords.words('english'))
# Compound spelling corrector.
# NOTE(review): no load_dictionary(...) call is visible in this view, so
# lookup_compound may be running against an empty dictionary — confirm the
# dictionary is loaded elsewhere (pkg_resources is imported, suggesting it
# was intended).
sym_spell = SymSpell(max_dictionary_edit_distance=2, prefix_length=7)
stemmer = PorterStemmer()
lemmatizer = WordNetLemmatizer()
# Sentence embedding model (MiniLM, 384-dim vectors).
model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')
# model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')
# NOTE(review): absolute Windows path is machine-specific — consider a
# config value or CLI argument.
df = pd.read_excel('C:\\Users\\ishaa\\OneDrive\\Documents\\MSU\\Spring 2026\\Data mining\\Project\\sample_data.xlsx', engine='openpyxl')
# df = pd.read_excel('C:\\Users\\ishaa\\OneDrive\\Documents\\MSU\\Spring 2026\\Data mining\\Project\\sample_data.xlsx', engine='openpyxl')
def clean_plot(text):
    """Normalize a movie plot string for downstream embedding work.

    Pipeline: lower-case, strip punctuation and special characters, apply
    SymSpell compound spelling correction, drop English stopwords, stem
    each token, then lemmatize the stemmed tokens.

    Parameters:
        text: raw plot text (str).

    Returns:
        The processed plot as a single space-joined string.
    """
    text = text.lower()
    # Strip punctuation, then map any remaining non-word characters
    # (e.g. leftover whitespace runs) to single spaces.
    text = text.translate(str.maketrans('', '', string.punctuation))
    text = re.sub(r'\W', ' ', text)
    # Compound-aware spelling correction; keep the input unchanged when
    # SymSpell returns no suggestions.
    suggestions = sym_spell.lookup_compound(text, max_edit_distance=2)
    if suggestions:
        text = suggestions[0].term
    tokens = [word for word in word_tokenize(text) if word not in stop_words]
    tokens = [stemmer.stem(word) for word in tokens]
    # NOTE(review): lemmatizing already-stemmed tokens is usually a no-op
    # (stems are often not valid lemma inputs) — confirm whether stemming
    # OR lemmatization alone was intended.
    return ' '.join(lemmatizer.lemmatize(word) for word in tokens)
def get_genre(row):
    """Reduce a row's 'Genre' description to a short genre phrase.

    Keeps only the first sentence of the description, removes the movie
    title from it, and — when the sentence matches the pattern
    "... is a <w1 w2 w3 w4 w5>" — keeps the four words after the first
    captured word. The result is lower-cased, de-punctuated, and
    stopword-filtered.

    Parameters:
        row: a pandas row (or mapping) with 'Title' and 'Genre' entries.

    Returns:
        The cleaned genre text as a single space-joined string.
    """
    movie = row['Title']
    text = row['Genre']
    # First sentence only; remove the title so the regex below matches the
    # descriptive phrase rather than the movie name.
    # NOTE(review): replace() runs before lower(), so it only strips the
    # title when its casing matches the description — confirm.
    text = text.split(".")[0]
    text = text.replace(movie, "")
    text = text.lower()
    # Capture five whitespace-separated words after "is a"; drop the first
    # (typically a year or article) and keep the remaining four.
    match = re.search(r'is a ((?:\S+\s+){4}\S+)', text)
    if match:
        words = match.group(1).split()
        text = ' '.join(words[1:])
    text = text.translate(str.maketrans('', '', string.punctuation))  # remove punctuation
    text = re.sub(r'\W', ' ', text)  # normalize remaining non-word chars to spaces
    tokens = [word for word in word_tokenize(text) if word not in stop_words]
    return ' '.join(tokens)
# print(df.columns)
# Quick smoke test of the preprocessing pipeline on the first plot.
# NOTE(review): clean_text appears to have been renamed clean_plot above —
# this call would raise NameError if re-enabled as-is; confirm the
# intended name before uncommenting.
df['preprocessed'] = df['Plot'].apply(clean_text)
sample_plot = df['preprocessed'][0]
print(sample_plot)
# df['preprocessed'] = df['Plot'].apply(clean_text)
# sample_plot = df['preprocessed'][0]
# print(sample_plot)
# Embed the sample plot to verify the model loads and encodes.
embeddings = model.encode(sample_plot)
print(embeddings)
# embeddings = model.encode(sample_plot)
# print(embeddings)

View File

@@ -73,7 +73,7 @@ def extract_movie_info(file_path):
plot = plot.strip()
return title, directed_by, cast, genre, plot
return title, directed_by, cast, genre, plot #image url
# -----------------------------
# Print results

View File

@@ -0,0 +1,22 @@
import pandas as pd
from sentence_transformers import SentenceTransformer
from preprocessing import clean_plot, get_genre
from sklearn.metrics.pairwise import cosine_similarity
# Load the scraped movie dataset.
# NOTE(review): absolute Windows paths are machine-specific — consider a
# config value or CLI argument.
df = pd.read_excel('C:\\Users\\ishaa\\OneDrive\\Documents\\MSU\\Spring 2026\\Data mining\\Project\\updated_data.xlsx', engine='openpyxl')
print(len(df))
# Drop rows that are unusable downstream (missing genre description or
# plot); the two prints give a quick before/after row-count sanity check.
df = df.dropna(subset=['Genre', 'Plot'])
print(len(df))
# df = df[:2]
# Clean each plot and reduce each genre description to a short phrase
# using the helpers from preprocessing.py.
df['Processed_Plot'] = df['Plot'].apply(clean_plot)
df['Genre'] = df[['Genre', 'Title']].apply(get_genre, axis=1)
# Persist the preprocessed frame for the embedding/similarity stage.
# NOTE(review): SentenceTransformer and cosine_similarity are imported in
# this file but never used here — presumably for a later step; confirm.
df.to_excel('C:\\Users\\ishaa\\OneDrive\\Documents\\MSU\\Spring 2026\\Data mining\\Project\\preprocessed_data.xlsx', index=False)
print(df.columns)

Binary file not shown.