import torch
import torch.nn as nn
from models.lstm_model import LSTMModel
import argparse
from sklearn.feature_extraction.text import CountVectorizer
import pandas as pd

# Rebuild the training-time vocabulary so prediction maps words to the
# same indices the model was trained with.
# NOTE(review): assumes train.py fitted an identically-configured
# CountVectorizer on this exact CSV — confirm against train.py.
data = pd.read_csv("data/sentiment_data.csv")
texts = list(data['text'])
vectorizer = CountVectorizer().fit(texts)
vocabulary = vectorizer.vocabulary_
# Longest text (in whitespace tokens) defines the padded sequence length,
# mirroring the max_len used at training time.
max_len = max(len(t.split()) for t in texts)

def text_to_indices(text, vocabulary, max_len):
    """Convert a raw text into a fixed-length list of vocabulary indices.

    The text is lowercased and whitespace-tokenized; words missing from
    *vocabulary* map to 0 (also the padding value). The result is always
    exactly ``max_len`` long: shorter texts are right-padded with 0, and
    longer texts are truncated (the original silently returned an
    over-long list when ``len(indices) > max_len``, which would break a
    fixed-shape batch tensor).

    Args:
        text: Input sentence.
        vocabulary: Mapping of word -> integer index.
        max_len: Target sequence length.

    Returns:
        List[int] of length ``max_len``.
    """
    indices = [vocabulary.get(word, 0) for word in text.lower().split()]
    # Truncate first so the padding arithmetic below can never go negative.
    indices = indices[:max_len]
    return indices + [0] * (max_len - len(indices))

def predict_sentiment(text, model_path, vocabulary, max_len):
    """Classify *text* as "Positive" or "Negative" with a saved LSTM model.

    Args:
        text: Raw input sentence.
        model_path: Path to the state-dict file saved during training.
        vocabulary: Word -> index mapping used at training time.
        max_len: Sequence length the model was trained with.

    Returns:
        "Positive" if the predicted class index is 1, otherwise "Negative".
    """
    # Hyperparameters below must mirror the training configuration exactly,
    # otherwise load_state_dict will fail on shape mismatches.
    net = LSTMModel(
        len(vocabulary) + 1,  # vocab_size: +1 reserves index 0 for pad/unknown
        50,                   # embedding_dim
        100,                  # hidden_size
        2,                    # output_size (binary sentiment)
    )
    net.load_state_dict(torch.load(model_path, map_location=torch.device('cpu')))
    net.eval()

    # Shape (1, max_len): a single-example batch of token indices.
    batch = torch.tensor(
        [text_to_indices(text, vocabulary, max_len)], dtype=torch.long
    )

    with torch.no_grad():
        logits = net(batch)
        predicted_class = logits.argmax(dim=1)

    return "Positive" if predicted_class.item() == 1 else "Negative"

if __name__ == "__main__":
    # CLI entry point: predict the sentiment of a single text passed via --text.
    parser = argparse.ArgumentParser(description='Sentiment Prediction')
    parser.add_argument('--text', type=str, required=True, help='Text to predict sentiment for')
    args = parser.parse_args()

    model_path = 'sentiment_model.pth'  # Path to saved model weights (if you saved it in train.py)
    sentiment = predict_sentiment(args.text, model_path, vocabulary, max_len)
    print(f"Text: '{args.text}'")
    # BUG FIX: the original f-string literal spanned two physical lines without
    # escaping, which is a SyntaxError and prevented the whole file from running.
    print(f"Predicted Sentiment: {sentiment}")