import pandas as pd
import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from models.lstm_model import LSTMModel

# Load the labeled corpus: one row per example with raw text and its label.
frame = pd.read_csv("data/sentiment_data.csv")
texts = frame['text'].tolist()
sentiments = frame['sentiment'].tolist()

# Text vectorization: fit a bag-of-words vocabulary over the corpus.
# Only the fitted vocabulary (word -> column index) is used downstream
# for index-sequence encoding; the dense count matrix was computed but
# never read anywhere in this script, so it is no longer materialized
# (it cost O(n_docs * vocab_size) memory for nothing).
vectorizer = CountVectorizer()
vectorizer.fit(texts)
vocab_size = len(vectorizer.vocabulary_)

# Convert text to index sequences (whitespace tokenization; this will not
# match CountVectorizer's own token pattern exactly -- punctuation-attached
# words may miss the vocabulary and fall back to the unknown index).
def text_to_indices(texts, vocabulary):
    """Encode each text as a list of 1-based vocabulary indices.

    Indices are shifted by +1 so that 0 is reserved for padding and
    unknown words. CountVectorizer vocabularies are 0-based, so using
    the raw index (as the previous version did) made the word stored at
    vocabulary index 0 indistinguishable from padding/OOV tokens.

    Args:
        texts: iterable of raw strings.
        vocabulary: mapping of word -> 0-based index
            (e.g. ``vectorizer.vocabulary_``).

    Returns:
        List of lists of ints in [0, len(vocabulary)]; 0 means
        padding/unknown, known words map to their index + 1.
    """
    indices_list = []
    for text in texts:
        # Lowercase + whitespace split; unknown words resolve to -1 + 1 = 0.
        indices = [vocabulary.get(word, -1) + 1 for word in text.lower().split()]
        indices_list.append(indices)
    return indices_list

# Encode every text as a sequence of vocabulary indices, then right-pad
# each sequence with zeros up to the length of the longest one so the
# batch forms a rectangular tensor.
vocabulary = vectorizer.vocabulary_
sequences = text_to_indices(texts, vocabulary)

longest = max(len(seq) for seq in sequences)
padded_sequences = [seq + [0] * (longest - len(seq)) for seq in sequences]

X_tensor = torch.tensor(padded_sequences, dtype=torch.long)
y_tensor = torch.tensor(sentiments, dtype=torch.long)


# Hold out 20% of the examples for evaluation; fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X_tensor, y_tensor, test_size=0.2, random_state=42)

# Model parameters
embedding_dim = 50    # size of each learned word-embedding vector
hidden_size = 100     # LSTM hidden-state dimensionality
output_size = 2       # number of sentiment classes; assumes binary integer labels -- TODO confirm against the CSV
learning_rate = 0.01
epochs = 10

# Initialize model, loss, optimizer
# The embedding table gets vocab_size + 1 rows so index 0 is available for padding.
model = LSTMModel(vocab_size + 1, embedding_dim, hidden_size, output_size) # +1 for padding index 0
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=learning_rate)

# Training loop: full-batch gradient descent -- one optimizer step per
# epoch over the entire training split (adequate for a small demo corpus).
for epoch in range(1, epochs + 1):
    model.train()
    optimizer.zero_grad()
    logits = model(X_train)
    loss = criterion(logits, y_train)
    loss.backward()
    optimizer.step()
    print(f'Epoch {epoch}/{epochs}, Loss: {loss.item():.4f}')

# Evaluation (simple accuracy)
model.eval()
with torch.no_grad():
    logits = model(X_test)
    # Predicted class = index of the largest logit per row.
    _, predicted = torch.max(logits, 1)
    correct = (predicted == y_test).sum().item()
    accuracy = correct / len(y_test)
    print(f'Test Accuracy: {accuracy:.4f}')
