import gensim
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, Dataset
import pandas as pd
from torch.optim import Adam
from torch.nn.utils.rnn import pad_sequence


# Pretrained word2vec vectors (binary format), loaded once at import time.
w2v_model = gensim.models.KeyedVectors.load_word2vec_format(
    "./word2vec_google_news.bin", binary=True
)

# Fall back to CPU so the script still runs on machines without a GPU
# (the original hard-coded "cuda:0" and crashed on CPU-only hosts).
device = "cuda:0" if torch.cuda.is_available() else "cpu"


class SST2Dataset(Dataset):
    """Map-style dataset over a CSV file with 'sentence' and 'label' columns.

    Each item is a triple (embeddings, label, sentence): a float tensor of
    per-token word2vec vectors, the class label as a long tensor, and the raw
    sentence string.  Tensors are created directly on the module-level
    ``device``.
    """

    def __init__(self, filename, w2v_model, embedding_dim=300):
        # filename: path to a comma-separated file readable by pd.read_csv.
        # NOTE(review): GLUE SST-2 ships TSVs by default — confirm these files
        # really are comma-separated.
        # w2v_model: gensim KeyedVectors used for token lookup.
        # embedding_dim: vector size, used only for the all-zeros fallback.
        self.data = pd.read_csv(filename)
        self.w2v_model = w2v_model
        self.embedding_dim = embedding_dim

    def __len__(self):
        # One example per CSV row.
        return len(self.data)

    def __getitem__(self, idx):
        sentence = self.data.iloc[idx]["sentence"]
        label = self.data.iloc[idx]["label"]
        # Whitespace tokenization; tokens missing from the word2vec vocabulary
        # are silently dropped.
        embeddings = [
            self.w2v_model[word] for word in sentence.split() if word in self.w2v_model
        ]
        if len(embeddings) == 0:
            # Handle the case where there are no embeddings due to OOV words or empty sentences
            embeddings = np.zeros((1, self.embedding_dim))
        return (
            torch.tensor(np.array(embeddings), dtype=torch.float).to(device),
            torch.tensor(label, dtype=torch.long).to(device),
            sentence,
        )

    def sort_by_confidence(self, model):
        """Reorder ``self.data`` by the model's prediction confidence, descending.

        Runs the model on every example individually (batch of 1), takes the
        max softmax probability as the confidence, then rebuilds the backing
        dataframe with the most confidently predicted sentences first — a
        curriculum-style reordering applied between epochs.
        """
        model.eval()
        confidences = []
        with torch.no_grad():
            # Iterating `self` drives __getitem__ with increasing indices until
            # pandas raises IndexError, i.e. one full pass over the rows.
            for data, label, sentence in self:
                output = model(data.unsqueeze(0))
                # Highest class probability for this single example.
                confidence = torch.max(torch.softmax(output, dim=1))
                confidences.append((confidence.item(), sentence, label))
        confidences.sort(reverse=True, key=lambda x: x[0])
        new_data = pd.DataFrame(
            [(s, l.item()) for _, s, l in confidences],
            columns=["sentence", "label"],
        )
        self.data = new_data


def collate_fn(batch):
    """Collate (embeddings, label, sentence) triples into a padded batch.

    Sequences are right-padded with zeros to the longest sequence in the
    batch; the raw sentences are discarded here.
    """
    sequences, targets, _sentences = zip(*batch)
    padded = pad_sequence(sequences, batch_first=True, padding_value=0.0)
    return padded, torch.stack(targets)


class GRUModel(nn.Module):
    """Single-layer GRU encoder followed by a linear classification head."""

    def __init__(self, input_dim, hidden_dim, output_dim):
        super().__init__()
        self.gru = nn.GRU(input_dim, hidden_dim, batch_first=True)
        self.fc = nn.Linear(hidden_dim, output_dim)

    def forward(self, x):
        # Classify from the final hidden state only; squeeze the
        # num_layers axis (always 1 here) before the linear head.
        _outputs, final_hidden = self.gru(x)
        return self.fc(final_hidden.squeeze(0))


def train_model(model, train_loader, criterion, optimizer):
    """Run one training epoch over `train_loader`.

    Performs a standard zero-grad / forward / backward / step cycle per batch
    and returns the sum (not the mean) of per-batch losses.
    """
    model.train()
    running_loss = 0
    for inputs, targets in train_loader:
        optimizer.zero_grad()
        batch_loss = criterion(model(inputs), targets)
        batch_loss.backward()
        optimizer.step()
        running_loss += batch_loss.item()
    return running_loss


def evaluate_model(models, loader, criterion):
    """Evaluate an ensemble by averaging its members' logits.

    Every model is put into eval mode; predictions come from the mean of the
    stacked model outputs.  Returns (summed batch loss, accuracy over
    ``loader.dataset``).
    """
    for member in models:
        member.eval()
    loss_sum = 0
    n_correct = 0
    with torch.no_grad():
        for inputs, targets in loader:
            stacked = torch.stack([member(inputs) for member in models])
            averaged = stacked.mean(dim=0)
            loss_sum += criterion(averaged, targets).item()
            predictions = averaged.argmax(dim=1, keepdim=True)
            n_correct += predictions.eq(targets.view_as(predictions)).sum().item()
    return loss_sum, n_correct / len(loader.dataset)


def main():
    """Alternately train two GRU classifiers on SST-2 with confidence-based
    curriculum reordering of the training data, then report test accuracy."""
    batch_size = 32

    train_dataset = SST2Dataset("./SST-2/train.csv", w2v_model)
    validation_dataset = SST2Dataset("./SST-2/validation.csv", w2v_model)
    test_dataset = SST2Dataset("./SST-2/test.csv", w2v_model)

    def make_loader(dataset):
        # shuffle=False preserves the order produced by sort_by_confidence.
        return DataLoader(
            dataset, batch_size=batch_size, collate_fn=collate_fn, shuffle=False
        )

    train_loader = make_loader(train_dataset)
    validation_loader = make_loader(validation_dataset)
    test_loader = make_loader(test_dataset)

    # Word2Vec embeddings are assumed to be 300-dimensional.
    gru1 = GRUModel(300, 128, 2).to(device)
    gru2 = GRUModel(300, 128, 2).to(device)
    criterion = nn.CrossEntropyLoss()
    optimizer1 = Adam(gru1.parameters())
    optimizer2 = Adam(gru2.parameters())

    num_epochs = 10
    for epoch in range(num_epochs):
        # GRU1 trains on even epochs, GRU2 on odd; the just-trained model
        # then reorders the training data by its own confidence.
        active, opt, tag = (
            (gru1, optimizer1, "GRU1") if epoch % 2 == 0 else (gru2, optimizer2, "GRU2")
        )
        print(f"Training {tag} on Epoch {epoch+1}")
        train_model(active, train_loader, criterion, opt)
        train_dataset.sort_by_confidence(active)

        # Rebuild the loader so batches follow the newly sorted order.
        train_loader = make_loader(train_dataset)
        loss, accuracy = evaluate_model([gru1, gru2], validation_loader, criterion)
        print(f"Epoch {epoch+1}: Validation Loss: {loss:.4f}, Accuracy: {accuracy:.4f}")

    _, test_accuracy = evaluate_model([gru1, gru2], test_loader, criterion)
    print(f"Test Accuracy: {test_accuracy:.4f}")

    # Save both model checkpoints.
    torch.save(gru1.state_dict(), "gru1.pth")
    torch.save(gru2.state_dict(), "gru2.pth")


# Script entry point: train, evaluate, and save both models.
if __name__ == "__main__":
    main()
