'''
Training time: 582.54 seconds
Final Test Accuracy: 0.8386
'''
import os
import time
from collections import Counter

import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.optim as optim
from datasets import load_dataset
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader, Dataset

# Whether to skip training and load a previously trained model instead.
skip_training = True  # True: load saved weights; False: retrain from scratch.

# Path where the trained model's state_dict is saved to / loaded from.
trained_model_path = "./results/cnn_attention_sentiment_model.pth"

# Local IMDB dataset shards (presumably exported by HF `datasets` to parquet
# — confirm the files exist at these relative paths before running).
data_files = {
    "train": "./imdb/plain_text/train-00000-of-00001.parquet",
    "test": "./imdb/plain_text/test-00000-of-00001.parquet",
}
dataset = load_dataset("parquet", data_files=data_files)

# Materialize raw review texts and integer labels for both splits.
train_texts = [x["text"] for x in dataset["train"]]
train_labels = [x["label"] for x in dataset["train"]]
test_texts = [x["text"] for x in dataset["test"]]
test_labels = [x["label"] for x in dataset["test"]]

# Dataset wrapper: raw texts/labels -> fixed-length token-id tensors.
class IMDBDataset(Dataset):
    """Exposes (token_ids, label) pairs as torch tensors.

    Each text is lowercased, whitespace-split, mapped to ids by `tokenizer`,
    then truncated or right-padded with 0 (<PAD>) to exactly `max_length`.
    """

    def __init__(self, texts, labels, tokenizer, max_length):
        self.texts = texts
        self.labels = labels
        self.tokenizer = tokenizer
        self.max_length = max_length

    def __len__(self):
        return len(self.texts)

    def __getitem__(self, idx):
        token_ids = self.tokenizer(self.texts[idx].lower().split())
        # Truncate first, then pad the remainder with 0 up to max_length.
        clipped = token_ids[: self.max_length]
        clipped = clipped + [0] * (self.max_length - len(clipped))
        return (
            torch.tensor(clipped, dtype=torch.long),
            torch.tensor(self.labels[idx], dtype=torch.long),
        )

# Build the word -> id vocabulary.
def build_vocab(texts, max_vocab_size=20000):
    """Build a word->id vocabulary from lowercased, whitespace-split texts.

    Ids 1..max_vocab_size are assigned by descending frequency (ties keep
    first-seen order, matching the original stable sort); id 0 is reserved
    for the <PAD> token.

    Args:
        texts: iterable of raw text strings.
        max_vocab_size: maximum number of real words to keep.

    Returns:
        dict mapping word -> int id, including "<PAD>" -> 0.
    """
    # Counter + most_common replaces the hand-rolled dict/sort; for equal
    # counts most_common preserves first-encountered order, same as the
    # stable sorted() over an insertion-ordered dict it replaces.
    counts = Counter(word for text in texts for word in text.lower().split())
    vocab = {
        word: idx
        for idx, (word, _) in enumerate(counts.most_common(max_vocab_size), start=1)
    }
    vocab["<PAD>"] = 0  # Padding token.
    return vocab

# Build the vocabulary from training texts and define the tokenizer.
vocab = build_vocab(train_texts)
# Maps a list of words to ids; unknown words fall back to 0.
# NOTE(review): 0 is also the <PAD> id, so out-of-vocabulary words are
# indistinguishable from padding — confirm this is intended.
tokenizer = lambda x: [vocab.get(word, 0) for word in x]

# 模型定义：CNN + Attention
class CNNAttention(nn.Module):
    def __init__(self, vocab_size, embedding_dim, num_filters, filter_sizes, output_dim, dropout):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, embedding_dim, padding_idx=0)
        self.convs = nn.ModuleList([
            nn.Conv2d(1, num_filters, (fs, embedding_dim)) for fs in filter_sizes
        ])
        self.attention = nn.Linear(num_filters * len(filter_sizes), 1)
        self.fc = nn.Linear(num_filters * len(filter_sizes), output_dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, text):
        embedded = self.embedding(text).unsqueeze(1)  # [batch_size, 1, seq_len, embedding_dim]
        conved = [torch.relu(conv(embedded)).squeeze(3) for conv in self.convs]
        pooled = [torch.max(conv, dim=2).values for conv in conved]
        cat = self.dropout(torch.cat(pooled, dim=1))  # [batch_size, num_filters * len(filter_sizes)]
        attention_weights = torch.softmax(self.attention(cat), dim=1)
        attended = cat * attention_weights
        return self.fc(attended)

# Hyperparameters.
embedding_dim = 100       # Word-embedding size.
num_filters = 128         # Conv filters per filter width.
filter_sizes = [3, 4, 5]  # N-gram window widths.
output_dim = 2            # Binary sentiment classes.
dropout = 0.5             # Dropout on the pooled feature vector.
batch_size = 64
max_length = 200          # Tokens per example (truncate/pad).
learning_rate = 1e-3
num_epochs = 10

if not skip_training:
    print("Training model...")

    # Datasets and loaders.
    train_dataset = IMDBDataset(train_texts, train_labels, tokenizer, max_length)
    test_dataset = IMDBDataset(test_texts, test_labels, tokenizer, max_length)
    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
    test_loader = DataLoader(test_dataset, batch_size=batch_size)

    # Model, loss function, and optimizer.
    model = CNNAttention(len(vocab), embedding_dim, num_filters, filter_sizes, output_dim, dropout)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)

    # Training loop.
    time_start = time.time()
    train_accuracies = []
    test_accuracies = []

    for epoch in range(num_epochs):
        model.train()
        epoch_loss = 0
        epoch_acc = 0  # running count of correct training predictions
        for texts, labels in train_loader:
            optimizer.zero_grad()
            predictions = model(texts)
            loss = criterion(predictions, labels)
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()
            epoch_acc += (predictions.argmax(1) == labels).sum().item()

        train_accuracy = epoch_acc / len(train_dataset)
        train_accuracies.append(train_accuracy)

        # Evaluate on the test split after each epoch.
        model.eval()
        test_preds = []
        test_labels_list = []
        with torch.no_grad():
            for texts, labels in test_loader:
                predictions = model(texts)
                test_preds.extend(predictions.argmax(1).tolist())
                test_labels_list.extend(labels.tolist())
        test_accuracy = accuracy_score(test_labels_list, test_preds)
        test_accuracies.append(test_accuracy)

        print(f"Epoch {epoch+1}: Train Acc = {train_accuracy:.4f}, Test Acc = {test_accuracy:.4f}")

    # Save the trained weights. torch.save does not create intermediate
    # directories, so make sure the target directory exists first.
    os.makedirs(os.path.dirname(trained_model_path), exist_ok=True)
    torch.save(model.state_dict(), trained_model_path)
    time_end = time.time()
    print(f"Training time: {time_end - time_start:.2f} seconds")

    # Plot the accuracy curves.
    plt.figure(figsize=(10, 5))
    plt.plot(range(1, num_epochs + 1), train_accuracies, label="Training Accuracy", marker="o")
    plt.plot(range(1, num_epochs + 1), test_accuracies, label="Validation Accuracy", marker="o")
    plt.xlabel("Epoch")
    plt.ylabel("Accuracy")
    plt.title("Training and Validation Accuracy")
    plt.legend()
    plt.grid()
    # BUGFIX: the printed path previously said "./results/accuracy_curve.png"
    # while the figure was actually saved to "./picture/accuracy_cnn.png".
    # Use one variable for both so they cannot drift, and create the dir.
    accuracy_plot_path = "./picture/accuracy_cnn.png"
    os.makedirs(os.path.dirname(accuracy_plot_path), exist_ok=True)
    plt.savefig(accuracy_plot_path)
    print(f"Accuracy curve saved to {accuracy_plot_path}")

else:
    print("Loading pre-trained model...")
    model = CNNAttention(len(vocab), embedding_dim, num_filters, filter_sizes, output_dim, dropout)
    # map_location keeps loading working on CPU-only machines even if the
    # checkpoint was saved from a GPU run (this script never moves the model
    # to GPU, so CPU is always the right target here).
    model.load_state_dict(torch.load(trained_model_path, map_location="cpu"))

# Final evaluation on the held-out test split.
model.eval()
eval_loader = DataLoader(
    IMDBDataset(test_texts, test_labels, tokenizer, max_length),
    batch_size=batch_size,
)
test_preds = []
test_labels_list = []
with torch.no_grad():
    for batch_texts, batch_labels in eval_loader:
        logits = model(batch_texts)
        test_preds.extend(logits.argmax(1).tolist())
        test_labels_list.extend(batch_labels.tolist())
accuracy = accuracy_score(test_labels_list, test_preds)
print(f"Final Test Accuracy: {accuracy:.4f}")
