import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
import pandas as pd
import numpy as np
from tqdm import tqdm
import re
import matplotlib.pyplot as plt
import matplotlib
'''
Ablation experiment: unidirectional LSTM.
LSTM layer:
    bidirectional=True is changed to bidirectional=False.
Attention layer and fully-connected layer:
    The original attention and FC layers both assumed the LSTM output feature
    dimension was hidden_dim*2; both must be changed to hidden_dim.
    Concretely, the input dimension of the linear projection inside the
    attention layer goes from hidden_dim*2 to hidden_dim, and the input
    feature count of the FC layer also goes from hidden_dim*2 to hidden_dim.
'''
# Configure matplotlib so Chinese characters render correctly in saved figures
matplotlib.rcParams['font.sans-serif'] = ['SimHei']
matplotlib.rcParams['axes.unicode_minus'] = False

# -----------------------------
# 1. Load the dataset (assumed to be stored in parquet files)
train_df = pd.read_parquet("data/ag_news/train.parquet")
test_df = pd.read_parquet("data/ag_news/test.parquet")

# The dataset is expected to have a 'text' column and a 'labels' column.
# NOTE(review): the original comment said 'label', but the code reads 'labels'.
train_texts = train_df['text'].tolist()
train_labels = train_df['labels'].tolist()
test_texts = test_df['text'].tolist()
test_labels = test_df['labels'].tolist()

# -----------------------------
# 2. Text preprocessing and tokenization
def tokenize(text):
    """Strip punctuation (keep letters, digits, whitespace), lowercase, and split on whitespace."""
    cleaned = re.sub(r"[^a-zA-Z0-9\s]", "", text)
    return cleaned.lower().split()


# -----------------------------
# 3. Build the vocabulary (from the training set)
from collections import Counter


def build_vocab(texts, min_freq=1):
    """Build a word -> index mapping over *texts*.

    Indices 0 and 1 are reserved for the <PAD> and <UNK> tokens; every token
    whose corpus frequency is at least *min_freq* is assigned the next free
    index, in first-seen order.
    """
    freqs = Counter()
    for doc in texts:
        freqs.update(tokenize(doc))
    # Reserve the special tokens first so <PAD> is always index 0.
    mapping = {"<PAD>": 0, "<UNK>": 1}
    for token, count in freqs.items():
        if count >= min_freq:
            mapping[token] = len(mapping)
    return mapping


# Build the vocabulary from the training split only (min_freq=1 keeps every token)
vocab = build_vocab(train_texts, min_freq=1)
vocab_size = len(vocab)
print("词汇表大小：", vocab_size)


# -----------------------------
# 4. Convert texts to integer sequences, padded or truncated to a fixed length
def text_to_sequence(text, vocab):
    """Map *text* to a list of vocabulary indices, falling back to <UNK> for unseen tokens."""
    unk = vocab["<UNK>"]
    return [vocab.get(tok, unk) for tok in tokenize(text)]


max_seq_len = 128  # 根据实际情况可调整


def pad_sequence(seq, max_len, pad_value=0):
    """Pad *seq* with *pad_value* up to *max_len*, or truncate it to *max_len*.

    Args:
        seq: list of token indices.
        max_len: target length of the returned list.
        pad_value: index used for padding. Defaults to 0, the <PAD> index that
            build_vocab always reserves. Previously this function read the
            module-level ``vocab`` global; taking the value as a parameter
            removes that hidden dependency while producing the same result.

    Returns:
        A new list of exactly *max_len* indices.
    """
    if len(seq) < max_len:
        return seq + [pad_value] * (max_len - len(seq))
    return seq[:max_len]


def process_texts(texts, vocab, max_len):
    """Encode every text as a fixed-length index sequence and stack into a 2-D array."""
    encoded = (pad_sequence(text_to_sequence(t, vocab), max_len) for t in texts)
    return np.array(list(encoded))


# Encode both splits with the vocabulary built from the training set
train_sequences = process_texts(train_texts, vocab, max_seq_len)
test_sequences = process_texts(test_texts, vocab, max_seq_len)


# -----------------------------
# 5. Custom Dataset class
class TextDataset(Dataset):
    """Dataset yielding dicts of (encoded sequence, label, raw text) per sample."""

    def __init__(self, sequences, labels, texts):
        self.sequences = sequences
        self.labels = labels
        # The raw text travels with each sample so predictions can later be
        # inspected against the original input.
        self.texts = texts

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, idx):
        sample = {
            "sequence": torch.tensor(self.sequences[idx], dtype=torch.long),
            "label": torch.tensor(self.labels[idx], dtype=torch.long),
            "text": self.texts[idx],
        }
        return sample


# Wrap the encoded splits in Dataset/DataLoader; shuffle only the training loader
train_dataset = TextDataset(train_sequences, train_labels, train_texts)
test_dataset = TextDataset(test_sequences, test_labels, test_texts)

batch_size = 32
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)


# -----------------------------
# 6. Model definition: unidirectional LSTM + attention (ablation of the BiLSTM variant)
class Attention(nn.Module):
    """Attention pooling over LSTM timestep outputs.

    Each timestep is scored with a single linear unit, scores are normalised
    with softmax over the sequence, and the weighted sum of timestep features
    is returned. In this ablation the LSTM is unidirectional, so the input
    feature size is hidden_dim (not hidden_dim*2 as in the BiLSTM variant).
    """

    def __init__(self, hidden_dim):
        super(Attention, self).__init__()
        # One scalar score per timestep; input size matches the LSTM output.
        self.attn = nn.Linear(hidden_dim, 1)

    def forward(self, lstm_outputs):
        # lstm_outputs: [batch_size, seq_len, hidden_dim]
        scores = self.attn(lstm_outputs).squeeze(2)    # [batch_size, seq_len]
        weights = torch.softmax(scores, dim=1)         # [batch_size, seq_len]
        # Weighted sum over the time axis via batched matrix multiplication.
        context = torch.bmm(weights.unsqueeze(1), lstm_outputs).squeeze(1)
        return context, weights                        # context: [batch, hidden_dim]


class TextBiLSTMAttention(nn.Module):
    """Unidirectional-LSTM + attention text classifier (ablation variant).

    NOTE(review): the class name still says "BiLSTM", but this ablation runs
    with bidirectional=False; the name is kept unchanged so callers keep working.
    """

    def __init__(self, vocab_size, embed_dim, hidden_dim, num_classes, num_layers=1, dropout=0.5):
        super(TextBiLSTMAttention, self).__init__()
        self.embedding = nn.Embedding(vocab_size, embed_dim, padding_idx=0)
        self.dropout = nn.Dropout(dropout)
        # Ablation: bidirectional=False, so the LSTM emits hidden_dim features
        # (the bidirectional variant emits hidden_dim*2). Inter-layer dropout
        # only takes effect when the LSTM is stacked (num_layers > 1).
        self.lstm = nn.LSTM(embed_dim, hidden_dim, num_layers=num_layers,
                            batch_first=True, bidirectional=False,
                            dropout=dropout if num_layers > 1 else 0)
        self.attention = Attention(hidden_dim)
        self.fc = nn.Linear(hidden_dim, num_classes)

    def forward(self, x):
        # x: [batch_size, seq_len] token indices
        embedded = self.dropout(self.embedding(x))   # [batch, seq, embed_dim]
        lstm_out, _ = self.lstm(embedded)            # [batch, seq, hidden_dim]
        context, _ = self.attention(lstm_out)        # [batch, hidden_dim]
        return self.fc(self.dropout(context))        # [batch, num_classes]


# Model hyper-parameters
embed_dim = 128
hidden_dim = 128
num_classes = len(set(train_labels))
model = TextBiLSTMAttention(vocab_size=vocab_size, embed_dim=embed_dim, hidden_dim=hidden_dim,
                            num_classes=num_classes, num_layers=1, dropout=0.5)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

# -----------------------------
# 7. Loss function, optimizer and learning-rate scheduler
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=1e-3)
# Halve the learning rate every 5 epochs
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.5)

# -----------------------------
# 8. Training and evaluation loop
num_epochs = 100
train_loss_history = []
train_acc_history = []
test_loss_history = []
test_acc_history = []

for epoch in range(num_epochs):
    # --- training pass ---
    model.train()
    running_loss = 0.0
    correct = 0
    total = 0
    for batch in tqdm(train_loader, desc=f"训练 Epoch {epoch + 1}/{num_epochs}"):
        sequences = batch["sequence"].to(device)
        labels = batch["label"].to(device)

        optimizer.zero_grad()
        outputs = model(sequences)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        # Weight the batch loss by batch size so the epoch average stays
        # correct even when the final batch is smaller.
        running_loss += loss.item() * sequences.size(0)
        _, predicted = torch.max(outputs, 1)
        correct += (predicted == labels).sum().item()
        total += labels.size(0)

    train_loss = running_loss / total
    train_acc = correct / total
    train_loss_history.append(train_loss)
    train_acc_history.append(train_acc)

    # --- evaluation pass on the test split (no gradients) ---
    model.eval()
    running_loss = 0.0
    correct = 0
    total = 0
    with torch.no_grad():
        for batch in tqdm(test_loader, desc=f"评估 Epoch {epoch + 1}/{num_epochs}"):
            sequences = batch["sequence"].to(device)
            labels = batch["label"].to(device)
            outputs = model(sequences)
            loss = criterion(outputs, labels)
            running_loss += loss.item() * sequences.size(0)
            _, predicted = torch.max(outputs, 1)
            correct += (predicted == labels).sum().item()
            total += labels.size(0)

    test_loss = running_loss / total
    test_acc = correct / total
    test_loss_history.append(test_loss)
    test_acc_history.append(test_acc)

    # Advance the LR schedule once per epoch
    scheduler.step()

    print(f"Epoch {epoch + 1}/{num_epochs}: Train Loss: {train_loss:.4f}, Train Acc: {train_acc:.4f}, " \
          f"Test Loss: {test_loss:.4f}, Test Acc: {test_acc:.4f}")


# -----------------------------
# 9. Plot training curves (saved as image files)
def plot_metrics(history, metric_name, title, filename):
    """Plot a per-epoch metric curve and save it to *filename*.

    Args:
        history: list of per-epoch metric values.
        metric_name: y-axis label.
        title: figure title.
        filename: output image path.

    Fix: the x-axis is derived from len(history) instead of the module-level
    ``num_epochs`` global, so the function no longer crashes (length mismatch
    in plt.plot) when the history list is shorter than the planned epoch
    count, e.g. after an interrupted run — and the hidden global coupling is
    removed.
    """
    plt.figure(figsize=(8, 5))
    plt.plot(range(1, len(history) + 1), history, marker='o')
    plt.title(title)
    plt.xlabel('Epoch')
    plt.ylabel(metric_name)
    plt.grid(True)
    plt.savefig(filename)
    plt.close()


# Save the four curves; the title and filename identify the metric and split
plot_metrics(train_loss_history, "Loss", "训练损失", "train_loss_bilstm_attn.png")
plot_metrics(train_acc_history, "Accuracy", "训练准确率", "train_acc_bilstm_attn.png")
plot_metrics(test_loss_history, "Loss", "测试损失", "test_loss_bilstm_attn.png")
plot_metrics(test_acc_history, "Accuracy", "测试准确率", "test_acc_bilstm_attn.png")
print("训练过程图已保存。")
