import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
import pandas as pd
import numpy as np
from tqdm import tqdm  # 用于显示进度条
import re  # 正则表达式处理文本
import matplotlib.pyplot as plt  # 绘图库
import matplotlib  # 支持中文显示

'''
Module overview:
Implements a text classifier for the AG News dataset using a bidirectional
LSTM with an attention mechanism. Key design points: the BiLSTM captures
context in both directions, attention focuses on informative tokens, a
learning-rate schedule stabilizes training, and dropout reduces overfitting.
'''

# -----------------------------
# 1. 数据加载与预处理
# -----------------------------

# Configure matplotlib to render CJK text (SimHei ships with Windows;
# NOTE(review): this font may be missing on other platforms — confirm).
matplotlib.rcParams['font.sans-serif'] = ['SimHei']
matplotlib.rcParams['axes.unicode_minus'] = False  # render minus signs correctly with a CJK font

# Load train/test splits (Parquet is an efficient columnar storage format).
train_df = pd.read_parquet("data/ag_news/train.parquet")
test_df = pd.read_parquet("data/ag_news/test.parquet")

# Extract text and label columns (assumes the columns are named 'text' and 'labels').
train_texts = train_df['text'].tolist()  # training documents
train_labels = train_df['labels'].tolist()  # training labels
test_texts = test_df['text'].tolist()  # test documents
test_labels = test_df['labels'].tolist()  # test labels


# -----------------------------
# 2. 文本预处理与分词
# -----------------------------

def tokenize(text):
    """Normalize *text* and split it into word tokens.

    Removes every character that is not a letter, digit, or whitespace,
    lower-cases the result, and splits on whitespace runs.
    """
    cleaned = re.sub(r"[^a-zA-Z0-9\s]", "", text)  # drop punctuation/special symbols
    return cleaned.lower().split()


# -----------------------------
# 3. 构建词汇表
# -----------------------------

from collections import Counter


def build_vocab(texts, min_freq=1):
    """Build a word-to-index vocabulary from the training corpus.

    Counts token frequencies over *texts*, keeps words occurring at least
    *min_freq* times, and reserves index 0 for <PAD> (padding) and index 1
    for <UNK> (out-of-vocabulary words).
    """
    freq = Counter()
    for doc in texts:
        freq.update(tokenize(doc))  # accumulate token counts corpus-wide

    # Special tokens occupy the first two slots.
    vocab = {"<PAD>": 0, "<UNK>": 1}
    for word, count in freq.items():
        if count >= min_freq:
            vocab[word] = len(vocab)  # next free index, in first-seen order
    return vocab


vocab = build_vocab(train_texts, min_freq=1)  # min_freq=1 keeps every word seen in training
vocab_size = len(vocab)
print("词汇表大小：", vocab_size)


# -----------------------------
# 4. 文本序列化与填充
# -----------------------------

def text_to_sequence(text, vocab):
    """Map a raw text string to a list of vocabulary indices.

    Unknown words fall back to the <UNK> index.
    """
    unk = vocab["<UNK>"]
    return [vocab.get(tok, unk) for tok in tokenize(text)]


max_seq_len = 128  # 序列最大长度（超参数，根据数据集调整）


def pad_sequence(seq, max_len, pad_value=0):
    """Pad or truncate an index sequence to exactly *max_len* entries.

    Args:
        seq: list of token indices.
        max_len: target length.
        pad_value: index used for padding (default 0 == vocab["<PAD>"]).

    Returns:
        A new list of length max_len: *seq* right-padded with *pad_value*
        when shorter, or truncated to its first max_len entries when longer.
    """
    # Taking the pad index as a parameter (instead of reading the module-level
    # `vocab` dict, as before) keeps the function self-contained and reusable;
    # the default 0 matches the <PAD> index assigned by build_vocab.
    if len(seq) < max_len:
        return seq + [pad_value] * (max_len - len(seq))
    return seq[:max_len]


def process_texts(texts, vocab, max_len):
    """Vectorize a corpus into a (num_texts, max_len) integer array."""
    rows = []
    for doc in texts:
        encoded = text_to_sequence(doc, vocab)
        rows.append(pad_sequence(encoded, max_len))
    return np.array(rows)  # stack into a single numpy matrix


# Encode both splits into fixed-length index matrices.
train_sequences = process_texts(train_texts, vocab, max_seq_len)
test_sequences = process_texts(test_texts, vocab, max_seq_len)


# -----------------------------
# 5. 自定义数据集类
# -----------------------------

class TextDataset(Dataset):
    """PyTorch Dataset wrapping encoded sequences, labels, and raw texts.

    Implements __len__ and __getitem__ so a DataLoader can batch samples.
    """

    def __init__(self, sequences, labels, texts):
        # sequences: integer-encoded, fixed-length token sequences
        self.sequences = sequences
        # labels: class labels aligned with sequences
        self.labels = labels
        # texts: raw strings, kept for debugging/visualization
        self.texts = texts

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, idx):
        sample = {
            "sequence": torch.tensor(self.sequences[idx], dtype=torch.long),
            "label": torch.tensor(self.labels[idx], dtype=torch.long),
            "text": self.texts[idx],  # original text travels with the sample
        }
        return sample


# Build Dataset instances for each split.
train_dataset = TextDataset(train_sequences, train_labels, train_texts)
test_dataset = TextDataset(test_sequences, test_labels, test_texts)

# DataLoaders batch the samples for training/evaluation.
batch_size = 32  # hyperparameter; tune to available GPU memory
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)  # reshuffle every epoch
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)  # keep test order deterministic


# -----------------------------
# 6. 模型定义（双向LSTM + 注意力机制）
# -----------------------------

class Attention(nn.Module):
    """Attention pooling over BiLSTM outputs.

    A linear layer scores each time step; softmax over the sequence turns
    the scores into weights, and the weighted sum yields a single context
    vector per example.
    """

    def __init__(self, hidden_dim):
        super(Attention, self).__init__()
        # Input dim is hidden_dim * 2 because the LSTM is bidirectional.
        self.attn = nn.Linear(hidden_dim * 2, 1)

    def forward(self, lstm_outputs):
        # lstm_outputs: [batch_size, seq_len, hidden_dim * 2]
        scores = self.attn(lstm_outputs).squeeze(2)  # -> [batch_size, seq_len]
        weights = torch.softmax(scores, dim=1)  # normalize over time steps

        # Weighted sum via batch matmul: [B, 1, T] x [B, T, 2H] -> [B, 1, 2H]
        context = torch.bmm(weights.unsqueeze(1), lstm_outputs).squeeze(1)
        # Return the weights too so callers can visualize the attention.
        return context, weights


class TextBiLSTMAttention(nn.Module):
    """Text classifier: Embedding -> BiLSTM -> attention pooling -> linear head."""

    def __init__(self, vocab_size, embed_dim, hidden_dim, num_classes, num_layers=1, dropout=0.5):
        super(TextBiLSTMAttention, self).__init__()
        # padding_idx=0 keeps the <PAD> embedding frozen at zero.
        self.embedding = nn.Embedding(vocab_size, embed_dim, padding_idx=0)
        self.dropout = nn.Dropout(dropout)  # regularization

        # Inter-layer LSTM dropout only applies with stacked layers.
        lstm_dropout = dropout if num_layers > 1 else 0
        self.lstm = nn.LSTM(
            input_size=embed_dim,
            hidden_size=hidden_dim,
            num_layers=num_layers,
            batch_first=True,  # tensors are [batch, seq_len, features]
            bidirectional=True,
            dropout=lstm_dropout,
        )

        # Attention pooling over the BiLSTM outputs.
        self.attention = Attention(hidden_dim)
        # Classification head; input is 2*hidden_dim (forward + backward states).
        self.fc = nn.Linear(hidden_dim * 2, num_classes)

    def forward(self, x):
        # x: [batch_size, seq_len] of token indices
        embedded = self.dropout(self.embedding(x))  # [B, T, embed_dim]

        lstm_out, _ = self.lstm(embedded)  # [B, T, hidden_dim * 2]

        # Pool the sequence into one vector per example; weights are unused here.
        pooled, _ = self.attention(lstm_out)  # [B, hidden_dim * 2]
        pooled = self.dropout(pooled)

        return self.fc(pooled)  # logits: [B, num_classes]


# Instantiate the model.
embed_dim = 128  # word-embedding dimension
hidden_dim = 128  # LSTM hidden-state dimension (per direction)
num_classes = len(set(train_labels))  # number of classes, inferred from training labels
model = TextBiLSTMAttention(
    vocab_size=vocab_size,
    embed_dim=embed_dim,
    hidden_dim=hidden_dim,
    num_classes=num_classes,
    num_layers=1,  # number of stacked LSTM layers
    dropout=0.5  # dropout probability
)

# Prefer GPU when available.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)  # move parameters to the selected device

# -----------------------------
# 7. 训练配置
# -----------------------------

# Cross-entropy loss, suitable for multi-class classification.
criterion = nn.CrossEntropyLoss()
# Adam optimizer with an initial learning rate of 1e-3.
optimizer = optim.Adam(model.parameters(), lr=1e-3)
# Learning-rate schedule: multiply the LR by 0.5 every 5 epochs.
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.5)

# -----------------------------
# 8. 训练与评估循环
# -----------------------------

num_epochs = 15  # number of training epochs
# Per-epoch metric history, used for plotting after training.
train_loss_history = []
train_acc_history = []
test_loss_history = []
test_acc_history = []

for epoch in range(num_epochs):
    # ---- Training phase ----
    model.train()  # training mode (enables dropout, etc.)
    running_loss = 0.0  # accumulated (per-sample-weighted) loss
    correct = 0  # number of correct predictions
    total = 0  # number of samples seen

    # tqdm wraps the loader to show a progress bar.
    for batch in tqdm(train_loader, desc=f"训练 Epoch {epoch + 1}/{num_epochs}"):
        sequences = batch["sequence"].to(device)  # input token-index batch
        labels = batch["label"].to(device)  # ground-truth labels

        # Reset accumulated gradients.
        optimizer.zero_grad()

        # Forward pass.
        outputs = model(sequences)
        # Compute the loss.
        loss = criterion(outputs, labels)
        # Backward pass.
        loss.backward()
        # Parameter update.
        optimizer.step()

        # Accumulate metrics.
        running_loss += loss.item() * sequences.size(0)  # batch-mean loss times batch size
        _, predicted = torch.max(outputs, 1)  # predicted class = argmax over logits
        correct += (predicted == labels).sum().item()  # count correct predictions
        total += labels.size(0)  # count samples

    # Epoch-level average loss and accuracy.
    train_loss = running_loss / total
    train_acc = correct / total
    train_loss_history.append(train_loss)
    train_acc_history.append(train_acc)

    # ---- Evaluation phase ----
    model.eval()  # evaluation mode (disables dropout, etc.)
    running_loss = 0.0
    correct = 0
    total = 0

    with torch.no_grad():  # gradients are not needed during evaluation
        for batch in tqdm(test_loader, desc=f"评估 Epoch {epoch + 1}/{num_epochs}"):
            sequences = batch["sequence"].to(device)
            labels = batch["label"].to(device)
            outputs = model(sequences)
            loss = criterion(outputs, labels)

            running_loss += loss.item() * sequences.size(0)
            _, predicted = torch.max(outputs, 1)
            correct += (predicted == labels).sum().item()
            total += labels.size(0)

    test_loss = running_loss / total
    test_acc = correct / total
    test_loss_history.append(test_loss)
    test_acc_history.append(test_acc)

    # Step the learning-rate scheduler once per epoch.
    scheduler.step()

    # Report epoch results.
    print(f"Epoch {epoch + 1}/{num_epochs}: "
          f"Train Loss: {train_loss:.4f}, Train Acc: {train_acc:.4f}, "
          f"Test Loss: {test_loss:.4f}, Test Acc: {test_acc:.4f}")


# -----------------------------
# 9. 可视化训练过程
# -----------------------------

def plot_metrics(history, metric_name, title, filename):
    """Plot a per-epoch metric curve and save it as an image.

    Args:
        history: per-epoch metric values (one entry per completed epoch).
        metric_name: y-axis label, e.g. "Loss" or "Accuracy".
        title: figure title.
        filename: output image path passed to plt.savefig.
    """
    plt.figure(figsize=(8, 5))
    # Derive the x-axis from the history itself rather than the global
    # num_epochs, so the plot stays correct if training stops early or the
    # function is reused with a different-length history.
    plt.plot(range(1, len(history) + 1), history, marker='o', linestyle='-')
    plt.title(title)
    plt.xlabel('Epoch')
    plt.ylabel(metric_name)
    plt.grid(True, linestyle='--', alpha=0.7)
    plt.savefig(filename)  # write the figure to disk
    plt.close()  # release the figure to avoid memory buildup


# Plot loss/accuracy curves for both splits and save them as PNGs.
plot_metrics(train_loss_history, "Loss", "训练损失", "train_loss_bilstm_attn.png")
plot_metrics(train_acc_history, "Accuracy", "训练准确率", "train_acc_bilstm_attn.png")
plot_metrics(test_loss_history, "Loss", "测试损失", "test_loss_bilstm_attn.png")
plot_metrics(test_acc_history, "Accuracy", "测试准确率", "test_acc_bilstm_attn.png")
print("训练过程图已保存。")