import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
import pandas as pd
import numpy as np
from tqdm import tqdm
import re
import matplotlib.pyplot as plt
import matplotlib

# Configure matplotlib so CJK (Chinese) glyphs render: SimHei supplies the
# characters used in the plot titles below, and disabling unicode_minus keeps
# the minus sign displayable under that font.
matplotlib.rcParams['font.sans-serif'] = ['SimHei']
matplotlib.rcParams['axes.unicode_minus'] = False

# -----------------------------
# 1. Load the dataset (assumed to be stored as parquet files).
train_df = pd.read_parquet("data/ag_news/train.parquet")
test_df = pd.read_parquet("data/ag_news/test.parquet")

# The dataset is expected to provide a 'text' column (raw text) and a
# 'labels' column (integer class ids).
# NOTE(review): the original comment said the column is named 'label', but
# the code below reads 'labels' — verify against the parquet schema.
train_texts = train_df['text'].tolist()
train_labels = train_df['labels'].tolist()
test_texts = test_df['text'].tolist()
test_labels = test_df['labels'].tolist()


# -----------------------------
# 2. 文本预处理与分词
def tokenize(text):
    """Split *text* into lowercase alphanumeric tokens.

    Everything other than letters, digits and whitespace is stripped,
    then the text is lowercased and split on whitespace.
    """
    cleaned = re.sub(r"[^a-zA-Z0-9\s]", "", text)
    return cleaned.lower().split()


# -----------------------------
# 3. 构建词汇表（基于训练集）
from collections import Counter


def build_vocab(texts, min_freq=1):
    """Build a word -> index vocabulary from *texts*.

    Index 0 is reserved for the padding token and index 1 for unknown
    words; every token seen at least *min_freq* times receives the next
    free index, in first-seen order.
    """
    freqs = Counter()
    for doc in texts:
        freqs.update(tokenize(doc))
    # Reserve the PAD and UNK slots up front.
    vocab = {"<PAD>": 0, "<UNK>": 1}
    for token, count in freqs.items():
        if count < min_freq:
            continue
        vocab[token] = len(vocab)
    return vocab


# Build the vocabulary from the training split only, so test-set words
# cannot leak into the model's input space.
vocab = build_vocab(train_texts, min_freq=1)
vocab_size = len(vocab)
print("词汇表大小：", vocab_size)


# -----------------------------
# 4. 将文本转换为整数序列，并补齐或截断到固定长度
def text_to_sequence(text, vocab):
    """Map *text* to a list of vocabulary ids; OOV tokens map to <UNK>."""
    unk = vocab["<UNK>"]
    return [vocab.get(tok, unk) for tok in tokenize(text)]


max_seq_len = 128  # maximum token sequence length; tune to the corpus


def pad_sequence(seq, max_len, pad_value=None):
    """Pad *seq* up to *max_len* with the pad id, or truncate it.

    Parameters
    ----------
    seq : list[int]
        Token-id sequence.
    max_len : int
        Exact target length of the returned list.
    pad_value : int, optional
        Id used for padding.  Defaults to ``vocab["<PAD>"]`` (the original
        implementation read the module-level ``vocab`` directly; the new
        parameter removes that hidden global dependency while staying
        backward compatible).

    Returns
    -------
    list[int] of length exactly *max_len*.
    """
    if pad_value is None:
        pad_value = vocab["<PAD>"]
    if len(seq) >= max_len:
        return seq[:max_len]
    return seq + [pad_value] * (max_len - len(seq))


def process_texts(texts, vocab, max_len):
    """Vectorize every text: tokenize, map to ids, pad/truncate to *max_len*."""
    rows = []
    for doc in texts:
        rows.append(pad_sequence(text_to_sequence(doc, vocab), max_len))
    return np.array(rows)


# Vectorize both splits up front so batching later is a cheap array slice.
train_sequences = process_texts(train_texts, vocab, max_seq_len)
test_sequences = process_texts(test_texts, vocab, max_seq_len)


# -----------------------------
# 5. 自定义数据集类
class TextDataset(Dataset):
    """Torch dataset wrapping pre-padded id sequences, labels, and raw texts.

    The raw text is carried along only so predictions can later be shown
    next to their source sentence; the model consumes just the id tensors.
    """

    def __init__(self, sequences, labels, texts):
        self.sequences = sequences
        self.labels = labels
        self.texts = texts

    def __len__(self):
        # One example per label.
        return len(self.labels)

    def __getitem__(self, idx):
        return {
            "sequence": torch.tensor(self.sequences[idx], dtype=torch.long),
            "label": torch.tensor(self.labels[idx], dtype=torch.long),
            "text": self.texts[idx],
        }


# Wrap the splits in Dataset/DataLoader; only the training data is shuffled.
train_dataset = TextDataset(train_sequences, train_labels, train_texts)
test_dataset = TextDataset(test_sequences, test_labels, test_texts)

batch_size = 32
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)


# -----------------------------
# 6. 定义从头训练的 CNN 模型（基于 Kim’s CNN 架构）
class TextCNN(nn.Module):
    """Kim (2014)-style CNN for sentence classification.

    Each input sentence (a sequence of token ids) is embedded, run through
    parallel 2-D convolutions whose kernels span the full embedding width
    with heights ``kernel_sizes``, max-over-time pooled, concatenated, and
    mapped through dropout + a linear layer to class logits.

    Parameters
    ----------
    vocab_size : int
        Number of rows in the embedding table.
    embed_dim : int
        Embedding dimensionality.
    num_classes : int
        Size of the output logit vector.
    kernel_sizes : sequence of int, optional
        Convolution window heights in tokens.  Default ``(3, 4, 5)``.
        Fix: the default is now a tuple instead of a list, avoiding the
        shared mutable-default-argument pitfall; any sequence still works.
    num_filters : int, optional
        Number of feature maps per kernel size.
    dropout : float, optional
        Dropout probability applied to the pooled feature vector.
    """

    def __init__(self, vocab_size, embed_dim, num_classes,
                 kernel_sizes=(3, 4, 5), num_filters=100, dropout=0.5):
        super(TextCNN, self).__init__()
        # padding_idx=0 pins the <PAD> embedding to zero (not trained).
        self.embedding = nn.Embedding(vocab_size, embed_dim, padding_idx=0)
        self.convs = nn.ModuleList([
            nn.Conv2d(1, num_filters, (k, embed_dim)) for k in kernel_sizes
        ])
        self.dropout = nn.Dropout(dropout)
        self.fc = nn.Linear(len(kernel_sizes) * num_filters, num_classes)

    def forward(self, x):
        """Return class logits for a batch of token-id sequences.

        x: [batch_size, seq_len]; seq_len must be >= max(kernel_sizes)
        for the convolutions to produce a non-empty output.
        Returns: [batch_size, num_classes].
        """
        x = self.embedding(x)            # [B, L, E]
        x = x.unsqueeze(1)               # [B, 1, L, E] — single input channel
        pooled = []
        for conv in self.convs:
            c = torch.relu(conv(x))      # [B, F, L-k+1, 1]
            c = c.squeeze(3)             # [B, F, L-k+1]
            c = torch.max(c, dim=2)[0]   # max-over-time pooling -> [B, F]
            pooled.append(c)
        out = torch.cat(pooled, dim=1)   # [B, F * len(kernel_sizes)]
        out = self.dropout(out)
        return self.fc(out)


# Model hyper-parameters.
embed_dim = 128
# Derive the number of classes from the distinct labels in the training set.
num_classes = len(set(train_labels))
model = TextCNN(vocab_size=vocab_size, embed_dim=embed_dim, num_classes=num_classes)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

# -----------------------------
# 7. Loss and optimizer: standard cross-entropy with Adam.
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=1e-3)

# -----------------------------
# 8. Train / evaluate for a fixed number of epochs, recording per-epoch
#    loss and accuracy histories for the plots produced at the end.
num_epochs = 10
train_loss_history = []
train_acc_history = []
test_loss_history = []
test_acc_history = []

for epoch in range(num_epochs):
    # --- training phase ---
    model.train()
    running_loss = 0.0
    correct = 0
    total = 0
    for batch in tqdm(train_loader, desc=f"训练 Epoch {epoch + 1}/{num_epochs}"):
        sequences = batch["sequence"].to(device)
        labels = batch["label"].to(device)

        optimizer.zero_grad()
        outputs = model(sequences)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        # Weight the batch loss by batch size so the epoch average stays
        # correct even when the last batch is smaller than batch_size.
        running_loss += loss.item() * sequences.size(0)
        _, predicted = torch.max(outputs, 1)
        correct += (predicted == labels).sum().item()
        total += labels.size(0)

    train_loss = running_loss / total
    train_acc = correct / total
    train_loss_history.append(train_loss)
    train_acc_history.append(train_acc)

    # --- evaluation phase (no gradients; eval() disables dropout) ---
    model.eval()
    running_loss = 0.0
    correct = 0
    total = 0
    with torch.no_grad():
        for batch in tqdm(test_loader, desc=f"评估 Epoch {epoch + 1}/{num_epochs}"):
            sequences = batch["sequence"].to(device)
            labels = batch["label"].to(device)
            outputs = model(sequences)
            loss = criterion(outputs, labels)
            running_loss += loss.item() * sequences.size(0)
            _, predicted = torch.max(outputs, 1)
            correct += (predicted == labels).sum().item()
            total += labels.size(0)

    test_loss = running_loss / total
    test_acc = correct / total
    test_loss_history.append(test_loss)
    test_acc_history.append(test_acc)

    print(
        f"Epoch {epoch + 1}/{num_epochs}: Train Loss: {train_loss:.4f}, Train Acc: {train_acc:.4f}, Test Loss: {test_loss:.4f}, Test Acc: {test_acc:.4f}")


# -----------------------------
# 9. 绘制训练过程图（保存为图片）
def plot_metrics(history, metric_name, title, filename):
    """Plot a per-epoch metric curve and save it to *filename* as a PNG.

    Parameters
    ----------
    history : sequence of float
        One metric value per epoch.
    metric_name : str
        Y-axis label.
    title : str
        Figure title.
    filename : str
        Output image path.

    Fix: the x-axis is derived from ``len(history)`` instead of the global
    ``num_epochs``, so the function no longer depends on module state and
    works for a history of any length (identical output for this script,
    where each history has exactly num_epochs entries).
    """
    plt.figure(figsize=(8, 5))
    plt.plot(range(1, len(history) + 1), history, marker='o')
    plt.title(title)
    plt.xlabel('Epoch')
    plt.ylabel(metric_name)
    plt.grid(True)
    plt.savefig(filename)
    plt.close()


# Save the four training-curve figures (titles/filenames kept as-is).
plot_metrics(train_loss_history, "Loss", "训练损失", "train_loss_cnn.png")
plot_metrics(train_acc_history, "Accuracy", "训练准确率", "train_acc_cnn.png")
plot_metrics(test_loss_history, "Loss", "测试损失", "test_loss_cnn.png")
plot_metrics(test_acc_history, "Accuracy", "测试准确率", "test_acc_cnn.png")
print("训练过程图已保存。")
