# ... [保持所有数据预处理、词向量训练、数据集划分代码与原始CNN版本完全一致] ...
import math
import pickle
import re
import time

import matplotlib.pyplot as plt
import nltk
import numpy as np
import seaborn as sns
import torch
import torch.nn as nn
# Fixes broken `from typing import F` (typing exports no `F`; the code below
# uses F.relu, i.e. torch.nn.functional).
import torch.nn.functional as F
from gensim.models import Word2Vec
from sklearn.datasets import fetch_20newsgroups
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader, Dataset

# Matplotlib settings so Chinese plot labels render correctly
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
# Fetch the NLTK punkt tokenizer models (needed by nltk.word_tokenize below)
nltk.download('punkt')
nltk.download('punkt_tab')
# Register a local nltk_data path (machine-specific — change to your own path)
nltk.data.path.append('C:/Users/xiaoxiao/PycharmProjects/pythonProject3/compare/nltk_data')
try:
    nltk.data.find('tokenizers/punkt')
except LookupError:
    # punkt not found locally: print manual installation steps and abort
    print("请手动完成以下步骤：")
    print("1. 访问 https://github.com/nltk/nltk_data/raw/gh-pages/packages/tokenizers/punkt.zip")
    print("2. 解压到 C:/Users/xiaoxiao/PycharmProjects/pythonProject3/compare/nltk_data/tokenizers/punkt/")
    print("3. 重新创建以下目录结构：")
    print("   nltk_data/")
    print("   └── tokenizers/")
    print("       └── punkt/")
    print("           ├── PY3/")
    print("           │   └── english.pickle")
    print("           └── README")
    exit()
# --------------------------------------
# Load the 20 Newsgroups dataset (all subsets, all categories, fixed shuffle seed)
data = fetch_20newsgroups(subset='all',
                          categories=None,
                          shuffle=True,
                          random_state=42,
                          # NOTE(review): data_home points inside the nltk punkt
                          # directory — looks unintentional; confirm this path.
                          data_home='C:/Users/xiaoxiao/PycharmProjects/pythonProject3/compare/nltk_data/tokenizers/punkt/PY3',
                          download_if_missing=False  # disable automatic download
                          )
texts = data.data
labels = data.target


# 文本清洗函数
def clean_text(text):
    """Normalise a raw document to lowercase, space-separated letters.

    Order matters: e-mail addresses and URLs are removed before the
    letters-only pass would destroy their punctuation.
    """
    substitutions = (
        (r'\n', ' '),
        (r'\r', ' '),
        (r'\S*@\S*\s?', ''),    # e-mail addresses
        (r'https?://\S+', ''),  # URLs
        (r'[^a-zA-Z]', ' '),    # keep letters only
    )
    for pattern, replacement in substitutions:
        text = re.sub(pattern, replacement, text)
    return text.lower().strip()


# Clean and tokenize every document
cleaned_texts = [clean_text(t) for t in texts]
tokenized_texts = [nltk.word_tokenize(t) for t in cleaned_texts]

# 80/20 train/test split with a fixed seed for reproducibility
X_train, X_test, y_train, y_test = train_test_split(tokenized_texts, labels, test_size=0.2, random_state=42)

# --------------------------------------
# Word-vector training and vocabulary construction
# NOTE(review): the Word2Vec constructor already trains for its default number
# of epochs; the explicit .train() call below trains 10 MORE epochs on top.
w2v_model = Word2Vec(sentences=X_train, vector_size=100, window=5, min_count=5, workers=4)
w2v_model.train(X_train, total_examples=len(X_train), epochs=10)
word_vectors = w2v_model.wv

# Vocabulary: id 0 is reserved for padding, id 1 for unknown tokens
PAD_TOKEN = "<PAD>"
UNK_TOKEN = "<UNK>"
word2idx = {PAD_TOKEN: 0, UNK_TOKEN: 1}
for word, idx in word_vectors.key_to_index.items():
    word2idx[word] = idx + 2  # shift by 2 to avoid clashing with PAD/UNK

# Embedding matrix aligned with word2idx; rows 0 (PAD) and 1 (UNK) stay all-zero
embedding_matrix = np.zeros((len(word2idx), 100))
for word, idx in word2idx.items():
    if word in word_vectors:
        embedding_matrix[idx] = word_vectors[word]
embedding_matrix = torch.tensor(embedding_matrix, dtype=torch.float32)

# Fixed sequence-length cap shared by both models
# max_length = max(max(len(seq) for seq in X_train), max(len(seq) for seq in X_test))
max_length = 512  # computing the true maximum would blow up memory


def text_to_ids(text, word2idx):
    """Map each token to its vocabulary id, falling back to the UNK id.

    Relies on the module-level UNK_TOKEN constant.
    """
    unk_id = word2idx[UNK_TOKEN]
    return [word2idx.get(token, unk_id) for token in text]


# Convert every tokenized document to a list of vocabulary ids
X_train_ids = [text_to_ids(t, word2idx) for t in X_train]
X_test_ids = [text_to_ids(t, word2idx) for t in X_test]


def pad_sequence(seq, max_len):
    """Truncate or right-pad `seq` to exactly `max_len` ids.

    Keeps the input length identical for both models; padding uses the
    module-level word2idx[PAD_TOKEN] id.
    """
    clipped = seq[:max_len]
    pad_count = max_len - len(clipped)
    return clipped + [word2idx[PAD_TOKEN]] * pad_count


"""if len(seq) < max_len:
        seq = seq + [word2idx[PAD_TOKEN]] * (max_len - len(seq))
    else:
        seq = seq[:max_len]
    return seq"""

X_train_ids = [pad_sequence(s, max_length) for s in X_train_ids]
X_test_ids = [pad_sequence(s, max_length) for s in X_test_ids]

# 转换为Tensor
X_train_tensor = torch.tensor(X_train_ids, dtype=torch.long)
X_test_tensor = torch.tensor(X_test_ids, dtype=torch.long)
y_train_tensor = torch.tensor(y_train, dtype=torch.long)
y_test_tensor = torch.tensor(y_test, dtype=torch.long)


# --------------------------------------
# 数据加载器
class TextDataset(Dataset):
    """Map-style dataset yielding (input_ids, label) pairs for a DataLoader."""

    def __init__(self, X, y):
        # Keep plain references; the tensors are already fully materialised.
        self.features = X
        self.targets = y

    def __len__(self):
        return len(self.features)

    def __getitem__(self, idx):
        return self.features[idx], self.targets[idx]


# Shared loader configuration for both models
batch_size = 64
train_dataset = TextDataset(X_train_tensor, y_train_tensor)
test_dataset = TextDataset(X_test_tensor, y_test_tensor)
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)  # reshuffle each epoch
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)


# --------------------------------------
# 统一训练配置（同时适用于CNN和Transformer）
class Trainer:
    """Uniform training harness so both models share identical hyperparameters
    (optimizer, learning rate, epoch count) and the same evaluation protocol.

    Relies on the module-level `word2idx`, `embedding_matrix`, `train_loader`
    and `test_loader`.
    """

    def __init__(self, model_class, model_name):
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.model = model_class(
            vocab_size=len(word2idx),
            embed_dim=100,
            num_classes=20,
            embedding_matrix=embedding_matrix,
        ).to(self.device)
        self.model_name = model_name
        self.criterion = nn.CrossEntropyLoss()
        # Identical optimizer settings for a fair CNN-vs-Transformer comparison
        self.optimizer = torch.optim.AdamW(self.model.parameters(), lr=0.0007)
        self.epochs = 20

    def _train_one_epoch(self):
        """Run one optimisation pass over train_loader.

        Returns (mean_loss, accuracy) over the training set.
        """
        self.model.train()
        loss_sum = 0.0
        hits = 0
        seen = 0
        for inputs, targets in train_loader:
            inputs = inputs.to(self.device)
            targets = targets.to(self.device)
            self.optimizer.zero_grad()
            logits = self.model(inputs)
            loss = self.criterion(logits, targets)
            loss.backward()
            self.optimizer.step()

            count = inputs.size(0)
            loss_sum += loss.item() * count  # de-average so the epoch mean is exact
            hits += (torch.argmax(logits, dim=1) == targets).sum().item()
            seen += count
        return loss_sum / seen, hits / seen

    def _evaluate(self):
        """Return accuracy over test_loader (no gradient tracking)."""
        self.model.eval()
        hits = 0
        seen = 0
        with torch.no_grad():
            for inputs, targets in test_loader:
                inputs = inputs.to(self.device)
                targets = targets.to(self.device)
                preds = torch.argmax(self.model(inputs), dim=1)
                hits += (preds == targets).sum().item()
                seen += inputs.size(0)
        return hits / seen

    def train(self):
        """Train for self.epochs epochs.

        Returns (train_losses, train_accs, test_accs, training_time_seconds).
        """
        start_time = time.time()
        train_losses, train_accs, test_accs = [], [], []

        for epoch in range(self.epochs):
            train_loss, train_acc = self._train_one_epoch()
            test_acc = self._evaluate()

            train_losses.append(train_loss)
            train_accs.append(train_acc)
            test_accs.append(test_acc)

            print(f"[{self.model_name}] Epoch {epoch + 1}/{self.epochs} | "
                  f"Loss: {train_loss:.4f} | Train Acc: {train_acc:.4f} | Test Acc: {test_acc:.4f}")

        training_time = time.time() - start_time
        print(f"{self.model_name} 总训练时间: {training_time:.2f}秒")
        return train_losses, train_accs, test_accs, training_time


# --------------------------------------
# CNN模型定义（与原始版本一致）
class CNN(nn.Module):
    """1-D CNN text classifier: embedding -> Conv1d -> global max pool -> MLP head.

    Args:
        vocab_size: vocabulary size (row count of the embedding table).
        embed_dim: embedding dimension (= Conv1d input channels).
        num_classes: number of output classes.
        embedding_matrix: optional pre-trained [vocab_size, embed_dim] weights
            used to initialise the embedding layer.
    """

    def __init__(self, vocab_size, embed_dim, num_classes, embedding_matrix=None):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, embed_dim, padding_idx=0)
        if embedding_matrix is not None:
            self.embedding.weight.data.copy_(embedding_matrix)

        self.conv = nn.Conv1d(embed_dim, 128, kernel_size=3)
        self.fc1 = nn.Linear(128, 64)
        # BUG FIX: fc2 must consume fc1's 64 features; the original
        # nn.Linear(100, num_classes) made forward() crash on a shape mismatch.
        self.fc2 = nn.Linear(64, num_classes)
        self.dropout = nn.Dropout(0.4)

    def forward(self, x):
        x = self.embedding(x)      # [batch, seq, emb]
        x = x.permute(0, 2, 1)     # [batch, emb, seq] for Conv1d
        # BUG FIX: the conv layer was applied twice (F.relu(self.conv(x)) then
        # self.conv(x) again), which crashed on a channel mismatch (128 != emb)
        # and discarded the ReLU; apply it exactly once with the activation.
        x = F.relu(self.conv(x))   # [batch, 128, seq-2]
        x = torch.max(x, dim=2)[0]  # global max pooling -> [batch, 128]
        x = self.fc1(x)            # [batch, 64]
        return self.fc2(self.dropout(x))


# Transformer模型定义（调整参数对齐）
class TransformerClassifier(nn.Module):
    """Single-layer Transformer encoder text classifier.

    Capacity is kept deliberately small (1 attention head, 1 layer,
    feed-forward dim 150, dropout 0.4) so it is roughly comparable to
    the CNN baseline trained alongside it.
    """

    def __init__(self, vocab_size, embed_dim, num_classes, embedding_matrix=None):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, embed_dim, padding_idx=0)
        if embedding_matrix is not None:
            # Initialise from the pre-trained word2vec weights
            self.embedding.weight.data.copy_(embedding_matrix)

        layer = nn.TransformerEncoderLayer(
            d_model=embed_dim,
            nhead=1,
            dim_feedforward=150,
            dropout=0.4,
            batch_first=True,  # inputs arrive as [batch, seq, emb]
        )
        self.transformer = nn.TransformerEncoder(layer, num_layers=1)
        self.fc1 = nn.Linear(embed_dim, 64)
        self.fc2 = nn.Linear(64, num_classes)
        self.dropout = nn.Dropout(0.4)

    def forward(self, x):
        # Mask PAD positions (token id 0) so attention ignores them
        pad_mask = x == 0
        embedded = self.embedding(x)
        encoded = self.transformer(embedded, src_key_padding_mask=pad_mask)
        pooled = encoded.mean(dim=1)  # global average pooling over the sequence
        return self.fc2(self.dropout(self.fc1(pooled)))


# --------------------------------------
# Train both models under identical settings
cnn_trainer = Trainer(CNN, "CNN")
transformer_trainer = Trainer(TransformerClassifier, "Transformer")

print("开始训练CNN...")
cnn_loss, cnn_train_acc, cnn_test_acc, cnn_time = cnn_trainer.train()

print("\n开始训练Transformer...")
trans_loss, trans_train_acc, trans_test_acc, trans_time = transformer_trainer.train()

# --------------------------------------
# Side-by-side visualisation of the two training runs
plt.figure(figsize=(12, 5))

# Test-accuracy curves
plt.subplot(1, 2, 1)
plt.plot(cnn_test_acc, label='CNN测试准确率', linestyle='--')
plt.plot(trans_test_acc, label='Transformer测试准确率', linestyle='-.')
plt.title('模型准确率对比')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()

# Training-loss curves
plt.subplot(1, 2, 2)
plt.plot(cnn_loss, label='CNN训练损失', linestyle='--')
plt.plot(trans_loss, label='Transformer训练损失', linestyle='-.')
plt.title('训练损失对比')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.tight_layout()
plt.show()

# Final summary: best test accuracy and wall-clock training time
print("\n最终性能对比：")
print(f"CNN最佳测试准确率: {max(cnn_test_acc):.4f}")
print(f"Transformer最佳测试准确率: {max(trans_test_acc):.4f}")
print(f"\n训练时间对比：")
print(f"CNN训练用时: {cnn_time:.2f}秒")
print(f"Transformer训练用时: {trans_time:.2f}秒")
print(f"速度差异: {abs(cnn_time - trans_time):.2f}秒 ({'CNN更快' if cnn_time < trans_time else 'Transformer更快'})")


# 模型参数对比
def count_parameters(model):
    """Return the number of trainable parameters (elements of every
    grad-requiring tensor) in `model`."""
    total = 0
    for param in model.parameters():
        if param.requires_grad:
            total += param.numel()
    return total


print(f"\n参数数量对比：")
print(f"CNN参数总量: {count_parameters(cnn_trainer.model):,}")
print(f"Transformer参数总量: {count_parameters(transformer_trainer.model):,}")
