"""
情感分类完整修复版（兼容PyTorch<2.0）
"""
import os
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import LabelEncoder
from datasets import load_dataset
from transformers import get_linear_schedule_with_warmup
from collections import Counter  # newly added
from tqdm import tqdm  # newly added

# ▼▼▼▼▼▼▼▼▼▼ Code preceding the ImprovedLSTMModel class definition ▼▼▼▼▼▼▼▼▼▼
# Reproducibility: seed CPU RNG and, if present, every CUDA device.
RANDOM_SEED = 42
torch.manual_seed(RANDOM_SEED)
if torch.cuda.is_available():
    torch.cuda.manual_seed_all(RANDOM_SEED)
# Device selection: prefer GPU when available.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load the dataset at import time (module-level side effect: network/cache access).
dataset = load_dataset("SetFit/emotion")
# Sorted, de-duplicated label names; len(labels) defines the classifier's output size.
labels = sorted(set(dataset["train"]["label_text"]))  # ordered label list

# ====================
# Model 3: improved LSTM
# ====================

class ImprovedLSTMModel(nn.Module):
    """Bidirectional 2-layer LSTM text classifier over frozen pretrained embeddings.

    The final forward and backward hidden states of the top LSTM layer are
    concatenated, passed through dropout, and projected to class logits.
    """

    def __init__(self, embedding_matrix, hidden_dim=256, dropout=0.3, num_classes=None):
        """
        Args:
            embedding_matrix: (vocab_size, embedding_dim) float tensor of
                pretrained word vectors; kept frozen (not trained).
            hidden_dim: hidden size of each LSTM direction.
            dropout: dropout rate between LSTM layers and before the classifier.
            num_classes: number of output classes. Defaults to the module-level
                ``labels`` list for backward compatibility with existing callers.
        """
        super().__init__()
        if num_classes is None:
            # Backward-compatible fallback: previously the class was hard-wired
            # to the module-level `labels` global; now it is a parameter.
            num_classes = len(labels)
        self.embedding = nn.Embedding.from_pretrained(embedding_matrix, freeze=True)
        self.lstm = nn.LSTM(
            embedding_matrix.size(1),
            hidden_dim,
            batch_first=True,
            bidirectional=True,
            num_layers=2,
            dropout=dropout,
        )
        self.dropout = nn.Dropout(dropout)
        self.fc = nn.Linear(hidden_dim * 2, num_classes)

    def forward(self, x):
        """Map a batch of token ids (batch, seq_len) to logits (batch, num_classes)."""
        x = self.embedding(x)  # (batch, seq_len, embedding_dim)
        # hidden: (num_layers * 2, batch, hidden_dim); with a bidirectional LSTM
        # the last two entries are the top layer's forward/backward final states.
        output, (hidden, _) = self.lstm(x)
        hidden = torch.cat((hidden[-2], hidden[-1]), dim=1)  # (batch, hidden_dim * 2)
        hidden = self.dropout(hidden)
        return self.fc(hidden)

def run_lstm(glove_path="glove.6B.50d.txt"):
    print("\n" + "="*20 + " Training LSTM " + "="*20)
    
    # 改进的词表构建
    texts = [ex["text"] for ex in dataset["train"]]
    word_counts = Counter()
    for text in texts:
        word_counts.update(word.lower() for word in text.split())
    
    # 保留前20000个高频词
    sorted_words = [word for word, _ in word_counts.most_common(20000)]
    word2idx = {word: i+1 for i, word in enumerate(sorted_words)}
    word2idx["<unk>"] = 0  # 显式未知词标记
    vocab_size = len(word2idx)
    print(f"构建词表完成，词表大小: {vocab_size}")

    # 增强词向量加载
    embedding_dim = 50
    embedding_matrix = torch.zeros((vocab_size, embedding_dim), device=device)
    found = 0
    
    with open(glove_path, encoding='utf-8') as f:
        for line in f:
            parts = line.rstrip().split()
            word = parts[0]
            if word in word2idx:
                vector = np.asarray(parts[1:], dtype=np.float32)
                if len(vector) != embedding_dim:
                    continue
                embedding_matrix[word2idx[word]] = torch.from_numpy(vector)
                found += 1
    
    print(f"词向量覆盖率: {found}/{vocab_size} ({found/vocab_size:.1%})")

    # 数据处理
    label_encoder = LabelEncoder()
    train_labels = label_encoder.fit_transform(dataset["train"]["label"])
    test_labels = label_encoder.transform(dataset["test"]["label"])
    
    # 类别权重平衡
    class_counts = np.bincount(train_labels)
    class_weights = 1. / torch.tensor(class_counts, dtype=torch.float)
    class_weights = class_weights.to(device)
    print("类别分布:", class_counts)

    # 数据集类定义
    class LSTMDataset(Dataset):
        def __init__(self, texts, labels, word2idx, max_len=128):
            self.texts = texts
            self.labels = labels
            self.word2idx = word2idx
            self.max_len = max_len
        
        def __len__(self):
            return len(self.texts)
        
        def __getitem__(self, idx):
            text = self.texts[idx]
            indices = [self.word2idx.get(word.lower(), 0) for word in text.split()[:self.max_len]]
            indices += [0] * (self.max_len - len(indices))
            return {
                "input_ids": torch.LongTensor(indices),
                "labels": torch.LongTensor([self.labels[idx]])
            }

    # 创建数据集
    train_dataset = LSTMDataset(
        [ex["text"] for ex in dataset["train"]],
        train_labels,
        word2idx
    )
    test_dataset = LSTMDataset(
        [ex["text"] for ex in dataset["test"]],
        test_labels,
        word2idx
    )

    # 数据加载器
    train_loader = DataLoader(
        train_dataset,
        batch_size=256,
        shuffle=True,
        num_workers=4,
        pin_memory=True
    )
    test_loader = DataLoader(
        test_dataset,
        batch_size=512,
        num_workers=4,
        pin_memory=True
    )

    # 模型初始化
    model = ImprovedLSTMModel(embedding_matrix).to(device)
    print(f"模型参数量: {sum(p.numel() for p in model.parameters()):,}")

    # 优化配置
    optimizer = torch.optim.Adam(model.parameters(), lr=0.0005, weight_decay=1e-4)
    scheduler = get_linear_schedule_with_warmup(
        optimizer,
        num_warmup_steps=100,
        num_training_steps=len(train_loader)*15
    )
    criterion = nn.CrossEntropyLoss(weight=class_weights)

    # 训练循环
    best_acc = 0.0
    early_stop_counter = 0
    
    for epoch in range(15):
        model.train()
        total_loss = 0.0
        progress_bar = tqdm(train_loader, desc=f"Epoch {epoch+1}/15")
        
        for batch in progress_bar:
            inputs = batch["input_ids"].to(device, non_blocking=True)
            labels = batch["labels"].squeeze().to(device, non_blocking=True)
            
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            
            nn.utils.clip_grad_norm_(model.parameters(), 1.0)
            optimizer.step()
            scheduler.step()
            
            total_loss += loss.item()
            progress_bar.set_postfix(loss=loss.item())

        # 评估
        model.eval()
        all_preds = []
        all_labels = []
        
        with torch.no_grad():
            for batch in tqdm(test_loader, desc="Evaluating"):
                inputs = batch["input_ids"].to(device)
                labels = batch["labels"].squeeze().cpu().numpy()
                
                outputs = model(inputs)
                preds = torch.argmax(outputs, dim=1).cpu().numpy()
                
                all_preds.extend(preds)
                all_labels.extend(labels)
        
        acc = accuracy_score(all_labels, all_preds)
        print(f"Epoch {epoch+1} | Loss: {total_loss/len(train_loader):.4f} | Acc: {acc:.2%}")

        # 早停机制
        if acc > best_acc:
            best_acc = acc
            early_stop_counter = 0
            torch.save(model.state_dict(), "best_lstm.pth")
            print(f"发现新最佳模型，准确率: {acc:.2%}")
        else:
            early_stop_counter += 1
            if early_stop_counter >= 3:
                print(f"早停触发，最佳准确率: {best_acc:.2%}")
                break

if __name__ == "__main__":
    run_lstm()  # 运行前确保glove文件路径正确