import os
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import LabelEncoder
from datasets import load_dataset
from transformers import (
    AutoTokenizer,
    AutoModelForSequenceClassification,
    TrainingArguments,
    Trainer,
    get_linear_schedule_with_warmup
)

# Silence the HuggingFace tokenizers fork/parallelism warning that is emitted
# when DataLoader worker processes are spawned after the tokenizer has run.
os.environ["TOKENIZERS_PARALLELISM"] = "false"

# Reproducibility and device selection.
RANDOM_SEED = 42
torch.manual_seed(RANDOM_SEED)
if torch.cuda.is_available():
    torch.cuda.manual_seed_all(RANDOM_SEED)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# NOTE(review): torch.version.cuda is None on CPU-only builds; the print still works.
print(f"Using device: {device} | CUDA: {torch.version.cuda}")

# Load the SetFit/emotion dataset (downloads on first run; cached afterwards).
dataset = load_dataset("SetFit/emotion")
labels = sorted(set(dataset["train"]["label_text"]))  # ordered list of class names

# ====================
# 通用评估函数
# ====================
def compute_metrics(eval_pred):
    """Compute accuracy for a HuggingFace Trainer evaluation step.

    Args:
        eval_pred: tuple of (logits, label_ids) as numpy arrays, where
            logits has shape (num_examples, num_classes).

    Returns:
        dict: {"accuracy": fraction of correct predictions}.
    """
    # Unpack into `label_ids` (the old name `labels` shadowed the
    # module-level `labels` list of class names).
    logits, label_ids = eval_pred
    preds = np.argmax(logits, axis=1)
    # Mean of elementwise equality equals sklearn's accuracy_score for
    # 1-D integer label arrays, without the extra dependency.
    return {"accuracy": float((preds == label_ids).mean())}

# ====================
# 模型1: DistilBERT (修复版)
# ====================
def run_distilbert():
    """Fine-tune DistilBERT on the module-level `dataset` and print accuracy.

    Relies on module globals: `dataset`, `labels`, `device`, and
    `compute_metrics`. Downloads the pretrained checkpoint on first use.

    Returns:
        dict: the Trainer evaluation metrics (includes "eval_accuracy"),
        so callers can inspect the result instead of only reading stdout.
    """
    print("\n" + "="*20 + " Training DistilBERT " + "="*20)

    tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
    model = AutoModelForSequenceClassification.from_pretrained(
        "distilbert-base-uncased",
        num_labels=len(labels)
    ).to(device)

    # Tokenization callback for datasets.map. `return_tensors="pt"` is
    # deliberately NOT passed: map() stores columns as plain lists either
    # way, and the set_format("torch", ...) call below is what yields tensors.
    def tokenize(batch):
        return tokenizer(
            batch["text"],
            padding="max_length",
            truncation=True,
            max_length=64,  # short sequences to speed up training
        )

    tokenized_ds = dataset.map(tokenize, batched=True, batch_size=2048)
    tokenized_ds.set_format("torch", columns=["input_ids", "attention_mask", "label"])

    # Quick 3-epoch run with periodic evaluation.
    training_args = TrainingArguments(
        output_dir="./distilbert_results",
        per_device_train_batch_size=128,
        per_device_eval_batch_size=256,
        evaluation_strategy="steps",
        eval_steps=100,  # evaluate frequently to track progress
        logging_steps=100,
        learning_rate=3e-5,
        num_train_epochs=3,
        weight_decay=0.01,
        fp16=torch.cuda.is_available(),  # mixed precision only when a GPU exists
        gradient_accumulation_steps=2,
        optim="adamw_torch",
        report_to="none",
        dataloader_num_workers=4,
        remove_unused_columns=False
    )

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_ds["train"],
        eval_dataset=tokenized_ds["test"],
        compute_metrics=compute_metrics  # required for eval accuracy reporting
    )

    trainer.train()
    results = trainer.evaluate()
    print(f"DistilBERT 测试准确率: {results['eval_accuracy']:.2%}")
    return results

# ====================
# 模型2: TinyBERT (修复版)
# ====================
def run_tinybert():
    """Fine-tune TinyBERT (prajjwal1/bert-tiny) on the module-level `dataset`.

    Relies on module globals: `dataset`, `labels`, `device`, and
    `compute_metrics`. Downloads the pretrained checkpoint on first use.

    Returns:
        dict: the Trainer evaluation metrics (includes "eval_accuracy"),
        so callers can inspect the result instead of only reading stdout.
    """
    print("\n" + "="*20 + " Training TinyBERT " + "="*20)

    # A small, verified-available TinyBERT-class checkpoint.
    model_name = "prajjwal1/bert-tiny"

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForSequenceClassification.from_pretrained(
        model_name,
        num_labels=len(labels)
    ).to(device)

    # Tokenization callback for datasets.map. `return_tensors="pt"` is
    # deliberately NOT passed: map() stores columns as plain lists either
    # way, and the set_format("torch", ...) call below is what yields tensors.
    def tokenize(batch):
        return tokenizer(
            batch["text"],
            padding="max_length",
            truncation=True,
            max_length=64,  # short sequences to speed up training
        )

    tokenized_ds = dataset.map(tokenize, batched=True, batch_size=2048)
    tokenized_ds.set_format("torch", columns=["input_ids", "attention_mask", "label"])

    # Quick 4-epoch run with periodic evaluation (tiny model trains fast).
    training_args = TrainingArguments(
        output_dir="./tinybert_results",
        per_device_train_batch_size=256,
        per_device_eval_batch_size=512,
        evaluation_strategy="steps",
        eval_steps=50,
        logging_steps=50,
        learning_rate=5e-5,
        num_train_epochs=4,
        weight_decay=0.01,
        fp16=torch.cuda.is_available(),  # mixed precision only when a GPU exists
        gradient_accumulation_steps=1,
        optim="adamw_torch",
        report_to="none",
        dataloader_num_workers=8,
        remove_unused_columns=False
    )

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_ds["train"],
        eval_dataset=tokenized_ds["test"],
        compute_metrics=compute_metrics  # required for eval accuracy reporting
    )

    trainer.train()
    results = trainer.evaluate()
    print(f"TinyBERT 测试准确率: {results['eval_accuracy']:.2%}")
    return results


if __name__ == "__main__":
    # Run the models (recommended: one at a time to keep GPU memory free).
    # run_distilbert()  # approx. training time: 3-5 minutes
    run_tinybert()  # approx. training time: 4-6 minutes