import csv
import os
import time

import numpy as np
import pandas as pd
import torch
from sklearn.metrics import accuracy_score, f1_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from torch.optim import AdamW
from torch.utils.data import Dataset, DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from transformers import BertTokenizer, BertForSequenceClassification
from transformers import get_linear_schedule_with_warmup

# 设置随机种子保证可复现性
SEED = 42
np.random.seed(SEED)
torch.manual_seed(SEED)
if torch.cuda.is_available():
    torch.cuda.manual_seed_all(SEED)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

# 参数配置
MAX_LEN = 128  # 最大序列长度
BATCH_SIZE = 64  # 增大批次大小加速训练
GRAD_ACCUM_STEPS = 2  # 梯度累积步数（模拟更大批次）
EPOCHS = 3  # BERT微调通常3-4轮足够
LEARNING_RATE = 2e-5
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
MODEL_NAME = 'distilbert-base-uncased'  # 更快的蒸馏模型

# 自定义数据集类
class TextDataset(Dataset):
    def __init__(self, texts, labels, tokenizer, max_len):
        self.texts = texts
        self.labels = labels
        self.tokenizer = tokenizer
        self.max_len = max_len
    
    def __len__(self):
        return len(self.texts)
    
    def __getitem__(self, idx):
        text = str(self.texts[idx])
        label = self.labels[idx]
        
        encoding = self.tokenizer.encode_plus(
            text,
            add_special_tokens=True,
            max_length=self.max_len,
            return_token_type_ids=False,
            padding='max_length',
            truncation=True,
            return_attention_mask=True,
            return_tensors='pt',
        )
        
        return {
            'input_ids': encoding['input_ids'].flatten(),
            'attention_mask': encoding['attention_mask'].flatten(),
            'labels': torch.tensor(label, dtype=torch.long)
        }

# 训练函数（添加梯度累积）
def train_epoch(model, data_loader, optimizer, scheduler, device):
    model.train()
    total_loss = 0
    total_steps = 0
    
    for step, batch in enumerate(data_loader):
        input_ids = batch['input_ids'].to(device)
        attention_mask = batch['attention_mask'].to(device)
        labels = batch['labels'].to(device)
        
        outputs = model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            labels=labels
        )
        
        loss = outputs.loss
        total_loss += loss.item()
        
        # 梯度累积
        loss = loss / GRAD_ACCUM_STEPS
        loss.backward()
        
        if (step + 1) % GRAD_ACCUM_STEPS == 0:
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
            optimizer.step()
            scheduler.step()
            optimizer.zero_grad()
            total_steps += 1
    
    return total_loss / len(data_loader)

# 评估函数
def eval_model(model, data_loader, device):
    model.eval()
    predictions = []
    true_labels = []
    total_loss = 0
    
    with torch.no_grad():
        for batch in data_loader:
            input_ids = batch['input_ids'].to(device)
            attention_mask = batch['attention_mask'].to(device)
            labels = batch['labels'].to(device)
            
            outputs = model(
                input_ids=input_ids,
                attention_mask=attention_mask,
                labels=labels
            )
            
            loss = outputs.loss
            total_loss += loss.item()
            
            logits = outputs.logits
            _, preds = torch.max(logits, dim=1)
            
            predictions.extend(preds.cpu().numpy())
            true_labels.extend(labels.cpu().numpy())
    
    avg_loss = total_loss / len(data_loader)
    accuracy = accuracy_score(true_labels, predictions)
    f1 = f1_score(true_labels, predictions, average='weighted')
    
    return avg_loss, accuracy, f1

# 加载TSV文件
def load_tsv_data(file_path):
    texts = []
    labels = []
    
    with open(file_path, 'r', encoding='utf-8') as f:
        # 使用csv.reader指定制表符分隔
        reader = csv.reader(f, delimiter='\t')
        for row in reader:
            if len(row) < 3:
                continue  # 跳过无效行
            
            # 第一列是编号（忽略），第二列是文本，第三列是标签
            texts.append(row[1].strip())
            labels.append(row[2].strip())
    
    return texts, labels

# 主训练流程
def train_model(data_path):
    # 加载TSV格式数据
    texts, raw_labels = load_tsv_data(data_path)
    
    if len(texts) == 0:
        raise ValueError("未加载到有效数据，请检查文件格式和路径")
    
    print(f"数据集加载完成，共 {len(texts)} 条样本")
    
    # 标签编码（文本标签转数字）
    label_encoder = LabelEncoder()
    labels = label_encoder.fit_transform(raw_labels)
    num_classes = len(label_encoder.classes_)
    
    print(f"类别数量: {num_classes}，类别分布: {np.bincount(labels)}")
    
    # 分割数据集
    train_texts, val_texts, train_labels, val_labels = train_test_split(
        texts, labels, test_size=0.1, random_state=SEED, stratify=labels
    )
    
    print(f"训练集样本数: {len(train_texts)}, 验证集样本数: {len(val_texts)}")
    
    # 初始化tokenizer和模型
    tokenizer = BertTokenizer.from_pretrained(MODEL_NAME)
    model = BertForSequenceClassification.from_pretrained(
        MODEL_NAME, 
        num_labels=num_classes
    ).to(DEVICE)
    
    # 创建数据加载器
    train_dataset = TextDataset(train_texts, train_labels, tokenizer, MAX_LEN)
    val_dataset = TextDataset(val_texts, val_labels, tokenizer, MAX_LEN)
    
    train_loader = DataLoader(
        train_dataset,
        batch_size=BATCH_SIZE,
        shuffle=True,
        num_workers=4,
        pin_memory=True  # 加速GPU传输
    )
    
    val_loader = DataLoader(
        val_dataset,
        batch_size=BATCH_SIZE * 2,  # 评估时使用更大批次
        num_workers=4,
        pin_memory=True
    )
    
    # 设置优化器和调度器
    optimizer = AdamW(model.parameters(), lr=LEARNING_RATE, weight_decay=1e-2)
    total_steps = len(train_loader) * EPOCHS // GRAD_ACCUM_STEPS
    scheduler = get_linear_schedule_with_warmup(
        optimizer,
        num_warmup_steps=int(total_steps * 0.1),
        num_training_steps=total_steps
    )
    
    # 训练循环
    best_f1 = 0
    for epoch in range(EPOCHS):
        start_time = time.time()
        
        train_loss = train_epoch(model, train_loader, optimizer, scheduler, DEVICE)
        val_loss, val_acc, val_f1 = eval_model(model, val_loader, DEVICE)
        
        epoch_time = time.time() - start_time
        
        print(f'\nEpoch {epoch + 1}/{EPOCHS}')
        print(f'Train Loss: {train_loss:.4f} | Time: {epoch_time:.2f}s')
        print(f'Val Loss: {val_loss:.4f} | Val Acc: {val_acc:.4f} | Val F1: {val_f1:.4f}')
        
        # 保存最佳模型
        if val_f1 > best_f1:
            # 创建模型保存目录
            os.makedirs('best_model', exist_ok=True)
            
            # 保存整个模型（包含分类头）
            model.save_pretrained('best_model')
            tokenizer.save_pretrained('best_model')
            
            # 保存标签编码器
            np.save('best_model/label_classes.npy', label_encoder.classes_)
            
            best_f1 = val_f1
            print(f'--- New best model saved (F1: {best_f1:.4f}) ---')
    
    print(f'\n训练完成! 最佳验证集F1: {best_f1:.4f}')
    print(f"模型和tokenizer已保存至 'best_model' 目录")
    print(f"标签编码器已保存: best_model/label_classes.npy")

if __name__ == "__main__":
    # 替换为您的数据文件路径
    DATA_PATH = "./data/dataset/OCEMOTION.csv"  # 可以是.tsv或.txt扩展名
    
    # 检查文件是否存在
    if not os.path.exists(DATA_PATH):
        print(f"错误: 文件 {DATA_PATH} 不存在!")
    else:
        train_model(DATA_PATH)