"""
知识蒸馏训练模块
包含训练循环、验证和模型保存等功能
"""

import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.optim import AdamW
from torch.optim.lr_scheduler import CosineAnnealingLR
from typing import Dict, Any, Optional
import os
from datetime import datetime
from tensorboardX import SummaryWriter

class DistillationTrainer:
    """Knowledge-distillation trainer.

    Trains a student network to mimic a teacher model's logits (via a
    project-defined ``DistillationLoss``) while fitting the ground-truth
    labels, logs progress to TensorBoard and a plain-text log file,
    checkpoints the best model by validation loss, and exports a
    dynamically-quantized final model.
    """

    def __init__(self,
                 teacher_model: Any,
                 student_model: nn.Module,
                 train_dataloader: DataLoader,
                 val_dataloader: Optional[DataLoader] = None,
                 learning_rate: float = 1e-4,
                 num_epochs: int = 10,
                 device: str = "cuda",
                 output_dir: str = "训练示例/训练过程"):
        """Initialize the trainer.

        Args:
            teacher_model: Frozen teacher; must be callable on input ids and
                return a mapping with a ``'logits'`` entry. Assumed to live
                on (or handle) the same device as the student — TODO confirm.
            student_model: Trainable student network; moved to ``device``.
            train_dataloader: Yields dicts with ``'input_ids'`` and
                ``'labels'`` tensors.
            val_dataloader: Optional validation loader of the same shape.
            learning_rate: AdamW learning rate.
            num_epochs: Number of epochs (also the cosine schedule's T_max).
            device: Torch device string used for the student and batches.
            output_dir: Root directory for checkpoints, logs and TensorBoard.
        """
        self.teacher = teacher_model
        # Bug fix: the device argument was previously accepted but never
        # applied; move the student so it matches the batches we transfer
        # in train_step/_validate.
        self.student = student_model.to(device)
        self.train_dataloader = train_dataloader
        self.val_dataloader = val_dataloader
        self.learning_rate = learning_rate
        self.num_epochs = num_epochs
        self.device = device
        self.output_dir = output_dir

        # The teacher is inference-only; switch it to eval mode when it
        # supports the nn.Module API (it is typed as Any, so guard).
        if hasattr(self.teacher, "eval"):
            self.teacher.eval()

        # State consumed by save_checkpoint(). Bug fix: these attributes
        # were read there but never written anywhere, so every checkpoint
        # save crashed with AttributeError.
        self.current_epoch = 0
        self.current_loss = float('inf')

        # Create output directories.
        self.checkpoint_dir = os.path.join(output_dir, "checkpoints")
        self.log_dir = os.path.join(output_dir, "tensorboard")
        os.makedirs(self.checkpoint_dir, exist_ok=True)
        os.makedirs(self.log_dir, exist_ok=True)

        # Optimizer, LR schedule and distillation loss.
        self.optimizer = AdamW(self.student.parameters(), lr=learning_rate)
        self.scheduler = CosineAnnealingLR(self.optimizer, T_max=num_epochs)
        self.loss_fn = DistillationLoss()  # project-defined; combines KD + CE — TODO confirm

        # TensorBoard writer.
        self.writer = SummaryWriter(self.log_dir)

        # Plain-text training log, appended to by _log_epoch().
        self.log_file = os.path.join(output_dir, "training.log")

    def train(self):
        """Run the full training loop: train, validate, checkpoint, log."""
        best_val_loss = float('inf')

        for epoch in range(self.num_epochs):
            self.current_epoch = epoch

            # Train one epoch.
            train_loss = self._train_epoch(epoch)
            self.current_loss = train_loss

            # Bug fix: val_loss was previously referenced below even when no
            # validation loader was configured, raising NameError.
            val_loss = None
            if self.val_dataloader is not None:
                val_loss = self._validate(epoch)

                # Keep the best checkpoint by validation loss.
                if val_loss < best_val_loss:
                    best_val_loss = val_loss
                    self.current_loss = val_loss
                    self.save_checkpoint(f"best_model_epoch_{epoch}.pt")

            # Step the cosine schedule once per epoch.
            self.scheduler.step()

            # Append an epoch summary to the text log.
            self._log_epoch(epoch, train_loss, val_loss)

        # Save the final (quantized) model.
        self.save_final_model()

    def _train_epoch(self, epoch: int) -> float:
        """Train a single epoch; returns the mean per-batch loss."""
        self.student.train()
        total_loss = 0.0

        for batch_idx, batch in enumerate(self.train_dataloader):
            loss = self.train_step(batch)
            total_loss += loss.item()

            # Log per-step loss to TensorBoard at a global step index.
            step = epoch * len(self.train_dataloader) + batch_idx
            self.writer.add_scalar('train/loss', loss.item(), step)

        return total_loss / len(self.train_dataloader)

    def train_step(self, batch: Dict[str, torch.Tensor]) -> torch.Tensor:
        """One optimization step on a single batch; returns the loss tensor."""
        self.optimizer.zero_grad()

        # Bug fix: batches were previously used on whatever device the
        # dataloader produced them on, ignoring self.device entirely.
        input_ids = batch['input_ids'].to(self.device)
        labels = batch['labels'].to(self.device)

        # Teacher forward pass — no gradients needed for the frozen teacher.
        with torch.no_grad():
            teacher_outputs = self.teacher(input_ids)

        # Student forward pass.
        student_outputs = self.student(input_ids)

        # Distillation loss over student/teacher logits and hard labels.
        loss = self.loss_fn(
            student_outputs['logits'],
            teacher_outputs['logits'],
            labels
        )

        # Backward pass with gradient clipping for stability.
        loss.backward()
        torch.nn.utils.clip_grad_norm_(self.student.parameters(), max_norm=1.0)
        self.optimizer.step()

        return loss

    def _validate(self, epoch: int) -> float:
        """Evaluate on the validation set; returns the mean loss."""
        self.student.eval()
        total_loss = 0.0

        with torch.no_grad():
            for batch in self.val_dataloader:
                # Bug fix: move batches to the configured device (see
                # train_step).
                input_ids = batch['input_ids'].to(self.device)
                labels = batch['labels'].to(self.device)

                # Teacher forward pass.
                teacher_outputs = self.teacher(input_ids)

                # Student forward pass.
                student_outputs = self.student(input_ids)

                # Same loss as training, accumulated for the mean.
                loss = self.loss_fn(
                    student_outputs['logits'],
                    teacher_outputs['logits'],
                    labels
                )
                total_loss += loss.item()

        avg_loss = total_loss / len(self.val_dataloader)
        self.writer.add_scalar('val/loss', avg_loss, epoch)

        return avg_loss

    def save_checkpoint(self, filename: str) -> None:
        """Save a resumable training checkpoint under checkpoint_dir.

        Relies on self.current_epoch / self.current_loss, which train()
        keeps up to date.
        """
        checkpoint = {
            'epoch': self.current_epoch,
            'model_state_dict': self.student.state_dict(),
            'optimizer_state_dict': self.optimizer.state_dict(),
            'scheduler_state_dict': self.scheduler.state_dict(),
            'loss': self.current_loss,
        }

        path = os.path.join(self.checkpoint_dir, filename)
        torch.save(checkpoint, path)

    def save_final_model(self) -> None:
        """Export the final student: quantized weights plus config/tokenizer."""
        final_dir = "训练示例/最终模型"
        os.makedirs(final_dir, exist_ok=True)

        # Dynamic quantization only supports CPU tensors, so move the
        # student off the training device first (bug fix: previously the
        # model could still be on GPU here).
        quantized_model = torch.quantization.quantize_dynamic(
            self.student.to("cpu"), {torch.nn.Linear}, dtype=torch.qint8
        )
        torch.save(quantized_model.state_dict(), os.path.join(final_dir, "model.bin"))

        # The student is typed as nn.Module, so an HF-style config is not
        # guaranteed; guard like the tokenizer branch below (bug fix:
        # previously crashed for plain modules).
        if hasattr(self.student, "config"):
            self.student.config.save_pretrained(final_dir)

        # Save the tokenizer when one was attached to the trainer.
        if hasattr(self, 'tokenizer'):
            self.tokenizer.save_pretrained(os.path.join(final_dir, "tokenizer"))

    def _log_epoch(self, epoch: int, train_loss: float, val_loss: Optional[float]) -> None:
        """Append one timestamped epoch summary line to the text log."""
        log_entry = f"[{datetime.now().isoformat()}] Epoch {epoch}: train_loss={train_loss:.4f}"
        if val_loss is not None:
            log_entry += f", val_loss={val_loss:.4f}"
        log_entry += "\n"

        with open(self.log_file, "a", encoding="utf-8") as f:
            f.write(log_entry)