#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
模型训练工具
"""

import os
import time
import logging
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter
import numpy as np
from tqdm import tqdm
from .metrics import compute_metrics


class Trainer:
    """Model trainer.

    Wraps the full supervised-training workflow for a classification model:
    optimizer and LR-scheduler construction from a config dict, optional
    mixed-precision training, per-epoch validation, TensorBoard logging,
    periodic/best-model checkpointing, and early stopping on validation loss.
    """

    def __init__(self, model, config, train_loader, val_loader=None):
        """
        Initialize the trainer.

        Args:
            model: the model to train (an ``nn.Module``).
            config: configuration dict; the ``'training'``, ``'system'`` and
                ``'output_dir'`` entries are read, all with defaults.
            train_loader: training data loader; each batch is expected to be a
                dict with ``'frames'`` (inputs) and ``'label'`` (targets).
            val_loader: optional validation data loader (same batch format).

        Raises:
            ValueError: if ``config['training']['optimizer']`` is not one of
                ``'adam'``, ``'sgd'`` or ``'adamw'``.
        """
        self.logger = logging.getLogger()
        self.model = model
        self.config = config
        self.train_loader = train_loader
        self.val_loader = val_loader

        # Device selection: an explicit config value wins; otherwise use CUDA
        # when available, falling back to CPU.
        self.device = torch.device(config.get('system', {}).get('device', 'cuda' if torch.cuda.is_available() else 'cpu'))
        self.model = self.model.to(self.device)
        self.logger.info(f"使用设备: {self.device}")

        # Training hyper-parameters (all optional, with defaults).
        self.train_config = config.get('training', {})
        self.num_epochs = self.train_config.get('num_epochs', 100)
        self.batch_size = self.train_config.get('batch_size', 16)
        self.learning_rate = self.train_config.get('learning_rate', 0.0001)
        self.weight_decay = self.train_config.get('weight_decay', 0.0001)
        self.patience = self.train_config.get('patience', 10)
        self.clip_gradient = self.train_config.get('clip_gradient', 1.0)
        self.log_interval = config.get('system', {}).get('log_interval', 10)
        self.use_mixed_precision = self.train_config.get('use_mixed_precision', False)

        # Build the optimizer.
        optimizer_name = self.train_config.get('optimizer', 'adam').lower()
        if optimizer_name == 'adam':
            self.optimizer = optim.Adam(
                self.model.parameters(),
                lr=self.learning_rate,
                weight_decay=self.weight_decay
            )
        elif optimizer_name == 'sgd':
            self.optimizer = optim.SGD(
                self.model.parameters(),
                lr=self.learning_rate,
                momentum=0.9,
                weight_decay=self.weight_decay
            )
        elif optimizer_name == 'adamw':
            self.optimizer = optim.AdamW(
                self.model.parameters(),
                lr=self.learning_rate,
                weight_decay=self.weight_decay
            )
        else:
            raise ValueError(f"不支持的优化器: {optimizer_name}")

        # Build the learning-rate scheduler (None disables scheduling).
        scheduler_name = self.train_config.get('lr_scheduler', 'cosine').lower()
        if scheduler_name == 'cosine':
            self.scheduler = optim.lr_scheduler.CosineAnnealingLR(
                self.optimizer,
                T_max=self.num_epochs
            )
        elif scheduler_name == 'step':
            self.scheduler = optim.lr_scheduler.StepLR(
                self.optimizer,
                step_size=30,
                gamma=0.1
            )
        elif scheduler_name == 'plateau':
            # NOTE: 'verbose' was removed here; it is deprecated (and later
            # removed) in recent PyTorch releases and only affected printing.
            self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(
                self.optimizer,
                mode='min',
                factor=0.1,
                patience=5
            )
        else:
            self.scheduler = None

        # Loss function for classification.
        self.criterion = nn.CrossEntropyLoss()

        # Gradient scaler for mixed-precision training (CUDA AMP).
        self.scaler = torch.cuda.amp.GradScaler() if self.use_mixed_precision else None

        # TensorBoard writer (optional).
        if config.get('system', {}).get('use_tensorboard', True):
            self.output_dir = config.get('output_dir', './output')
            self.tensorboard_dir = os.path.join(self.output_dir, 'tensorboard')
            os.makedirs(self.tensorboard_dir, exist_ok=True)
            self.writer = SummaryWriter(self.tensorboard_dir)
        else:
            self.writer = None

        # Checkpoint directory.
        self.checkpoint_dir = os.path.join(config.get('output_dir', './output'), 'checkpoints')
        os.makedirs(self.checkpoint_dir, exist_ok=True)

        # Tracked best-so-far metrics and early-stopping state.
        self.best_val_loss = float('inf')
        self.best_val_acc = 0.0
        self.early_stopping_counter = 0

    def train(self):
        """Run the full training loop.

        Trains for up to ``num_epochs`` epochs, validating after each epoch
        when a validation loader was provided. Saves periodic checkpoints and
        the best model (by validation loss), and stops early after
        ``patience`` consecutive epochs without validation improvement.

        Returns:
            dict with ``'best_val_loss'`` and ``'best_val_accuracy'``
            (``inf`` / ``0.0`` when no validation loader was given).
        """
        self.logger.info("开始训练...")
        self.logger.info(f"训练轮数: {self.num_epochs}")
        self.logger.info(f"批量大小: {self.batch_size}")
        self.logger.info(f"学习率: {self.learning_rate}")

        for epoch in range(1, self.num_epochs + 1):
            start_time = time.time()

            # Train for one epoch.
            train_loss, train_metrics = self._train_epoch(epoch)

            # Validate (if a validation loader was provided).
            if self.val_loader is not None:
                val_loss, val_metrics = self._validate_epoch(epoch)
            else:
                val_loss, val_metrics = None, None

            # Update the learning rate. ReduceLROnPlateau requires a metric:
            # use the validation loss, falling back to the training loss when
            # there is no validation set (the original code passed None here,
            # which crashes inside the scheduler).
            if self.scheduler is not None:
                if isinstance(self.scheduler, optim.lr_scheduler.ReduceLROnPlateau):
                    self.scheduler.step(val_loss if val_loss is not None else train_loss)
                else:
                    self.scheduler.step()

            # Epoch summary logging.
            epoch_time = time.time() - start_time
            self.logger.info(f"轮次 {epoch}/{self.num_epochs} 完成，用时 {epoch_time:.2f}s")
            self.logger.info(f"训练损失: {train_loss:.4f}, 训练准确率: {train_metrics['accuracy']:.4f}")

            if val_loss is not None:
                self.logger.info(f"验证损失: {val_loss:.4f}, 验证准确率: {val_metrics['accuracy']:.4f}")

            # TensorBoard scalars.
            if self.writer is not None:
                self.writer.add_scalar('Loss/train', train_loss, epoch)
                self.writer.add_scalar('Accuracy/train', train_metrics['accuracy'], epoch)

                if val_loss is not None:
                    self.writer.add_scalar('Loss/val', val_loss, epoch)
                    self.writer.add_scalar('Accuracy/val', val_metrics['accuracy'], epoch)

                current_lr = self.optimizer.param_groups[0]['lr']
                self.writer.add_scalar('LearningRate', current_lr, epoch)

            # Assemble the checkpoint payload.
            checkpoint = {
                'epoch': epoch,
                'model_state_dict': self.model.state_dict(),
                'optimizer_state_dict': self.optimizer.state_dict(),
                'train_loss': train_loss,
                'val_loss': val_loss,
                'train_metrics': train_metrics,
                'val_metrics': val_metrics,
                'config': self.config
            }

            checkpoint_path = os.path.join(self.checkpoint_dir, f"checkpoint_epoch_{epoch}.pth")

            # Whether to keep only the best model.
            save_best_only = self.train_config.get('save_best_only', True)

            # Periodic checkpoint (only when not in best-only mode).
            checkpoint_interval = self.config.get('system', {}).get('checkpoint_interval', 5)
            if epoch % checkpoint_interval == 0 and not save_best_only:
                torch.save(checkpoint, checkpoint_path)
                self.logger.info(f"保存检查点到 {checkpoint_path}")

            # Best-model tracking and early stopping only apply when we have a
            # validation loss. The original code incremented the early-stopping
            # counter on every epoch when val_loader was None, which caused a
            # spurious early stop after `patience` epochs without validation.
            if val_loss is not None:
                if val_loss < self.best_val_loss:
                    self.best_val_loss = val_loss
                    self.best_val_acc = val_metrics['accuracy']
                    self.early_stopping_counter = 0

                    best_checkpoint_path = os.path.join(self.checkpoint_dir, "best_model.pth")
                    torch.save(checkpoint, best_checkpoint_path)
                    self.logger.info(f"保存最佳模型到 {best_checkpoint_path}, 验证损失: {val_loss:.4f}, 验证准确率: {val_metrics['accuracy']:.4f}")
                else:
                    self.early_stopping_counter += 1

                # Early stopping.
                if self.train_config.get('early_stopping', True) and self.early_stopping_counter >= self.patience:
                    self.logger.info(f"提前停止训练，已连续 {self.patience} 轮未改善验证损失")
                    break

        self.logger.info("训练完成")

        # Flush and close the TensorBoard writer.
        if self.writer is not None:
            self.writer.close()

        # Return the best validation metrics observed.
        return {
            'best_val_loss': self.best_val_loss,
            'best_val_accuracy': self.best_val_acc
        }

    def _train_epoch(self, epoch):
        """Train for one epoch.

        Args:
            epoch: 1-based epoch index (used for logging only).

        Returns:
            ``(avg_loss, metrics)`` where ``metrics`` is the dict produced by
            ``compute_metrics`` over the whole epoch's predictions.
        """
        self.model.train()
        total_loss = 0.0
        all_targets = []
        all_predictions = []

        # Progress bar over the training batches.
        pbar = tqdm(self.train_loader, desc=f"轮次 {epoch} 训练", ncols=100)

        for i, batch in enumerate(pbar):
            # Move the batch to the target device.
            inputs = batch['frames'].to(self.device)
            targets = batch['label'].to(self.device)

            # Reset gradients.
            self.optimizer.zero_grad()

            # Forward pass (under autocast when mixed precision is enabled).
            if self.use_mixed_precision:
                with torch.cuda.amp.autocast():
                    outputs = self.model(inputs)
                    loss = self.criterion(outputs, targets)
            else:
                outputs = self.model(inputs)
                loss = self.criterion(outputs, targets)

            # Backward pass + optimizer step, with optional gradient clipping.
            if self.use_mixed_precision:
                self.scaler.scale(loss).backward()
                if self.clip_gradient > 0:
                    # Gradients must be unscaled before clipping.
                    self.scaler.unscale_(self.optimizer)
                    torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.clip_gradient)
                self.scaler.step(self.optimizer)
                self.scaler.update()
            else:
                loss.backward()
                if self.clip_gradient > 0:
                    torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.clip_gradient)
                self.optimizer.step()

            # Accumulate loss for the epoch average.
            total_loss += loss.item()

            # Collect hard predictions for metric computation.
            _, predicted = torch.max(outputs, 1)
            all_targets.extend(targets.cpu().numpy())
            all_predictions.extend(predicted.cpu().numpy())

            # Show the running batch loss on the progress bar.
            pbar.set_postfix({'loss': f"{loss.item():.4f}"})

            # Periodic per-batch debug logging.
            if (i + 1) % self.log_interval == 0:
                lr = self.optimizer.param_groups[0]['lr']
                self.logger.debug(f"轮次 {epoch}, 批次 {i+1}/{len(self.train_loader)}, 损失: {loss.item():.4f}, 学习率: {lr:.6f}")

        # Average loss over all batches.
        avg_loss = total_loss / len(self.train_loader)

        # Epoch-level metrics over accumulated targets/predictions.
        metrics = compute_metrics(np.array(all_targets), np.array(all_predictions))

        return avg_loss, metrics

    def _validate_epoch(self, epoch):
        """Evaluate on the validation set for one epoch.

        Args:
            epoch: 1-based epoch index (used for logging only).

        Returns:
            ``(avg_loss, metrics)`` computed over the validation set.
        """
        self.model.eval()
        total_loss = 0.0
        all_targets = []
        all_predictions = []

        # No gradients needed during evaluation.
        with torch.no_grad():
            for batch in tqdm(self.val_loader, desc=f"轮次 {epoch} 验证", ncols=100):
                # Move the batch to the target device.
                inputs = batch['frames'].to(self.device)
                targets = batch['label'].to(self.device)

                # Forward pass.
                outputs = self.model(inputs)
                loss = self.criterion(outputs, targets)

                # Accumulate loss for the epoch average.
                total_loss += loss.item()

                # Collect hard predictions for metric computation.
                _, predicted = torch.max(outputs, 1)
                all_targets.extend(targets.cpu().numpy())
                all_predictions.extend(predicted.cpu().numpy())

        # Average loss over all batches.
        avg_loss = total_loss / len(self.val_loader)

        # Validation metrics over accumulated targets/predictions.
        metrics = compute_metrics(np.array(all_targets), np.array(all_predictions))

        return avg_loss, metrics