import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import OneCycleLR, CosineAnnealingLR, StepLR
import time
import numpy as np
from tqdm import tqdm

class Trainer:
    """
    训练器类
    """
    
    def __init__(self, model, train_loader, test_loader, device='cpu', lr=0.001,
                 weight_decay=1e-4, betas=(0.9, 0.999), early_stopping_patience=10,
                 gradient_clip=1.0, lr_scheduler_type='onecycle'):
        """
        初始化训练器
        
        参数:
        - model: 要训练的模型
        - train_loader: 训练数据加载器
        - test_loader: 测试数据加载器
        - device: 训练设备
        - lr: 学习率
        - weight_decay: 权重衰减
        - betas: Adam优化器的beta参数
        - early_stopping_patience: 早停耐心值
        - gradient_clip: 梯度裁剪阈值
        - lr_scheduler_type: 学习率调度器类型 ('onecycle', 'cosine', 'step')
        """
        self.model = model
        self.train_loader = train_loader
        self.test_loader = test_loader
        self.device = device
        self.lr = lr
        
        # 将模型移至指定设备
        self.model.to(self.device)
        
        # 损失函数
        self.criterion = nn.CrossEntropyLoss()
        
        # 优化器
        self.optimizer = optim.Adam(
            self.model.parameters(),
            lr=lr,
            weight_decay=weight_decay,
            betas=betas
        )
        
        # 学习率调度器
        self.lr_scheduler_type = lr_scheduler_type
        self.lr_scheduler = None
        
        # 训练历史
        self.train_loss_history = []
        self.test_loss_history = []
        self.train_acc_history = []
        self.test_acc_history = []
        self.lr_history = []
        
        # 初始化统一的历史记录结构
        self.history = {
            'epochs': [],
            'train_loss': [],
            'test_loss': [],
            'train_acc': [],
            'test_acc': [],
            'lr': []
        }
        
        # 可视化器
        self.visualizer = None
        
        # 早停相关参数
        self.early_stopping_patience = early_stopping_patience
        self.best_test_loss = float('inf')
        self.best_test_acc = 0.0
        self.patience_counter = 0
        self.best_model_weights = None
        # 用于检测性能急剧下降的阈值
        self.acc_drop_threshold = 5.0
        self.loss_increase_threshold = 2.0
        
        # 梯度裁剪
        self.gradient_clip = gradient_clip
        
        # 回调函数
        self.on_epoch_end_callbacks = []
        
        # 训练控制标志
        self.should_stop = False
        
    def stop_training(self):
        """Request cooperative cancellation of the current training run."""
        print("[训练器] 收到停止训练请求")
        self.should_stop = True
    
    def reset_training_state(self):
        """Clear the cancellation flag and all early-stopping bookkeeping."""
        self.should_stop = False
        self.best_model_weights = None
        self.best_test_loss = float('inf')
        self.best_test_acc = 0.0
        self.patience_counter = 0
    
    def on_epoch_end(self, callback):
        """
        Register a callback to be invoked after every epoch.

        Parameters:
        - callback: callable taking (trainer, epoch_index)
        """
        self.on_epoch_end_callbacks.append(callback)
    
    def update_history(self, epoch, train_loss, test_loss, train_acc, test_acc, lr):
        """
        Append one epoch's metrics to the unified history and notify the
        visualizer, if one is attached.

        Parameters:
        - epoch: epoch index
        - train_loss / test_loss: losses for this epoch
        - train_acc / test_acc: accuracies (percent) for this epoch
        - lr: learning rate used this epoch
        """
        record = {
            'epochs': epoch,
            'train_loss': train_loss,
            'test_loss': test_loss,
            'train_acc': train_acc,
            'test_acc': test_acc,
            'lr': lr,
        }
        for key, value in record.items():
            self.history[key].append(value)

        # Push a snapshot to the live plotter when one is attached.
        # (Its payload uses the singular 'epoch' key.)
        if self.visualizer:
            self.visualizer.update({
                'epoch': epoch,
                'train_loss': train_loss,
                'test_loss': test_loss,
                'train_acc': train_acc,
                'test_acc': test_acc,
                'lr': lr,
            })
    
    def _create_lr_scheduler(self, max_epochs=10, scheduler_params=None):
        """
        创建学习率调度器
        
        参数:
        - max_epochs: 最大训练轮数
        - scheduler_params: 调度器参数字典
        
        返回:
        - 学习率调度器实例
        """
        # 对于OneCycleLR，我们需要知道总迭代次数
        total_steps = len(self.train_loader) * max_epochs
        
        # 如果没有提供调度器参数，使用默认值
        if scheduler_params is None:
            scheduler_params = {
                'onecycle': {'max_lr_multiplier': 25.0, 'pct_start': 0.3},
                'cosine': {'T_max': max_epochs, 'min_lr_multiplier': 0.01},
                'step': {'step_size': max(1, max_epochs // 4), 'gamma': 0.5}
            }
        
        # 确保所有调度器参数都存在
        if 'onecycle' not in scheduler_params:
            scheduler_params['onecycle'] = {'max_lr_multiplier': 25.0, 'pct_start': 0.3}
        if 'cosine' not in scheduler_params:
            scheduler_params['cosine'] = {'T_max': max_epochs, 'min_lr_multiplier': 0.01}
        if 'step' not in scheduler_params:
            scheduler_params['step'] = {'step_size': max(1, max_epochs // 4), 'gamma': 0.5}
        
        if self.lr_scheduler_type == 'onecycle':
            # 使用配置的参数
            max_lr_multiplier = scheduler_params['onecycle'].get('max_lr_multiplier', 25.0)
            pct_start = scheduler_params['onecycle'].get('pct_start', 0.3)
            max_lr = self.lr * max_lr_multiplier
            print(f"OneCycleLR初始化: max_lr={max_lr}, total_steps={total_steps}, pct_start={pct_start}")
            return OneCycleLR(
                self.optimizer,
                max_lr=max_lr,
                total_steps=total_steps,
                pct_start=pct_start,
                anneal_strategy='cos',
                div_factor=max_lr_multiplier,
                final_div_factor=10000.0
            )
        elif self.lr_scheduler_type == 'cosine':
            # 使用配置的参数
            T_max = scheduler_params['cosine'].get('T_max', max_epochs)
            min_lr_multiplier = scheduler_params['cosine'].get('min_lr_multiplier', 0.01)
            eta_min = self.lr * min_lr_multiplier
            print(f"CosineAnnealingLR初始化: T_max={T_max}, eta_min={eta_min}")
            return CosineAnnealingLR(
                self.optimizer,
                T_max=T_max,
                eta_min=eta_min
            )
        elif self.lr_scheduler_type == 'step':
            # 使用配置的参数
            step_size = scheduler_params['step'].get('step_size', max(1, max_epochs // 4))
            gamma = scheduler_params['step'].get('gamma', 0.5)
            print(f"StepLR初始化: step_size={step_size}, gamma={gamma}")
            return StepLR(
                self.optimizer,
                step_size=step_size,
                gamma=gamma
            )
        else:
            # 默认不使用调度器
            return None
    
    def train_epoch(self):
        """
        Run one training epoch with gradient clipping and (for OneCycleLR)
        per-batch learning-rate stepping.

        Returns:
        - average training loss over the dataset
        - training accuracy (percent)

        Both values are 0.0 when no batch was processed (e.g. an immediate
        stop request or an empty loader).
        """
        self.model.train()
        
        running_loss = 0.0
        correct = 0
        total = 0
        
        # Make sure the learning-rate trace exists.
        if not hasattr(self, 'lr_history'):
            self.lr_history = []
        
        # Fix: seed current_lr so it is defined even if the loop below exits
        # before the first batch (the original raised NameError at the
        # lr_history append in that case).
        current_lr = self.optimizer.param_groups[0]['lr']
        
        with tqdm(self.train_loader, desc=f'训练', leave=False) as pbar:
            for i, (inputs, targets) in enumerate(pbar):
                # Honour a pending cooperative stop request.
                if self.should_stop:
                    print("[训练器] 检测到停止请求，提前结束当前epoch")
                    break
                
                inputs, targets = inputs.to(self.device), targets.to(self.device)
                
                self.optimizer.zero_grad()
                
                # Forward pass.
                outputs = self.model(inputs)
                loss = self.criterion(outputs, targets)
                
                # Backward pass.
                loss.backward()
                
                # Clip the global gradient norm when enabled.
                if self.gradient_clip > 0:
                    nn.utils.clip_grad_norm_(self.model.parameters(), self.gradient_clip)
                
                self.optimizer.step()
                
                # OneCycleLR is stepped per batch; other schedulers step per
                # epoch in fit().
                if self.lr_scheduler and self.lr_scheduler_type == 'onecycle':
                    self.lr_scheduler.step()
                
                current_lr = self.optimizer.param_groups[0]['lr']
                
                # Sample-weighted loss and accuracy accumulators.
                running_loss += loss.item() * inputs.size(0)
                _, predicted = outputs.max(1)
                total += targets.size(0)
                correct += predicted.eq(targets).sum().item()
                
                pbar.set_postfix(loss=running_loss/total, acc=100.*correct/total, lr=f"{current_lr:.6f}")
        
        # Record the learning rate of the last processed batch (or the
        # seeded value when nothing ran).
        self.lr_history.append(current_lr)
        
        # Fix: guard the divisions below — the original raised
        # ZeroDivisionError when the loop broke before any batch.
        if total == 0:
            return 0.0, 0.0
        
        avg_loss = running_loss / len(self.train_loader.dataset)
        accuracy = 100. * correct / total
        
        return avg_loss, accuracy
    
    def test(self):
        """
        Evaluate on the test set, track the best model seen so far, and
        decide whether early stopping should trigger.

        Returns:
        - average test loss
        - test accuracy (percent)
        - whether training should stop (bool)

        Returns (0.0, 0.0, False) when no batch was evaluated (stop request
        or empty loader).
        """
        self.model.eval()
        
        running_loss = 0.0
        correct = 0
        total = 0
        
        # No gradients needed during evaluation.
        with torch.no_grad():
            with tqdm(self.test_loader, desc=f'测试', leave=False) as pbar:
                for inputs, targets in pbar:
                    # Honour a pending cooperative stop request.
                    if self.should_stop:
                        print("[训练器] 检测到停止请求，提前结束测试")
                        break
                    
                    inputs, targets = inputs.to(self.device), targets.to(self.device)
                    
                    # Forward pass.
                    outputs = self.model(inputs)
                    loss = self.criterion(outputs, targets)
                    
                    # Sample-weighted loss and accuracy accumulators.
                    running_loss += loss.item() * inputs.size(0)
                    _, predicted = outputs.max(1)
                    total += targets.size(0)
                    correct += predicted.eq(targets).sum().item()
                    
                    pbar.set_postfix(loss=running_loss/total, acc=100.*correct/total)
        
        # Fix: the original divided by zero here when a stop request (or an
        # empty loader) meant no batch ran. Return neutral metrics; fit()
        # checks self.should_stop itself right after this call.
        if total == 0:
            return 0.0, 0.0, False
        
        avg_loss = running_loss / len(self.test_loader.dataset)
        accuracy = 100. * correct / total
        
        # Seed the best weights on the first evaluation.
        if self.best_model_weights is None:
            self.best_model_weights = {k: v.clone() for k, v in self.model.state_dict().items()}
        
        should_stop = False
        
        # An improvement is higher accuracy, or equal accuracy with a
        # strictly lower loss.
        improvement = False
        if accuracy > self.best_test_acc:
            improvement = True
        elif accuracy == self.best_test_acc and avg_loss < self.best_test_loss:
            improvement = True
        
        if improvement:
            self.best_test_acc = accuracy
            self.best_test_loss = avg_loss
            self.patience_counter = 0
            # Snapshot the weights of the new best model.
            self.best_model_weights = {k: v.clone() for k, v in self.model.state_dict().items()}
            print(f"新的最佳模型: 准确率 {accuracy:.2f}%, 损失 {avg_loss:.4f}")
        else:
            # One more epoch without improvement.
            self.patience_counter += 1
            print(f"早停计数: {self.patience_counter}/{self.early_stopping_patience}")
            
            if self.patience_counter >= self.early_stopping_patience:
                should_stop = True
                print(f"[早停触发] 早停计数已达到或超过耐心值 {self.early_stopping_patience}")
        
        # Emergency stop: accuracy collapsed relative to the best seen
        # (percentage-point drop above the configured threshold).
        if self.best_test_acc > 0:
            acc_drop_pct = (self.best_test_acc - accuracy)
            if acc_drop_pct > self.acc_drop_threshold:
                print(f"[紧急早停触发] 准确率急剧下降 {acc_drop_pct:.2f}% (超过阈值 {self.acc_drop_threshold}%)")
                should_stop = True
        
        # Emergency stop: loss blew up relative to the best seen.
        if self.best_test_loss < float('inf') and avg_loss > self.best_test_loss * self.loss_increase_threshold:
            print(f"[紧急早停触发] 损失急剧上升 {avg_loss/self.best_test_loss:.2f}倍 (超过阈值 {self.loss_increase_threshold}倍)")
            should_stop = True
        
        return avg_loss, accuracy, should_stop
    
    def fit(self, epochs, early_stopping=True, patience=None, scheduler_params=None):
        """
        Train the model for up to ``epochs`` epochs with optional early
        stopping and learning-rate scheduling; on completion, reload the
        best weights seen during training.

        Parameters:
        - epochs: number of training epochs
        - early_stopping: whether early stopping may end training early
        - patience: optional per-call override of the early-stopping patience
        - scheduler_params: optional scheduler overrides forwarded to
          _create_lr_scheduler
        """
        # Reset cancellation flag and early-stopping bookkeeping.
        self.reset_training_state()
        
        # Apply the per-call patience override, if any.
        if patience is not None:
            self.early_stopping_patience = patience
            print(f"[早停配置] 更新耐心值为: {self.early_stopping_patience}")
        
        # NOTE(review): redundant with reset_training_state() above; kept as-is.
        self.patience_counter = 0
        self.best_test_acc = 0.0
        self.best_test_loss = float('inf')
        self.best_model_weights = None
        
        # Fresh learning-rate trace for this run.
        self.lr_history = []
        
        # Build the scheduler now that the total epoch count is known.
        self.lr_scheduler = self._create_lr_scheduler(max_epochs=epochs, scheduler_params=scheduler_params)
        
        print(f"开始训练，共 {epochs} 轮")
        if early_stopping:
            print(f"早停已启用，耐心值: {self.early_stopping_patience}")
        print(f"学习率调度器: {self.lr_scheduler_type if self.lr_scheduler else '无'}")
        print(f"梯度裁剪: {self.gradient_clip}")
        
        start_time = time.time()
        
        for epoch in range(epochs):
            # Cooperative cancellation is checked before, between, and after
            # the train/test phases so a stop request takes effect quickly.
            if self.should_stop:
                print(f"\n[训练取消] 用户在第 {epoch+1} 轮取消了训练")
                break
                
            print(f"\n轮次 {epoch+1}/{epochs}")
            
            # Train for one epoch.
            train_loss, train_acc = self.train_epoch()
            
            # Re-check cancellation after training.
            if self.should_stop:
                print(f"\n[训练取消] 用户在第 {epoch+1} 轮取消了训练")
                break
            
            # Evaluate on the test set (also updates early-stopping state).
            test_loss, test_acc, should_stop = self.test()
            
            # Re-check cancellation after evaluation.
            if self.should_stop:
                print(f"\n[训练取消] 用户在第 {epoch+1} 轮取消了训练")
                break
            
            # Record per-epoch metrics in the legacy lists.
            self.train_loss_history.append(train_loss)
            self.test_loss_history.append(test_loss)
            self.train_acc_history.append(train_acc)
            self.test_acc_history.append(test_acc)
            
            # Guarantee lr_history has at least one entry before indexing it.
            if not self.lr_history:
                self.lr_history.append(self.optimizer.param_groups[0]['lr'])
            
            # Record in the unified history (and update the visualizer).
            self.update_history(epoch, train_loss, test_loss, train_acc, test_acc, self.lr_history[-1])
            
            # Per-epoch summary.
            print(f"训练损失: {train_loss:.4f}, 训练准确率: {train_acc:.2f}%")
            print(f"测试损失: {test_loss:.4f}, 测试准确率: {test_acc:.2f}%")
            print(f"当前学习率: {self.lr_history[-1]:.6f}")
            
            # Fire registered epoch-end callbacks.
            for callback in self.on_epoch_end_callbacks:
                callback(self, epoch)
            
            # Epoch-wise schedulers step here; OneCycleLR steps per batch
            # inside train_epoch().
            if self.lr_scheduler and self.lr_scheduler_type != 'onecycle':
                self.lr_scheduler.step()
            
            # Early stopping as decided by test().
            if early_stopping and should_stop:
                print(f"\n[早停触发] 训练在第 {epoch+1} 轮提前停止")
                break
            
            # Debug output of the early-stopping state.
            if early_stopping:
                print(f"[早停状态] 当前计数: {self.patience_counter}/{self.early_stopping_patience}, 最佳准确率: {self.best_test_acc:.2f}%")
        
        total_time = time.time() - start_time
        print(f"\n训练完成，总耗时: {total_time:.2f} 秒")
        print(f"最佳测试准确率: {self.best_test_acc:.2f}%")
        
        # Restore the best weights observed during this run.
        if self.best_model_weights:
            print("加载最佳模型权重...")
            self.model.load_state_dict(self.best_model_weights)
        else:
            print("警告: 没有保存的最佳模型权重")
    
    def get_best_accuracy(self):
        """
        Return the best test accuracy observed so far.

        Returns:
        - best test accuracy (percent)
        """
        return self.best_test_acc
    
    def predict(self, input_tensor):
        """
        使用模型进行预测
        
        参数:
        - input_tensor: 输入张量
        
        返回:
        - 预测类别
        - 概率分布
        """
        self.model.eval()
        
        with torch.no_grad():
            # 将输入移至设备
            input_tensor = input_tensor.to(self.device)
            
            # 前向传播
            outputs = self.model(input_tensor)
            
            # 计算概率
            probabilities = torch.softmax(outputs, dim=1)
            
            # 获取预测类别
            _, predicted = torch.max(probabilities, 1)
            
            # 调试信息
            print(f"预测结果分布: {probabilities.cpu().numpy()}")
        
        return predicted, probabilities
    
    def get_activations(self, input_tensor):
        """
        激活值获取方法，支持更全面的层激活值提取
        
        参数:
        - input_tensor: 输入张量
        
        返回:
        - 激活值字典，键为层名称，值为激活值张量
        """
        activations = {}
        
        # 定义一个递归函数来注册所有层的钩子
        def register_hooks(module, prefix=''):
            hooks = []
            for name, child in module.named_children():
                if isinstance(child, (nn.Conv2d, nn.Linear, nn.ReLU, nn.LeakyReLU, 
                                     nn.BatchNorm2d, nn.Dropout, nn.MaxPool2d, nn.AdaptiveAvgPool2d, nn.AdaptiveMaxPool2d)):
                    # 为重要层注册钩子
                    current_name = f"{prefix}{name}" if prefix else name
                    def hook_fn(m, i, o, layer_name=current_name):
                        activations[layer_name] = o.detach()
                    hook = child.register_forward_hook(hook_fn)
                    hooks.append(hook)
                # 递归注册子模块的钩子
                child_hooks = register_hooks(child, prefix=f'{prefix}{name}.')
                hooks.extend(child_hooks)
            return hooks
        
        # 注册钩子
        hooks = register_hooks(self.model)
        
        # 前向传播
        with torch.no_grad():
            input_tensor = input_tensor.to(self.device)
            self.model(input_tensor)
        
        # 移除钩子
        for hook in hooks:
            hook.remove()
        
        return activations
    
    def save(self, filepath):
        """
        保存模型和训练状态
        
        参数:
        - filepath: 保存路径
        """
        # 保存调度器参数和当前学习率
        scheduler_params = {
            'lr_scheduler_type': self.lr_scheduler_type,
            'weight_decay': self.optimizer.param_groups[0].get('weight_decay', 0),
            'betas': self.optimizer.param_groups[0].get('betas', (0.9, 0.999)),
            'early_stopping_patience': self.early_stopping_patience,
            'gradient_clip': self.gradient_clip,
            'acc_drop_threshold': self.acc_drop_threshold,
            'loss_increase_threshold': self.loss_increase_threshold,
            'current_lr': self.optimizer.param_groups[0]['lr']
        }
        
        # 保存调度器的状态
        scheduler_state = None
        if self.lr_scheduler:
            try:
                scheduler_state = self.lr_scheduler.state_dict()
            except:
                print("警告：无法保存调度器状态")
        
        state = {
            'model_state_dict': self.model.state_dict(),
            'optimizer_state_dict': self.optimizer.state_dict(),
            'scheduler_state': scheduler_state,
            'best_model_weights': self.best_model_weights,
            'train_loss_history': self.train_loss_history,
            'test_loss_history': self.test_loss_history,
            'train_acc_history': self.train_acc_history,
            'test_acc_history': self.test_acc_history,
            'lr_history': self.lr_history,
            'best_test_acc': self.best_test_acc,
            'scheduler_params': scheduler_params
        }
        torch.save(state, filepath)
        print(f"模型已保存至 {filepath}")
    
    def load(self, filepath):
        """
        Load model weights and training state from a checkpoint file,
        accepting several checkpoint layouts.

        Parameters:
        - filepath: path of the checkpoint to load

        NOTE(review): torch.load unpickles arbitrary objects — only load
        checkpoints from trusted sources.
        """
        state = torch.load(filepath, map_location=self.device)
        
        # Dispatch on the checkpoint layout.
        if isinstance(state, dict):
            # New-format checkpoint (carries train_params alongside weights).
            if 'train_params' in state and 'model_state_dict' in state:
                # Only the weights are loaded here; the model instance is
                # expected to be created with a matching config by the caller.
                self.model.load_state_dict(state['model_state_dict'])
                print("从新格式检查点加载模型权重")
            # Standard trainer checkpoint format (as written by save()).
            elif 'model_state_dict' in state and 'optimizer_state_dict' in state:
                self.model.load_state_dict(state['model_state_dict'])
                self.optimizer.load_state_dict(state['optimizer_state_dict'])
                self.best_model_weights = state.get('best_model_weights')
                self.train_loss_history = state.get('train_loss_history', [])
                self.test_loss_history = state.get('test_loss_history', [])
                self.train_acc_history = state.get('train_acc_history', [])
                self.test_acc_history = state.get('test_acc_history', [])
                self.lr_history = state.get('lr_history', [])
                self.best_test_acc = state.get('best_test_acc', 0.0)
            # Otherwise assume a bare model state_dict.
            else:
                try:
                    self.model.load_state_dict(state)
                    print("作为简单的model_state_dict加载成功")
                except Exception as e:
                    print(f"直接加载model_state_dict失败，可能需要先使用正确配置创建模型: {str(e)}")
                    raise
        else:
            # Non-dict object: last resort, try it as a bare state_dict.
            try:
                self.model.load_state_dict(state)
                print("作为简单的model_state_dict加载成功")
            except Exception as e:
                print(f"直接加载model_state_dict失败，可能需要先使用正确配置创建模型: {str(e)}")
                raise
        
        # Restore trainer hyper-parameters saved alongside the weights.
        if isinstance(state, dict) and 'scheduler_params' in state:
            scheduler_params = state['scheduler_params']
            self.lr_scheduler_type = scheduler_params.get('lr_scheduler_type', 'onecycle')
            self.early_stopping_patience = scheduler_params.get('early_stopping_patience', 10)
            self.gradient_clip = scheduler_params.get('gradient_clip', 1.0)
            self.acc_drop_threshold = scheduler_params.get('acc_drop_threshold', 5.0)
            self.loss_increase_threshold = scheduler_params.get('loss_increase_threshold', 2.0)
            
            # Restore the last learning rate on every optimizer param group.
            if 'current_lr' in scheduler_params:
                for param_group in self.optimizer.param_groups:
                    param_group['lr'] = scheduler_params['current_lr']
            
            print(f"已加载调度器参数: 类型={self.lr_scheduler_type}, 早停耐心值={self.early_stopping_patience}")
        
        # NOTE(review): the scheduler state is detected but never actually
        # restored here (the try body only prints); also this membership test
        # is not guarded by isinstance(state, dict) — confirm for bare-tensor
        # checkpoint formats.
        if 'scheduler_state' in state and state['scheduler_state'] is not None:
            try:
                print("已加载调度器状态（将在下次训练时应用）")
            except Exception as e:
                print(f"警告：无法恢复调度器状态: {e}")
        
        # Move the (possibly re-loaded) model onto the trainer's device.
        self.model.to(self.device)
        print(f"模型已从 {filepath} 加载完成")