#!/usr/bin/env python3
"""
Trainer base classes — built on trainer_api.py; supports pluggable trainer backends.
"""

import os
import sys
import threading
import queue
import time
import logging
from multiprocessing import Value
from pathlib import Path
from abc import ABC, abstractmethod
import ctypes
import argparse
import json

import torch

# Use an independent logging setup so other scripts cannot affect this module's logs
from .custom_logger import get_logger
from .progress_hijacker import ProgressHijacker

# Module-level logger dedicated to this file
logger = get_logger("base_trainer_api")

class BaseTrainerAPI(ABC):
    """Trainer API base class, modeled on trainer_api.py.

    Dynamically imports a training module/class by name, builds the module's
    argparse parser via its ``setup_parser`` function, and converts plain
    argument dicts into validated ``argparse.Namespace`` objects before
    handing them to the concrete trainer. Also tracks training progress and
    wires up a ProgressHijacker so a caller-supplied callback receives
    (step, total_steps, loss, status) updates.
    """
    
    def __init__(self, trainer_module_name, trainer_class_name):
        """Initialize the base trainer.

        Args:
            trainer_module_name: dotted name of the training module to import
            trainer_class_name: name of the trainer class inside that module
        """
        self.trainer_module_name = trainer_module_name
        self.trainer_class_name = trainer_class_name
        self.parser_setup_func_name = "setup_parser"  # fixed: always use setup_parser
        
        # Import the training module (sets self.training_available / self.trainer_class)
        self._import_training_module()
        
        # Build the parser and trainer only when the module import succeeded;
        # otherwise self.parser / self.trainer are never created.
        if self.training_available:
            self.parser = self._get_parser_setup_func()()
            self.trainer = self.trainer_class()
        
        # Progress bookkeeping
        self.current_step = 0
        self.total_steps = 0
        self.current_loss = 0.0
        self.progress_callback = None  # progress callback function
        
        # Progress-hijack manager (intercepts the trainer's progress reporting)
        self.progress_hijacker = ProgressHijacker(self)
    
    def _get_parser_setup_func(self):
        """Resolve the parser-setup function.

        ``parser_setup_func_name`` is set to the string "setup_parser" in
        __init__; the callable branch exists so a subclass may assign a
        function object directly instead of a name.
        """
        if callable(self.parser_setup_func_name):
            # A function object was supplied — return it unchanged
            return self.parser_setup_func_name
        else:
            # A string name was supplied — import it from the training module
            try:
                module = __import__(self.trainer_module_name, fromlist=[self.parser_setup_func_name])
                return getattr(module, self.parser_setup_func_name)
            except (ImportError, AttributeError) as e:
                logger.error(f"无法导入解析器设置函数 {self.parser_setup_func_name}: {e}")
                raise
    
    def _import_training_module(self):
        """Dynamically import the training module and fetch the trainer class.

        On failure, sets ``training_available = False`` and leaves
        ``trainer_class`` as None instead of raising.
        """
        try:
            # Dynamic import so the concrete trainer can be selected by name
            module = __import__(self.trainer_module_name, fromlist=[self.trainer_class_name])
            self.trainer_class = getattr(module, self.trainer_class_name)
            
            self.training_available = True
            logger.info(f"[SUCCESS] {self.trainer_module_name} 训练模块导入成功")
        except ImportError as e:
            logger.error(f"[ERROR] {self.trainer_module_name} 训练模块导入失败: {e}")
            self.training_available = False
            self.trainer_class = None
    
    def set_progress_callback(self, callback):
        """Register the progress callback.

        Args:
            callback: callable receiving (step, total_steps, loss, status)
        """
        self.progress_callback = callback
    
    def get_training_progress(self):
        """Return the current progress snapshot as a dict.

        ``progress`` is a percentage (0 when total_steps is unknown/0).
        """
        return {
            "status": "训练中",
            "progress": (self.current_step / self.total_steps * 100) if self.total_steps > 0 else 0,
            "current_step": self.current_step,
            "total_steps": self.total_steps,
            "current_loss": self.current_loss
        }
    
    def _is_param_defined_in_parser(self, param_name):
        """Return True if ``param_name`` matches an action dest in the parser."""
        try:
            # NOTE: relies on the private argparse attribute ``_actions``
            for action in self.parser._actions:
                if action.dest == param_name:
                    return True
            return False
        except Exception as e:
            logger.warning(f"检查参数 {param_name} 时出错: {e}")
            return False

    def print_available_params(self):
        """Log every parser parameter with type/default/help (debug aid)."""
        try:
            logger.debug(f"\n{'='*80}")
            logger.debug(f"📋 PARSER参数详细信息 (共{len(self.parser._actions)}个)")
            logger.debug(f"{'='*80}")
            
            # Iterate in the parser's original declaration order
            for i, action in enumerate(self.parser._actions, 1):
                # Basic identity of the parameter
                param_name = action.dest
                action_type = type(action).__name__
                
                # Infer a user-facing type from the action class
                param_type = self._get_param_type(action)
                
                # Default value (formatted for display)
                default_value = self._get_default_value(action)
                
                # Help text
                help_text = getattr(action, 'help', '无帮助信息')
                
                # Extra attributes such as choices / required / nargs
                additional_attrs = self._get_additional_attributes(action)
                
                logger.debug(f"{i:3d}. {param_name}")
                logger.debug(f"     类型: {param_type} ({action_type})")
                logger.debug(f"     默认值: {default_value}")
                logger.debug(f"     帮助: {help_text}")
                
                # Show extra attributes when any were collected
                if additional_attrs:
                    logger.debug(f"     其他属性: {additional_attrs}")
                
                logger.debug("")
            
            logger.debug(f"{'='*80}")
            
        except Exception as e:
            logger.error(f"打印参数列表时出错: {e}")
            import traceback
            logger.error(f"错误堆栈: {traceback.format_exc()}")
    
    def _get_param_type(self, action):
        """Map an argparse action to a display type name.

        Compares against private argparse class names by string, so an
        unrecognized action kind yields 'unknown'.
        """
        action_type = type(action).__name__
        
        # Dispatch on the concrete action class
        if action_type == '_StoreAction':
            # Use the declared ``type`` converter when present; argparse
            # stores raw strings otherwise
            if hasattr(action, 'type') and action.type is not None:
                return str(action.type.__name__)
            else:
                return 'str'
        elif action_type == '_StoreTrueAction':
            return 'bool'
        elif action_type == '_StoreFalseAction':
            return 'bool'
        elif action_type == '_AppendAction':
            return 'list'
        elif action_type == '_CountAction':
            return 'int'
        else:
            return 'unknown'
    
    def _get_default_value(self, action):
        """Format an action's default value for display (strings quoted)."""
        try:
            if hasattr(action, 'default'):
                default = action.default
                if default is None:
                    return 'None'
                elif isinstance(default, str):
                    return f"'{default}'"
                else:
                    return str(default)
            else:
                return '无默认值'
        except Exception:
            return '获取失败'
    
    def _get_additional_attributes(self, action):
        """Collect informative optional action attributes into a dict."""
        attrs = {}
        try:
            # Only include attributes that are set / meaningful
            if hasattr(action, 'choices') and action.choices is not None:
                attrs['choices'] = action.choices
            if hasattr(action, 'required'):
                attrs['required'] = action.required
            if hasattr(action, 'nargs') and action.nargs is not None:
                attrs['nargs'] = action.nargs
            if hasattr(action, 'const') and action.const is not None:
                attrs['const'] = action.const
            if hasattr(action, 'metavar') and action.metavar is not None:
                attrs['metavar'] = action.metavar
        except Exception:
            pass
        return attrs
    
    def _get_param_type_from_parser(self, param_name):
        """Look up a parameter by dest and return its display type, or None."""
        try:
            for action in self.parser._actions:
                if action.dest == param_name:
                    return self._get_param_type(action)
            return None
        except Exception as e:
            logger.warning(f"从parser获取参数 {param_name} 类型时出错: {e}")
            return None
    
    def compare_args(self, args1, args2, name1="args1", name2="args2"):
        """Log a per-parameter diff of two Namespace-like objects (debug aid)."""
        try:
            logger.debug(f"\n{'='*80}")
            logger.debug(f"📊 参数比较: {name1} vs {name2}")
            logger.debug(f"{'='*80}")
            
            # Collect every parameter name known to the parser
            all_params = set()
            for action in self.parser._actions:
                all_params.add(action.dest)
            
            # Compare each parameter; missing attributes read as None
            for param in sorted(all_params):
                val1 = getattr(args1, param, None)
                val2 = getattr(args2, param, None)
                
                if val1 != val2:
                    logger.debug(f"❌ {param}:")
                    logger.debug(f"   {name1}: {val1}")
                    logger.debug(f"   {name2}: {val2}")
                else:
                    logger.debug(f"✅ {param}: {val1}")
            
            logger.debug(f"{'='*80}")
            
        except Exception as e:
            logger.error(f"比较参数时出错: {e}")
    
    def _validate_required_args(self, args):
        """Validate critical/required parameters; log and return False on failure.

        Note: callers in this class only log the result — a False return does
        not abort preparation.
        """
        try:
            # Critical required parameters and their descriptions
            critical_params = {
                "network_module": "LoRA网络模块类型（如networks.lora）",
                "network_dim": "LoRA网络维度（RANK）",
                "network_alpha": "LoRA alpha值",
                "pretrained_model_name_or_path": "预训练模型路径",
                "train_data_dir": "训练数据目录",
                "output_dir": "输出目录",
                "learning_rate": "学习率",
                "max_train_steps": "最大训练步数",
                "train_batch_size": "训练批次大小",
                "resolution": "训练分辨率",
            }
            
            missing_params = []
            invalid_params = []
            
            # Check the critical parameters for presence and non-empty values
            for param, description in critical_params.items():
                if not hasattr(args, param):
                    missing_params.append(f"{param} ({description})")
                elif getattr(args, param) is None or getattr(args, param) == "":
                    invalid_params.append(f"{param} ({description}) - 值为空")
            
            # Check parser-declared required parameters.
            # NOTE(review): parse_args always sets every dest, so this hasattr
            # check likely never fires — confirm whether a value check was intended.
            for action in self.parser._actions:
                if action.required and not hasattr(args, action.dest):
                    missing_params.append(action.dest)
            
            if missing_params or invalid_params:
                error_msg = "❌ 参数验证失败:\n"
                if missing_params:
                    error_msg += f"缺少参数: {', '.join(missing_params)}\n"
                if invalid_params:
                    error_msg += f"无效参数: {', '.join(invalid_params)}\n"
                error_msg += f"\n当前可用参数: {[attr for attr in dir(args) if not attr.startswith('_')]}"
                logger.error(error_msg)
                return False
            
            logger.info(f"✅ 参数验证通过，共检查 {len(critical_params)} 个关键参数")
            return True
        except Exception as e:
            logger.error(f"验证参数时出错: {e}")
            return False
    
    def _reset_all_strategies(self):
        """Reset the strategy singletons in library.strategy_base.

        Clears the class-level ``_strategy`` slots directly (rather than via a
        reset_strategy method) so a subsequent run can re-register strategies
        without "already set" errors.
        """
        try:
            # Reset the strategy class variables directly
            import library.strategy_base as strategy_base
            
            # Reset TokenizeStrategy
            if hasattr(strategy_base, 'TokenizeStrategy'):
                strategy_base.TokenizeStrategy._strategy = None
            
            # Reset TextEncodingStrategy
            if hasattr(strategy_base, 'TextEncodingStrategy'):
                strategy_base.TextEncodingStrategy._strategy = None
            
            # Reset TextEncoderOutputsCachingStrategy
            if hasattr(strategy_base, 'TextEncoderOutputsCachingStrategy'):
                strategy_base.TextEncoderOutputsCachingStrategy._strategy = None
            
            # Reset LatentsCachingStrategy
            if hasattr(strategy_base, 'LatentsCachingStrategy'):
                strategy_base.LatentsCachingStrategy._strategy = None
            
            logger.info("✅ 已重置所有策略")
        except Exception as e:
            logger.warning(f"重置策略时出错: {e}")
    
    def print_args_dict(self, args_dict):
        """Log the incoming argument dict, one key per line (debug aid)."""
        logger.debug("📋 参数字典:")
        for key, value in args_dict.items():
            logger.debug(f"  {key}: {value}")
    
    def print_args_object(self, args):
        """Log every attribute of an args Namespace via vars() (debug aid)."""
        logger.debug("🔍 Args对象所有参数值:")
        for key, value in vars(args).items():
            logger.debug(f"  {key}: {repr(value)}")
    
    def _prepare_training(self, args_dict):
        """Prepare the training environment.

        Converts ``args_dict`` into a Namespace using the module parser's
        defaults, validates it, merges the config file, resets progress
        counters, recreates the trainer instance, and installs progress
        hijacking when a callback is set.

        Args:
            args_dict: dict of training parameters (keys must match parser dests)
        Returns:
            tuple: (args, original_grad_state) — the parsed Namespace and the
            torch grad-enabled flag to restore in _cleanup_training
        Raises:
            RuntimeError: when reading the config file triggers SystemExit
            Exception: any other preparation failure (grad state is restored first)
        """
        # Deferred import of train_util, matching how the FLUX training
        # scripts import it
        from library import train_util

        # Reset all strategy singletons to avoid duplicate-registration errors
        self._reset_all_strategies()
        
        original_grad_state = torch.is_grad_enabled()  # save grad state for later restore
        
        try:
            # Training requires gradients enabled
            torch.set_grad_enabled(True)
            
            # Start from the parser defaults, then overlay args_dict below
            args = self.parser.parse_args([])  # all-defaults Namespace
            
            # Debug: how many parameters were supplied
            logger.debug(f"🚀 开始设置参数，共 {len(args_dict)} 个参数")
            
            # If config_file was given, check its existence; drop the key when
            # the file is missing so read_config_from_file doesn't fail.
            if 'config_file' in args_dict and args_dict['config_file']:
                config_path = args_dict['config_file']
                # Existence is checked with a '.toml' suffix appended when absent.
                # NOTE(review): the suffixed path is used only for this check —
                # args still carries the original string; presumably
                # read_config_from_file appends '.toml' itself. Confirm.
                if not config_path.endswith('.toml'):
                    config_path += '.toml'
                
                if os.path.exists(config_path):
                    logger.debug(f"📂 配置文件存在: {config_path}")
                else:
                    logger.warning(f"⚠️  配置文件不存在: {config_path}，将删除config_file参数")
                    del args_dict['config_file']

            # Log the full incoming dict for debugging
            self.print_args_dict(args_dict)
            
            # How many parameters the parser defines
            logger.debug(f"📋 Parser中定义的参数数量: {len(self.parser._actions)}")

            for key, value in args_dict.items():
                # Find the parser action matching this key
                param_action = None
                for action in self.parser._actions:
                    if action.dest == key:
                        param_action = action
                        break
                
                # Unknown key: warn and skip rather than setting arbitrary attrs
                if param_action is None:
                    logger.warning(f"❌ 参数 {key} 未在parser中定义，跳过")
                    continue

                # Decide whether this value should be skipped (None / empty string)
                should_skip = False
                skip_reason = ""
                
                # None or empty string: skip entirely, keeping the parser default
                if value is None or (isinstance(value, str) and value.strip() == ""):
                    should_skip = True
                    skip_reason = f"值为{value}，跳过设置"
                
                # Skip and record the reason
                if should_skip:
                    logger.debug(f"ℹ️  跳过参数 {key}: {skip_reason}")
                    continue

                # Dispatch on the action type
                if isinstance(param_action, argparse._StoreAction):
                    # Plain value parameters (strings, numbers, ...)
                    try:
                        if hasattr(param_action, 'type') and param_action.type is not None:
                            # Typed parameter: set the raw value; downstream
                            # consumers handle conversion
                            setattr(args, key, value)
                        else:
                            # Untyped parameter: set the value directly
                            setattr(args, key, value)
                    except (ValueError, TypeError) as e:
                        logger.warning(f"⚠️  参数 {key} 类型转换失败: {e}，使用原始值")
                        setattr(args, key, value)
                
                elif isinstance(param_action, argparse._StoreTrueAction):
                    # Boolean flags declared with action="store_true".
                    # Only True is written; False keeps the parser default.
                    if isinstance(value, bool):
                        if value is True:
                            setattr(args, key, True)
                    else:
                        logger.error(f"❌ 参数 {key} 需要布尔值，但传入了{type(value).__name__}: {value}")
                        continue
                
                else:
                    # Any other action type (append, count, store_false, ...) is
                    # not supported here: warn and skip
                    logger.warning(f"⚠️  参数 {key} 使用了不支持的action类型: {type(param_action).__name__}，跳过处理")
                    continue
            
            # Validate required parameters (result is logged, not enforced)
            logger.debug("✅ 开始验证必需参数...")
            self._validate_required_args(args)
            logger.debug("✅ 必需参数验证完成")
            
            # Log every resolved parameter value for debugging
            self.print_args_object(args)
            
            # Run train_util's own validation, then merge the config file
            logger.debug("✅ 开始验证命令行参数...")
            train_util.verify_command_line_training_args(args)
            logger.debug("✅ 命令行参数验证完成")
            
            try:
                logger.debug(f"📂 开始读取配置文件: {getattr(args, 'config_file', 'None')}")
                args = train_util.read_config_from_file(args, self.parser)
                logger.debug("✅ 配置文件读取完成")
            except SystemExit as e:
                # read_config_from_file may call sys.exit(); convert that into
                # a catchable exception so the host process survives
                logger.error(f"❌ 配置文件读取失败: {e}")
                raise RuntimeError(f"配置文件读取失败: {e}")
            except Exception as e:
                logger.error(f"❌ 配置文件读取时发生其他异常: {e}")
                raise e
            
            # Reset progress counters for the new run
            self.current_step = 0
            self.total_steps = args.max_train_steps
            self.current_loss = 0.0
            
            # Log the key training settings
            logger.info(f"🎯 训练参数准备完成:")
            logger.info(f"   - 最大训练步数: {args.max_train_steps}")
            logger.info(f"   - 批次大小: {getattr(args, 'train_batch_size', 'N/A')}")
            logger.info(f"   - 梯度累积步数: {getattr(args, 'gradient_accumulation_steps', 'N/A')}")
            logger.info(f"   - 学习率: {getattr(args, 'learning_rate', 'N/A')}")
            
            # Sanity-check the step count (warnings only)
            if self.total_steps <= 0:
                logger.warning(f"⚠️ 总步数设置为0或负数: {self.total_steps}，这可能导致进度显示异常")
            elif self.total_steps > 10000:
                logger.warning(f"⚠️ 总步数设置过大: {self.total_steps}，请确认是否需要这么多步数")
            
            # Recreate the trainer instance per run to avoid stale state
            self.trainer = self.trainer_class()
            
            # Install progress hijacking only when someone is listening
            if self.progress_callback:
                self.progress_hijacker.setup_hijacking()
            
            return args, original_grad_state
            
        except Exception as e:
            # Restore the original grad state before propagating
            torch.set_grad_enabled(original_grad_state)
            logger.error(f"💥 发生异常: {e}")
            import traceback
            logger.error(f"💥 异常堆栈: {traceback.format_exc()}")
            raise e
    
    def _cleanup_training(self, original_grad_state):
        """Restore the saved grad state and remove progress hijacking."""
        torch.set_grad_enabled(original_grad_state)
        
        # Undo whatever setup_hijacking installed
        self.progress_hijacker.restore_hijacking()
        
        logger.debug("✅ 训练环境清理完成")
    
    @abstractmethod
    def train(self, args_dict):
        """Run training (implemented by subclasses)."""
        pass

    def update_progress(self, step, total_steps, loss, status="训练中"):
        """Update training progress and notify the registered callback.

        Args:
            step: current step number
            total_steps: total step count (0 means unknown)
            loss: current loss value (number, numeric string, or None)
            status: status text forwarded to the callback
        """
        logger.debug(f"🔄 BaseTrainerAPI.update_progress被调用: step={step}, total_steps={total_steps}, loss={loss}, status={status}")
        
        self.current_step = step
        self.total_steps = total_steps
        
        # Coerce loss to float only when it is safely convertible; anything
        # else (including negative-number strings) falls back to 0.0
        current_loss = 0.0
        if loss is not None:
            if isinstance(loss, (int, float)):
                current_loss = float(loss)
            elif isinstance(loss, str) and loss.replace('.', '', 1).isdigit():
                current_loss = float(loss)
        
        self.current_loss = current_loss
        
        # Log progress, guarding against division by zero
        if total_steps > 0:
            if current_loss > 0:
                logger.debug(f"训练进度: {step}/{total_steps} ({step/total_steps*100:.1f}%), 损失: {current_loss:.4f}")
            else:
                logger.debug(f"训练进度: {step}/{total_steps} ({step/total_steps*100:.1f}%)")
        else:
            if current_loss > 0:
                logger.debug(f"训练进度: {step}/0 (0.0%), 损失: {current_loss:.4f}")
            else:
                logger.debug(f"训练进度: {step}/0 (0.0%)")
        
        # Forward to the progress callback, if any; callback errors are logged
        # but never propagated into the training loop
        if self.progress_callback:
            try:
                logger.debug(f"📞 调用进度回调函数: {self.progress_callback}")
                self.progress_callback(step, total_steps, current_loss, status)
                logger.debug(f"✅ 进度回调函数调用成功")
            except Exception as e:
                logger.error(f"进度回调执行失败: {str(e)}")
        else:
            logger.warning(f"⚠️ 没有设置进度回调函数")

class DirectTrainerAPI(BaseTrainerAPI):
    """Direct trainer — runs training synchronously in the caller's thread."""
    
    def train(self, args_dict, progress_callback=None):
        """Run training synchronously.

        Args:
            args_dict: dict of training parameters
            progress_callback: optional callable (step, total_steps, loss, status)
        Returns:
            str: result message on success
        Raises:
            Exception: re-raises any failure from preparation or training
        """
        # Pre-bind so the except path never hits an unbound local: the
        # original referenced original_grad_state in its handler, which
        # raised NameError (silently swallowed) whenever _prepare_training
        # itself failed — meaning cleanup never actually ran in that case.
        original_grad_state = None
        try:
            # Register the callback before preparation so hijacking gets installed
            if progress_callback:
                self.set_progress_callback(progress_callback)
            # Prepare the training environment (args parsing, strategy reset, ...)
            args, original_grad_state = self._prepare_training(args_dict)
            
            # Run the actual training
            logger.info("🚀 开始训练...")
            self.trainer.train(args)
            
            # Restore grad state and remove progress hijacking
            self._cleanup_training(original_grad_state)
            
            logger.info("🎉 训练完成！")
            return "训练完成"
            
        except Exception as e:
            logger.error(f"训练失败: {e}")
            # Best-effort cleanup — only possible once preparation saved the state
            if original_grad_state is not None:
                try:
                    self._cleanup_training(original_grad_state)
                except Exception:
                    # Narrowed from a bare except: never mask SystemExit/KeyboardInterrupt
                    pass
            raise

class SimpleTrainerAPI(BaseTrainerAPI):
    """Threaded trainer — runs training in a daemon thread with stop support."""
    
    def __init__(self, trainer_module_name, trainer_class_name):
        super().__init__(trainer_module_name, trainer_class_name)
        self.train_thread = None
        self.is_training = False
        self.should_stop = False
        self.training_result = None  # set by the worker thread, read by train()
    
    def stop_training(self):
        """Force-stop the training thread via PyThreadState_SetAsyncExc.

        Returns:
            str: stop-status message
        """
        if not self.is_training or not self.train_thread:
            return f"没有正在进行的{self.trainer_module_name}训练任务"
        
        self.should_stop = True
        logger.debug("正在强制终止训练线程...")
        
        try:
            # Terminate directly through the thread object
            if self.train_thread.is_alive():
                thread_id = self.train_thread.ident
                if thread_id:
                    # Asynchronously raise SystemExit inside the worker thread
                    res = ctypes.pythonapi.PyThreadState_SetAsyncExc(
                        ctypes.c_long(thread_id), 
                        ctypes.py_object(SystemExit)
                    )
                    if res > 1:
                        # More than one thread state was modified: revoke the
                        # pending exception per the CPython API contract.
                        # Fixed: the revocation must use the same c_long id and
                        # a NULL exception pointer (None), not raw ints.
                        ctypes.pythonapi.PyThreadState_SetAsyncExc(
                            ctypes.c_long(thread_id), None
                        )
                        logger.warning("线程终止失败")
                    else:
                        logger.info("训练线程已强制终止")
                else:
                    logger.warning("无法获取线程ID")
            else:
                logger.info("训练线程已经结束")
        except Exception as e:
            logger.warning(f"终止训练线程时出错: {e}")
        
        # Clear the bookkeeping state
        self.is_training = False
        self.train_thread = None
        
        return "训练已强制终止"
    
    def train(self, args_dict, progress_callback=None):
        """Start the training thread and block until it finishes or is stopped.

        Args:
            args_dict: dict of training parameters
            progress_callback: optional callable (step, total_steps, loss, status)
        Returns:
            str: result message from the worker thread
        """
        if self.is_training:
            return f"已有{self.trainer_module_name}训练任务正在进行中"
        
        self.is_training = True
        self.training_result = None
        self.should_stop = False  # reset the stop flag for this run
        
        # Register the progress callback before the thread starts
        if progress_callback:
            self.set_progress_callback(progress_callback)
        
        # Launch the worker as a daemon thread so it cannot block interpreter exit
        self.train_thread = threading.Thread(
            target=self._train_thread_func,
            args=(args_dict,)
        )
        self.train_thread.daemon = True
        self.train_thread.start()
        
        # Wait for completion, polling so a stop request can interrupt the wait
        try:
            logger.debug("开始等待训练完成...")
            while self.train_thread and self.train_thread.is_alive():
                self.train_thread.join(timeout=1.0)  # re-check once per second
                if self.should_stop:
                    logger.debug("检测到停止信号，等待训练线程结束")
                    break
            logger.info("训练线程已结束")
        except KeyboardInterrupt:
            logger.info("收到键盘中断信号")
            self.stop_training()
        
        return self.training_result or "训练完成"
    
    def _train_thread_func(self, args_dict):
        """Worker-thread body: prepare, train, clean up, and report progress."""
        original_grad_state = None
        try:
            # Honor a stop request issued before preparation began
            if self.should_stop:
                logger.info("训练被中断，跳过训练")
                return
            
            # Prepare the training environment
            args, original_grad_state = self._prepare_training(args_dict)
            
            # Honor a stop request issued during preparation
            if self.should_stop:
                logger.info("训练被中断，跳过训练")
                return
            
            # Run the actual training
            logger.info("🚀 开始训练...")
            self.trainer.train(args)
            
            # Restore grad state and remove progress hijacking
            self._cleanup_training(original_grad_state)
            
            # Record the result so train() returns an accurate message
            self.training_result = "训练完成"
            
            # Send the final progress update. Fixed: the callback contract is
            # positional (step, total_steps, loss, status); the original used
            # keyword names (current_step=, loss=) that don't match, so plain
            # callbacks raised TypeError and the final update was lost.
            if self.progress_callback:
                try:
                    final_progress = self.get_training_progress()
                    self.progress_callback(
                        final_progress.get("current_step", 0),
                        final_progress.get("total_steps", 0),
                        final_progress.get("current_loss", 0.0),
                        "训练完成"
                    )
                except Exception as e:
                    logger.warning(f"发送最终进度更新失败: {e}")
            
            logger.info("🎉 训练完成！")
            
        except Exception as e:
            logger.error(f"训练失败: {e}")
            # Record the failure so train() no longer reports success after an error
            self.training_result = f"训练失败: {str(e)}"
            # Send the error status (positional, matching the callback contract)
            if self.progress_callback:
                try:
                    final_progress = self.get_training_progress()
                    self.progress_callback(
                        final_progress.get("current_step", 0),
                        final_progress.get("total_steps", 0),
                        final_progress.get("current_loss", 0.0),
                        f"训练失败: {str(e)}"
                    )
                except Exception as callback_e:
                    logger.warning(f"发送错误状态失败: {callback_e}")
            
            # Best-effort cleanup — only possible once preparation saved the state
            try:
                if original_grad_state is not None:
                    self._cleanup_training(original_grad_state)
            except Exception:
                # Narrowed from a bare except: never mask SystemExit/KeyboardInterrupt
                pass
        finally:
            # Always clear the run state, however the thread exits
            self.is_training = False
            self.train_thread = None
    
    def get_training_status(self):
        """Return a status snapshot: run flag, stop flag, and progress dict."""
        return {
            "is_training": self.is_training,
            "stop_flag": self.should_stop,
            "progress": self.get_training_progress()
        }
