#!/usr/bin/env python3
"""
FLUX调试参数节点 - 用于调试FLUX训练参数
"""

import os
import sys
from typing import Dict, Any
import torch

# Import ComfyUI module
import folder_paths

# Use a standalone logging system, unaffected by other scripts
from .lora_trainer_utils.custom_logger import get_logger

# Create a dedicated logger for this module
logger = get_logger("flux_debug_params")

class FluxDebugParams:
    """FLUX debug-parameter node, used to reproduce and localize training failures.

    Exposes every otherwise hard-coded sd-scripts FLUX.1 LoRA training option
    as a tunable node input, validates the combination, and returns both the
    raw parameter dict and a human-readable summary string.
    """

    # Identifier of the node pack this node ships with (ComfyUI metadata).
    aux_id = "comfyui_lora_train"

    @classmethod
    def INPUT_TYPES(cls):
        """Declare node inputs -- every previously fixed parameter is tunable here."""
        return {
            "required": {
                # === base model path parameters ===
                "pretrained_model_name_or_path": (folder_paths.get_filename_list("checkpoints"), {
                    "default": "",
                    "multiline": False,
                    "tooltip": "预训练模型路径 - 选择要训练的基础模型"
                }),
                "clip_l": (folder_paths.get_filename_list("clip"), {
                    "tooltip": "CLIP-L模型文件 - FLUX.1训练必需的文本编码器"
                }),
                "t5xxl": (folder_paths.get_filename_list("text_encoders"), {
                    "tooltip": "T5XXL模型文件 - FLUX.1训练必需的文本编码器"
                }),
                "ae": (folder_paths.get_filename_list("vae"), {
                    "tooltip": "AutoEncoder模型文件 - FLUX.1训练必需的图像编码器"
                }),

                # === training data parameters ===
                "train_data_dir": ("STRING", {
                    "default": "",
                    "multiline": False,
                    "tooltip": "训练数据目录路径"
                }),
                "output_dir": ("STRING", {
                    "default": "./output",
                    "multiline": False,
                    "tooltip": "模型输出目录"
                }),
                "output_name": ("STRING", {
                    "default": "flux_debug_lora",
                    "multiline": False,
                    "tooltip": "输出模型名称"
                }),

                # === network structure parameters ===
                "network_dim": ("INT", {
                    "default": 32,
                    "min": 1,
                    "max": 128,
                    "tooltip": "LoRA网络维度 - 影响模型学习能力和显存占用"
                }),
                "network_alpha": ("FLOAT", {
                    "default": 16.0,
                    "min": 0.1,
                    "max": 128.0,
                    "tooltip": "LoRA网络alpha值 - 通常为network_dim的1/2"
                }),
                "network_train_unet_only": ("BOOLEAN", {
                    "default": True,
                    "tooltip": "仅训练UNet - 影响训练速度和显存占用"
                }),

                # === learning-control parameters ===
                "learning_rate": ("FLOAT", {
                    "default": 1e-4,
                    "min": 1e-6,
                    "max": 1e-2,
                    "tooltip": "学习率 - 控制模型学习速度"
                }),
                "max_train_steps": ("INT", {
                    "default": 200,
                    "min": 1,
                    "max": 10000,
                    "tooltip": "最大训练步数 - 控制训练时长"
                }),
                "train_batch_size": ("INT", {
                    "default": 1,
                    "min": 1,
                    "max": 4,
                    "tooltip": "训练批次大小 - 影响显存占用"
                }),
                "gradient_accumulation_steps": ("INT", {
                    "default": 1,
                    "min": 1,
                    "max": 16,
                    "tooltip": "梯度累积步数 - 模拟更大批次"
                }),

                # === resolution parameters ===
                "resolution": ("INT", {
                    "default": 512,
                    "min": 256,
                    "max": 1024,
                    "tooltip": "训练图像分辨率 - 影响显存占用"
                }),

                # === FLUX-specific parameters ===
                "guidance_scale": ("FLOAT", {
                    "default": 1.0,
                    "min": 0.1,
                    "max": 10.0,
                    "tooltip": "引导尺度 - FLUX.1训练参数"
                }),
                "timestep_sampling": (["shift", "uniform"], {
                    "default": "shift",
                    "tooltip": "时间步采样方式"
                }),
                "model_prediction_type": (["raw", "v_prediction", "epsilon"], {
                    "default": "raw",
                    "tooltip": "模型预测类型"
                }),
                "discrete_flow_shift": ("FLOAT", {
                    "default": 3.1582,
                    "min": 0.1,
                    "max": 10.0,
                    "tooltip": "离散流偏移"
                }),

                # === optimizer parameters ===
                "optimizer_type": (["AdamW", "AdamW8bit", "Lion", "Lion8bit", "SGDNesterov", "SGDNesterov8bit", "AdaFactor"], {
                    "default": "Lion",
                    "tooltip": "优化器类型 - 影响训练效果和速度"
                }),
                "mixed_precision": (["no", "fp16", "bf16"], {
                    "default": "bf16",
                    "tooltip": "混合精度训练 - 影响显存占用和训练速度"
                }),

                # === memory optimization parameters ===
                "max_data_loader_n_workers": ("INT", {
                    "default": 8,
                    "min": 0,
                    "max": 16,
                    "tooltip": "数据加载器工作进程数"
                }),
                "blocks_to_swap": ("INT", {
                    "default": 8,
                    "min": 0,
                    "max": 20,
                    "tooltip": "交换到CPU的模型块数"
                }),

                # === cache strategy parameters ===
                "cache_latents": ("BOOLEAN", {
                    "default": True,
                    "tooltip": "缓存潜在表示 - 提升训练速度"
                }),
                "cache_text_encoder_outputs": ("BOOLEAN", {
                    "default": True,
                    "tooltip": "缓存文本编码器输出"
                }),
                "cache_text_encoder_outputs_to_disk": ("BOOLEAN", {
                    "default": True,
                    "tooltip": "将文本编码器输出缓存到磁盘"
                }),

                # === bucketing parameters ===
                "enable_bucket": ("BOOLEAN", {
                    "default": True,
                    "tooltip": "启用图像分桶 - 提高训练效率"
                }),
                "min_bucket_reso": ("INT", {
                    "default": 256,
                    "min": 128,
                    "max": 512,
                    "tooltip": "最小分桶分辨率"
                }),
                "max_bucket_reso": ("INT", {
                    "default": 512,
                    "min": 256,
                    "max": 1024,
                    "tooltip": "最大分桶分辨率"
                }),
                "bucket_reso_steps": ("INT", {
                    "default": 64,
                    "min": 32,
                    "max": 128,
                    "tooltip": "分桶分辨率步长"
                }),

                # === learning-rate schedule parameters ===
                "lr_scheduler": (["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"], {
                    "default": "constant_with_warmup",
                    "tooltip": "学习率调度器"
                }),
                "max_grad_norm": ("FLOAT", {
                    "default": 0.0,
                    "min": 0.0,
                    "max": 10.0,
                    "tooltip": "最大梯度范数 - 防止梯度爆炸"
                }),

                # === advanced optimization parameters ===
                "gradient_checkpointing": ("BOOLEAN", {
                    "default": True,
                    "tooltip": "梯度检查点 - 节省显存但降低速度"
                }),
                "xformers": ("BOOLEAN", {
                    "default": False,
                    "tooltip": "使用xformers - 优化注意力计算"
                }),
                "sdpa": ("BOOLEAN", {
                    "default": True,
                    "tooltip": "使用SDPA - 优化注意力机制"
                }),
                "highvram": ("BOOLEAN", {
                    "default": True,
                    "tooltip": "高显存模式 - 启用更多优化选项"
                }),
                "full_bf16": ("BOOLEAN", {
                    "default": True,
                    "tooltip": "全BF16模式 - 强制所有计算使用BF16"
                }),

                # === CPU offload parameters ===
                "enable_sequential_cpu_offload": ("BOOLEAN", {
                    "default": False,
                    "tooltip": "启用顺序CPU卸载 - 节省显存但大幅降低速度"
                }),
                "enable_cpu_offload": ("BOOLEAN", {
                    "default": False,
                    "tooltip": "启用CPU卸载 - 节省显存但降低速度"
                }),
                "cpu_offload_checkpointing": ("BOOLEAN", {
                    "default": False,
                    "tooltip": "启用CPU卸载检查点"
                }),

                # === save and precision parameters ===
                "save_model_as": (["safetensors", "ckpt", "diffusers"], {
                    "default": "safetensors",
                    "tooltip": "模型保存格式"
                }),
                "save_precision": (["fp16", "bf16", "no"], {
                    "default": "bf16",
                    "tooltip": "保存精度 - 影响模型文件大小"
                }),
                "save_every_n_epochs": ("INT", {
                    "default": 1,
                    "min": 1,
                    "max": 10,
                    "tooltip": "每N轮保存一次模型"
                }),

                # === other optimization parameters ===
                "persistent_data_loader_workers": ("BOOLEAN", {
                    "default": True,
                    "tooltip": "持久化数据加载器工作进程"
                }),
                "fp8_base": ("BOOLEAN", {
                    "default": True,
                    "tooltip": "使用FP8基础模型 - 节省显存"
                }),
                "disable_mmap_load_safetensors": ("BOOLEAN", {
                    "default": True,
                    "tooltip": "禁用内存映射加载safetensors"
                }),

                # === random seed ===
                "seed": ("INT", {
                    "default": 42,
                    "min": 0,
                    "max": 999999,
                    "tooltip": "随机种子 - 确保结果可重现"
                }),
            }
        }

    RETURN_TYPES = ("FLUX_TRAINING_PARAMS", "STRING")
    RETURN_NAMES = ("训练参数", "参数摘要")
    FUNCTION = "create_debug_params"
    CATEGORY = "AI训练/FLUX调试"

    def validate_inputs(self, kwargs):
        """Validate the raw node inputs.

        Args:
            kwargs: dict of the node's input values (before any conversion).

        Returns:
            A list of human-readable error strings; empty when everything is valid.
        """
        errors = []

        # Required model/file paths.
        if not kwargs.get("pretrained_model_name_or_path"):
            errors.append("错误：必须选择预训练模型")

        if not kwargs.get("clip_l"):
            errors.append("错误：必须选择CLIP-L模型")

        if not kwargs.get("t5xxl"):
            errors.append("错误：必须选择T5XXL模型")

        if not kwargs.get("ae"):
            errors.append("错误：必须选择AutoEncoder模型")

        if not kwargs.get("train_data_dir"):
            errors.append("错误：必须设置训练数据目录")

        # Numeric ranges.
        if kwargs.get("network_dim", 0) <= 0:
            errors.append("错误：network_dim必须大于0")

        if kwargs.get("learning_rate", 0) <= 0:
            errors.append("错误：learning_rate必须大于0")

        if kwargs.get("max_train_steps", 0) <= 0:
            errors.append("错误：max_train_steps必须大于0")

        # Resolution (still an int here; converted to "W,H" later).
        resolution = kwargs.get("resolution", 0)
        if resolution < 256 or resolution > 1024:
            errors.append("错误：resolution必须在256-1024之间")

        # Batch size.
        batch_size = kwargs.get("train_batch_size", 0)
        if batch_size < 1 or batch_size > 4:
            errors.append("错误：train_batch_size必须在1-4之间")

        return errors

    def create_debug_params(self, **kwargs):
        """Build the debug parameter dict from the node inputs.

        Returns:
            ``(params_dict, summary_str)``. On validation failure the dict is
            empty and the summary string carries the error report.
        """
        # Validate before mutating anything so messages reference raw input.
        validation_errors = self.validate_inputs(kwargs)
        if validation_errors:
            error_msg = "\n".join(validation_errors)
            logger.error(f"参数验证失败：\n{error_msg}")
            return ({}, f"❌ 参数验证失败：\n{error_msg}")

        # sd-scripts expects resolution as "width,height"; the node exposes a
        # single square edge length.
        kwargs['resolution'] = f"{kwargs['resolution']},{kwargs['resolution']}"

        # Resolve ComfyUI-relative file names to absolute paths.
        folder_by_key = {
            "clip_l": "clip",
            "t5xxl": "text_encoders",
            "ae": "vae",
            "pretrained_model_name_or_path": "checkpoints",
        }
        for key, folder in folder_by_key.items():
            if kwargs[key]:
                try:
                    kwargs[key] = folder_paths.get_full_path(folder, kwargs[key])
                except Exception as e:
                    # Best-effort: keep the relative name if resolution fails.
                    logger.warning(f"无法获取 {key} 的完整路径: {e}")

        # Optimizer-specific extra arguments (table lookup replaces the
        # previous if/elif chain; unknown types fall back to AdamW-style args).
        optimizer_arg_presets = {
            "AdaFactor": ["relative_step=False", "scale_parameter=False", "warmup_init=False"],
            "AdamW": ["weight_decay=0.01", "betas=0.9,0.999"],
            "AdamW8bit": ["weight_decay=0.01", "betas=0.9,0.999"],
            "Lion": ["weight_decay=0.01"],
            "Lion8bit": ["weight_decay=0.01"],
            "SGDNesterov": ["momentum=0.9", "weight_decay=0.01"],
            "SGDNesterov8bit": ["momentum=0.9", "weight_decay=0.01"],
        }
        optimizer_type = kwargs.get("optimizer_type", "Lion")
        # Copy so downstream mutation of kwargs cannot corrupt the preset table.
        kwargs["optimizer_args"] = list(
            optimizer_arg_presets.get(optimizer_type, ["weight_decay=0.01", "betas=0.9,0.999"])
        )

        # FLUX-specific fixed parameters (required by the trainer, never tunable).
        fixed_params = {
            "network_module": "networks.lora_flux",
            "flux": True,
            "v2": False,
            "v_parameterization": False,
        }
        for k, v in fixed_params.items():
            kwargs.setdefault(k, v)

        # Human-readable recap of the final configuration.
        summary = self.generate_summary(kwargs)

        logger.info(f"FLUX调试参数已创建：\n{summary}")

        return (kwargs, summary)

    def generate_summary(self, params):
        """Render a human-readable summary of the final parameter dict.

        Args:
            params: the fully-processed parameter dict (``resolution`` is the
                "width,height" string produced by ``create_debug_params``).

        Returns:
            A multi-line summary string including a risk report.
        """
        summary = "🔧 FLUX调试参数配置摘要\n"
        summary += "=" * 50 + "\n"

        # Basic configuration.
        summary += f"📁 模型路径: {os.path.basename(params.get('pretrained_model_name_or_path', 'N/A'))}\n"
        summary += f"📁 训练数据: {params.get('train_data_dir', 'N/A')}\n"
        summary += f"📁 输出目录: {params.get('output_dir', 'N/A')}\n"
        summary += f"📁 输出名称: {params.get('output_name', 'N/A')}\n\n"

        # Network configuration.
        summary += f"🧠 网络维度: {params.get('network_dim', 'N/A')}\n"
        summary += f"🧠 网络Alpha: {params.get('network_alpha', 'N/A')}\n"
        summary += f"🧠 仅训练UNet: {'是' if params.get('network_train_unet_only') else '否'}\n\n"

        # Training configuration.
        summary += f"⚡ 学习率: {params.get('learning_rate', 'N/A')}\n"
        summary += f"⚡ 训练步数: {params.get('max_train_steps', 'N/A')}\n"
        summary += f"⚡ 批次大小: {params.get('train_batch_size', 'N/A')}\n"
        summary += f"⚡ 梯度累积: {params.get('gradient_accumulation_steps', 'N/A')}\n"
        summary += f"⚡ 分辨率: {params.get('resolution', 'N/A')}\n\n"

        # Optimizer configuration.
        summary += f"🔧 优化器: {params.get('optimizer_type', 'N/A')}\n"
        summary += f"🔧 混合精度: {params.get('mixed_precision', 'N/A')}\n"
        summary += f"🔧 学习率调度: {params.get('lr_scheduler', 'N/A')}\n"
        summary += f"🔧 梯度裁剪: {params.get('max_grad_norm', 'N/A')}\n\n"

        # Memory optimization.
        summary += f"💾 梯度检查点: {'启用' if params.get('gradient_checkpointing') else '禁用'}\n"
        summary += f"💾 块交换: {params.get('blocks_to_swap', 'N/A')}\n"
        summary += f"💾 高显存模式: {'启用' if params.get('highvram') else '禁用'}\n"
        summary += f"💾 全BF16: {'启用' if params.get('full_bf16') else '禁用'}\n"
        summary += f"💾 SDPA: {'启用' if params.get('sdpa') else '禁用'}\n"
        summary += f"💾 xformers: {'启用' if params.get('xformers') else '禁用'}\n\n"

        # Cache configuration.
        summary += f"📦 缓存Latents: {'启用' if params.get('cache_latents') else '禁用'}\n"
        summary += f"📦 缓存文本编码: {'启用' if params.get('cache_text_encoder_outputs') else '禁用'}\n"
        summary += f"📦 缓存到磁盘: {'启用' if params.get('cache_text_encoder_outputs_to_disk') else '禁用'}\n\n"

        # Bucketing configuration.
        summary += f"🪣 启用分桶: {'是' if params.get('enable_bucket') else '否'}\n"
        if params.get('enable_bucket'):
            summary += f"🪣 分桶范围: {params.get('min_bucket_reso', 'N/A')}-{params.get('max_bucket_reso', 'N/A')}\n"
            summary += f"🪣 分桶步长: {params.get('bucket_reso_steps', 'N/A')}\n\n"

        # Risk report.
        risk_factors = []
        if params.get('train_batch_size', 1) > 1:
            risk_factors.append("批次大小>1可能显存不足")
        if params.get('network_dim', 32) > 64:
            risk_factors.append("网络维度>64可能显存不足")
        # BUGFIX: compare the width numerically. The previous string
        # comparison ("'1024' > '512'") is lexicographic and wrongly treated
        # 1024 as NOT exceeding 512. str() guards a bare-int resolution too.
        try:
            reso_width = int(str(params.get('resolution', '512,512')).split(',')[0])
        except ValueError:
            reso_width = 512
        if reso_width > 512:
            risk_factors.append("分辨率>512可能显存不足")
        if not params.get('gradient_checkpointing'):
            risk_factors.append("未启用梯度检查点")
        if not params.get('cache_latents'):
            risk_factors.append("未启用缓存可能速度慢")

        if risk_factors:
            summary += "⚠️ 潜在风险因素:\n"
            for risk in risk_factors:
                summary += f"   • {risk}\n"
        else:
            summary += "✅ 配置相对安全\n"

        return summary


# Node registration: maps the internal node id to its class so ComfyUI can
# discover it on import.
NODE_CLASS_MAPPINGS = {
    "FluxDebugParams": FluxDebugParams,
}

# Display name shown in the ComfyUI node menu for the same node id.
NODE_DISPLAY_NAME_MAPPINGS = {
    "FluxDebugParams": "FLUX调试参数",
}
