import comfy.lora
import comfy.utils
import folder_paths
import hashlib
import json
import os
import time
import torch
from comfy.model_management import get_torch_device
from comfy.model_patcher import ModelPatcher


class MarvelousTextInputWithLoraLoader:
    """ComfyUI node: text input that optionally applies a JSON-described list of LoRAs.

    Relies on CLIP's built-in ``tokenize`` to handle text length, and always
    returns a valid (possibly empty) conditioning list. Supports the
    ``pooled_output`` metadata required by SDXL models.

    Outputs: the original text, the (possibly LoRA-patched) MODEL and CLIP,
    and the encoded CONDITIONING.
    """
    CATEGORY = "Marvelous Tools"
    # Cache structure: {cache_key: (model_patcher, clip_patcher, timestamp)}.
    # NOTE(review): cached patchers keep their base model/clip alive for the
    # cache's lifetime — with _CACHE_MAX_SIZE = 2 this pins up to two patched
    # model copies in memory; confirm that is acceptable for the target VRAM/RAM.
    _lora_cache = {}
    _CACHE_MAX_SIZE = 2

    @classmethod
    def IS_CHANGED(cls, prompt_text, loras_json, **kwargs):
        """Return a digest of the inputs so ComfyUI re-runs the node only when they change."""
        combined = f"{prompt_text}|{loras_json}"
        return hashlib.sha256(combined.encode()).hexdigest()

    @classmethod
    def INPUT_TYPES(cls):
        """Declare the node's inputs: required text/JSON strings, optional MODEL and CLIP."""
        return {
            "required": {
                "prompt_text": ("STRING", {
                    "default": "",
                    "multiline": True,
                }),
                "loras_json": ("STRING", {
                    "default": '[]',
                    "multiline": True,
                })
            },
            "optional": {
                "model": ("MODEL",),
                "clip": ("CLIP",)
            }
        }

    RETURN_TYPES = ("STRING", "MODEL", "CLIP", "CONDITIONING")
    RETURN_NAMES = ("text", "MODEL", "CLIP", "conditioning")
    FUNCTION = "process"

    @staticmethod
    def _encode_text(clip_obj, prompt_text):
        """Encode ``prompt_text`` with ``clip_obj`` into a ComfyUI conditioning list.

        Uses ``encode_from_tokens(..., return_pooled=True)`` when available
        (SDXL-style CLIP, which needs ``pooled_output``), otherwise falls back
        to plain ``encode``. Returns ``[(cond, metadata_dict)]``.
        """
        if hasattr(clip_obj, 'encode_from_tokens'):
            # SDXL-style CLIP: get both the conditioning and the pooled output.
            tokens = clip_obj.tokenize(prompt_text)
            cond, pooled = clip_obj.encode_from_tokens(tokens, return_pooled=True)
        else:
            # Plain CLIP model: no pooled output available.
            cond = clip_obj.encode(prompt_text)
            pooled = None

        # Metadata dict carries pooled_output only when the encoder produced one.
        cond_dict = {}
        if pooled is not None:
            cond_dict["pooled_output"] = pooled

        # Wrap in the [(cond, dict)] format ComfyUI expects.
        return [(cond, cond_dict)]

    def process(self, prompt_text, loras_json, model=None, clip=None):
        """Apply the configured LoRAs (cached) and encode the prompt.

        Returns ``(text, model, clip, conditioning)``. Without a CLIP the
        conditioning is an empty list; without a MODEL the text is encoded
        but no LoRA can be applied.
        """
        verbose = True

        # No CLIP: cannot encode anything — return a spec-compliant empty conditioning.
        if clip is None:
            return (prompt_text, model, clip, [])

        # No MODEL: LoRAs cannot be applied, just encode the text as-is.
        if model is None:
            return (prompt_text, model, clip, self._encode_text(clip, prompt_text))

        # Cache key for the patched model/clip pair.
        # Bug fix: the key must include the identity of the base model and clip,
        # not just the LoRA JSON — otherwise swapping the base checkpoint while
        # keeping the same LoRA config would return patchers cloned from the
        # OLD model. (id() can be reused after GC, but the entry then simply
        # maps to the currently-live objects, so a stale hit cannot occur.)
        cache_key = hashlib.sha256(
            f"{id(model)}|{id(clip)}|{loras_json}".encode()
        ).hexdigest()

        current_time = time.time()
        model_patcher = None
        clip_patcher = None

        if cache_key in self._lora_cache:
            if verbose:
                print(f"📦 从缓存加载LoRA: {cache_key}")
            model_patcher, clip_patcher, _ = self._lora_cache[cache_key]
            # Refresh the timestamp so the entry counts as recently used.
            self._lora_cache[cache_key] = (model_patcher, clip_patcher, current_time)
        else:
            # Cache miss: load and apply the LoRAs onto fresh clones.
            model_patcher, clip_patcher = self.load_loras(model, clip, loras_json, verbose)
            self._lora_cache[cache_key] = (model_patcher, clip_patcher, current_time)

            # Evict the least-recently-used entry once over the size limit.
            if len(self._lora_cache) > self._CACHE_MAX_SIZE:
                oldest_key = min(self._lora_cache.keys(), key=lambda k: self._lora_cache[k][2])
                if verbose:
                    print(f"🗑️ 移除旧缓存: {oldest_key}")
                del self._lora_cache[oldest_key]

        # Encode with the LoRA-patched CLIP so text embeddings reflect the LoRAs.
        conditioning = self._encode_text(clip_patcher, prompt_text)

        return (
            prompt_text,
            model_patcher,
            clip_patcher,
            conditioning
        )

    def load_loras(self, model, clip, loras_json, verbose=True):
        """Clone ``model``/``clip`` and apply every LoRA described in ``loras_json``.

        ``loras_json`` is a JSON array of objects with keys ``lora_name``
        (direct file path or a name resolvable via the ``loras`` folder),
        ``strength_model`` and ``strength_clip`` (both default 1.0).

        Returns ``(model_patcher, clip_patcher)``; on a top-level failure the
        original unpatched ``(model, clip)`` are returned as a best-effort
        fallback. Per-LoRA failures are logged and skipped.
        """
        start_time = time.time()
        applied_count = 0

        try:
            if verbose:
                print("正在处理LoRA配置: " + loras_json)

            # Parse the JSON configuration string.
            lora_list = json.loads(loras_json)

            if verbose:
                print(f"🔍 开始加载 {len(lora_list)} 个LoRA")

            # Clone so the caller's original model/clip are never mutated.
            model_patcher = model.clone()
            clip_patcher = clip.clone()

            # Build the LoRA key map covering both the UNet and the CLIP.
            key_map = {}
            key_map = comfy.lora.model_lora_keys_unet(model_patcher.model, key_map)
            key_map = comfy.lora.model_lora_keys_clip(clip_patcher.cond_stage_model, key_map)

            for i, lora_config in enumerate(lora_list):
                lora_name = lora_config.get("lora_name", "")
                strength_model = lora_config.get("strength_model", 1.0)
                strength_clip = lora_config.get("strength_clip", 1.0)

                if not lora_name:
                    if verbose:
                        print(f"⚠️ 第 {i + 1} 个LoRA: 名称未指定，跳过")
                    continue

                # Resolve the LoRA file: direct path first, then the standard
                # ComfyUI "loras" folder lookup.
                lora_path = None
                full_path = None

                if os.path.isfile(lora_name):
                    lora_path = lora_name
                    if verbose:
                        print(f"✅ 第 {i + 1} 个LoRA: 使用直接文件路径 - {lora_path}")
                else:
                    full_path = folder_paths.get_full_path("loras", lora_name)
                    if full_path and os.path.isfile(full_path):
                        lora_path = full_path
                        if verbose:
                            print(f"✅ 第 {i + 1} 个LoRA: 使用标准路径 - {lora_path}")

                # Neither resolution worked — skip this entry.
                if lora_path is None:
                    if verbose:
                        print(
                            f"⚠️ 第 {i + 1} 个LoRA: 文件不存在 - 直接路径: '{lora_name}', 标准路径: '{full_path if full_path else 'N/A'}'")
                    continue

                try:
                    # safe_load=True avoids arbitrary-code execution from pickled files.
                    lora_data = comfy.utils.load_torch_file(lora_path, safe_load=True)

                    if verbose:
                        print(f"🔧 加载LoRA #{i + 1}: {lora_name}")
                        print(f"   模型强度: {strength_model}, CLIP强度: {strength_clip}")

                    # Map raw weights through the official key map, then patch
                    # the UNet and CLIP at their respective strengths.
                    loaded = comfy.lora.load_lora(lora_data, key_map)
                    model_patcher.add_patches(loaded, strength_model)
                    clip_patcher.add_patches(loaded, strength_clip)

                    applied_count += 1
                    if verbose:
                        print(f"    ✅ 应用LoRA成功")

                except Exception as e:
                    # One bad LoRA must not abort the rest of the list.
                    if verbose:
                        print(f"❌ 加载LoRA #{i + 1} 错误: {str(e)}")
                        import traceback
                        traceback.print_exc()

            elapsed = time.time() - start_time
            if verbose:
                print(f"🎉 成功加载 {applied_count}/{len(lora_list)} 个LoRA, 耗时: {elapsed:.2f}s")

            return (model_patcher, clip_patcher)

        except Exception as e:
            # Top-level failure (e.g. invalid JSON): fall back to the unpatched pair.
            print(f"❌ 全局LoRA加载错误: {str(e)}")
            import traceback
            traceback.print_exc()
            return (model, clip)