import comfy.lora
import comfy.utils
import folder_paths
import hashlib
import json
import os
import time
import logging

class MultiLoraLoader:
    """ComfyUI node that applies a JSON-described stack of LoRAs to a MODEL/CLIP pair.

    ``loras_json`` is expected to be a JSON array of objects shaped like
    ``{"lora_name": str, "strength_model": float, "strength_clip": float}``.
    Each entry is resolved to a file (direct path first, then ComfyUI's
    registered "loras" folders), loaded, and patched onto clones of the
    incoming model and CLIP so the originals are never mutated.
    """

    @classmethod
    def IS_CHANGED(cls, loras_json, **kwargs):
        # Hash the JSON config so ComfyUI re-runs the node whenever it changes.
        return hashlib.sha256(loras_json.encode()).hexdigest()

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "model": ("MODEL",),
                "clip": ("CLIP",),
                "loras_json": ("STRING", {
                    "multiline": True,
                    "default": '[]'
                }),
                "verbose": ("BOOLEAN", {"default": True}),
            }
        }

    RETURN_TYPES = ("MODEL", "CLIP")
    RETURN_NAMES = ("MODEL", "CLIP")
    CATEGORY = "LoRA Tools"
    FUNCTION = "load_loras"

    def _resolve_lora_path(self, lora_name, index, verbose):
        """Resolve *lora_name* to an existing file path, or None if not found.

        Tries the name as a direct filesystem path first, then falls back to
        ComfyUI's registered "loras" folders via ``folder_paths``.
        ``index`` is only used for the 1-based position in log messages.
        """
        if os.path.isfile(lora_name):
            if verbose:
                print(f"✅ 第 {index+1} 个LoRA: 使用直接文件路径 - {lora_name}")
            return lora_name

        full_path = folder_paths.get_full_path("loras", lora_name)
        if full_path and os.path.isfile(full_path):
            if verbose:
                print(f"✅ 第 {index+1} 个LoRA: 使用标准路径 - {full_path}")
            return full_path

        # Neither lookup produced an existing file.
        if verbose:
            print(f"⚠️ 第 {index+1} 个LoRA: 文件不存在 - 直接路径: '{lora_name}', 标准路径: '{full_path if full_path else 'N/A'}'")
        return None

    def load_loras(self, model, clip, loras_json, verbose=True):
        """Apply every LoRA described in *loras_json* to clones of model/clip.

        Returns the patched ``(model, clip)`` tuple. Per-LoRA failures are
        logged and skipped; a top-level failure (e.g. invalid JSON) returns
        the original, unmodified inputs so the workflow keeps running.
        """
        start_time = time.time()
        applied_count = 0

        try:
            # FIX: debug output is now gated on `verbose`, consistent with
            # every other message in this method (it previously always printed).
            if verbose:
                print("正在处理"+loras_json)

            # Parse the JSON configuration string.
            lora_list = json.loads(loras_json)

            if verbose:
                print(f"🔍 开始加载 {len(lora_list)} 个LoRA")

            # Clone so the caller's model/clip objects are never mutated.
            model_patcher = model.clone()
            clip_patcher = clip.clone()

            # Build the LoRA key map covering both the UNet and the CLIP
            # encoder — comfy.lora.load_lora needs it to match weight names.
            key_map = {}
            key_map = comfy.lora.model_lora_keys_unet(model_patcher.model, key_map)
            key_map = comfy.lora.model_lora_keys_clip(clip_patcher.cond_stage_model, key_map)

            for i, lora_config in enumerate(lora_list):
                lora_name = lora_config.get("lora_name", "")
                strength_model = lora_config.get("strength_model", 1.0)
                strength_clip = lora_config.get("strength_clip", 1.0)

                if not lora_name:
                    if verbose:
                        print(f"⚠️ 第 {i+1} 个LoRA: 名称未指定，跳过")
                    continue

                lora_path = self._resolve_lora_path(lora_name, i, verbose)
                if lora_path is None:
                    continue

                try:
                    # safe_load avoids arbitrary pickle execution for
                    # untrusted checkpoint files.
                    lora_data = comfy.utils.load_torch_file(lora_path, safe_load=True)

                    if verbose:
                        print(f"🔧 加载LoRA #{i+1}: {lora_name}")
                        print(f"   模型强度: {strength_model}, CLIP强度: {strength_clip}")

                    # Map raw LoRA weights onto model keys via the official API.
                    loaded = comfy.lora.load_lora(lora_data, key_map)

                    # add_patches only applies the keys each patcher owns, so
                    # the same `loaded` dict is safely handed to both.
                    model_patcher.add_patches(loaded, strength_model)
                    clip_patcher.add_patches(loaded, strength_clip)

                    applied_count += 1
                    if verbose:
                        print("    ✅ 应用LoRA成功")

                except Exception as e:
                    # A single bad LoRA must not abort the remaining ones.
                    if verbose:
                        print(f"❌ 加载LoRA #{i+1} 错误: {str(e)}")
                        import traceback
                        traceback.print_exc()

            elapsed = time.time() - start_time
            if verbose:
                print(f"🎉 成功加载 {applied_count}/{len(lora_list)} 个LoRA, 耗时: {elapsed:.2f}s")

            return (model_patcher, clip_patcher)

        except Exception as e:
            # Top-level failure (bad JSON, clone error, ...): report and fall
            # back to the unpatched inputs.
            print(f"❌ 全局LoRA加载错误: {str(e)}")
            import traceback
            traceback.print_exc()
            return (model, clip)

# Node registration: maps the node's internal key to its implementing class.
NODE_CLASS_MAPPINGS = {
    "Multi Lora Loader": MultiLoraLoader
}

# Human-readable title shown in the ComfyUI node menu (ComfyUI convention:
# pair NODE_CLASS_MAPPINGS with NODE_DISPLAY_NAME_MAPPINGS).
NODE_DISPLAY_NAME_MAPPINGS = {
    "Multi Lora Loader": "Multi LoRA Loader"
}