import folder_paths
import os
import numpy as np
import torch
import einops
import cv2
from comfy.utils import load_torch_file, ProgressBar
from comfy.model_management import unet_offload_device
from ...UL_common.common import get_dtype_by_name, tensor2numpy_cv2, numpy_cv2tensor, get_device_by_name, clean_up, get_filelist_and_folderlist

# Directory containing the YAML model configs bundled with this node pack.
CONFIG_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'Ctrlora_Scripts', 'configs')
# Node-local checkpoint folder, searched in addition to ComfyUI's model dirs.
ckpts_under_node_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'Ctrlora_ckpts')


class Ctrlora_Model_Loader:
    """Build/cache the Ctrlora model (SD1.5 + base ControlNet + optional LoRAs).

    The model and loaded checkpoints are cached on the node instance so that
    re-executions with unchanged inputs skip the expensive rebuild/reload.
    """

    @classmethod
    def INPUT_TYPES(s):
        # Text encoders may live in models/clip or models/text_encoders;
        # only sub-directories are offered in the dropdown.
        clip_dir = os.path.join(folder_paths.models_dir, "clip")
        clip_list = [] if not os.path.exists(clip_dir) else os.listdir(clip_dir)
        text_encoder_dir = os.path.join(folder_paths.models_dir, "text_encoders")
        # Guard against a missing text_encoders dir, same as for clip above.
        text_encoders = clip_list + ([] if not os.path.exists(text_encoder_dir) else os.listdir(text_encoder_dir))
        text_encoder_list = []
        for folder in text_encoders:
            if os.path.isdir(os.path.join(folder_paths.models_dir, "clip", folder)) or os.path.isdir(os.path.join(folder_paths.models_dir, "text_encoders", folder)):
                text_encoder_list.append(folder)

        # Checkpoints come from the node-local ckpt dir plus ComfyUI's
        # controlnet folder.  Build the combined listing once, not twice.
        # Base ControlNets carry no 'rank' in their file name; LoRAs do.
        all_ckpts = get_filelist_and_folderlist(ckpts_under_node_dir, ['.ckpt'])[0] + folder_paths.get_filename_list("controlnet")
        control_net_list = [file for file in all_ckpts if 'ctrlora_sd15_basecn700k' in file and 'rank' not in file]
        ctrlora_list = [file for file in all_ckpts if 'ctrlora_sd15_basecn700k' in file and 'rank' in file]
        return {
            "required": {
                "ckpt_name": (["None"] + folder_paths.get_filename_list("checkpoints"), ),
                "clip_name": (["None"] + text_encoder_list, ),
                "control_net_name": (["None"] + control_net_list, ),
                "ctrlora1_name": (["None"] + ctrlora_list, ),
                "ctrlora2_name": (["None"] + ctrlora_list, ),
                "ctrlora3_name": (["None"] + ctrlora_list, ),
                "ctrlora4_name": (["None"] + ctrlora_list, ),
                "ctrlora5_name": (["None"] + ctrlora_list, ),
                "ctrlora6_name": (["None"] + ctrlora_list, ),
                "ctrlora7_name": (["None"] + ctrlora_list, ),
                "dtype": (["auto","fp16","bf16","fp32", "fp8_e4m3fn", "fp8_e4m3fnuz", "fp8_e5m2", "fp8_e5m2fnuz"],{"default":"auto"}),
            },
        }

    RETURN_TYPES = ("Ctrlora_Model", "STRING", )
    RETURN_NAMES = ("model", "ckpt_name", )
    FUNCTION = "ctrlora_loader"
    CATEGORY = "UL Group/Image Generation"
    TITLE = "Ctrlora Model Loader"
    DESCRIPTION = ""

    def __init__(self):
        # Cache: the built model, the config it was built from, and the
        # checkpoint paths whose weights are currently loaded into it.
        self.model = None
        self.last_config = None
        self.last_ckpts = None

    def ctrlora_loader(self, ckpt_name, clip_name, control_net_name, ctrlora1_name, ctrlora2_name, ctrlora3_name, ctrlora4_name, ctrlora5_name, ctrlora6_name, ctrlora7_name, dtype, debug=False):
        """Resolve checkpoint paths, (re)build the model if the config changed,
        (re)load weights if any checkpoint path changed, and return the model
        dict consumed by Ctrlora_Sampler.

        Raises:
            ValueError: when no checkpoint / base ControlNet is selected.
        """
        # Explicit validation instead of `assert` (asserts vanish under -O).
        if ckpt_name == 'None':
            raise ValueError('Ctrlora_Model_Loader: a checkpoint (ckpt_name) must be selected.')
        if control_net_name == 'None':
            raise ValueError('Ctrlora_Model_Loader: a base ControlNet (control_net_name) must be selected.')

        ckpt_path = folder_paths.get_full_path_or_raise("checkpoints", ckpt_name)
        clip_path = os.path.join(folder_paths.models_dir, 'clip', clip_name) if os.path.exists(os.path.join(folder_paths.models_dir, 'clip', clip_name)) else os.path.join(folder_paths.models_dir, "text_encoders", clip_name)

        # Node-local checkpoints take priority over ComfyUI's controlnet dir.
        controlnet_path = os.path.join(ckpts_under_node_dir, control_net_name) if os.path.isfile(os.path.join(ckpts_under_node_dir, control_net_name)) else folder_paths.get_full_path_or_raise("controlnet", control_net_name)

        ctrlora_names = [ctrlora1_name, ctrlora2_name, ctrlora3_name, ctrlora4_name, ctrlora5_name, ctrlora6_name, ctrlora7_name]
        lora_ckpts = []
        for ctrlora_filename in ctrlora_names:
            if ctrlora_filename != 'None':
                ctrlora_path = os.path.join(ckpts_under_node_dir, ctrlora_filename) if os.path.isfile(os.path.join(ckpts_under_node_dir, ctrlora_filename)) else os.path.join(folder_paths.models_dir, "controlnet", ctrlora_filename)
                lora_ckpts.append(ctrlora_path)

        lora_num = len(lora_ckpts)
        # LoRA config (keyed off the first LoRA's rank) wins over the base one.
        current_config = get_config(lora_ckpts[0], lora_num) if lora_num > 0 else get_config_base(controlnet_path)

        dtype = get_dtype_by_name(dtype)

        # Bug fix vs. previous revision: the cache bookkeeping was never
        # updated, so a cache hit left `model` unbound.  Rebuild only when the
        # config actually changed (or nothing is cached yet).
        rebuilt = False
        if self.model is None or current_config != self.last_config:
            from .Ctrlora_Scripts.cldm.model import create_model
            if debug:
                print('\033[93m', f'Loading config: {current_config}', '\033[0m')
            if not os.path.exists(clip_path):
                clip_path = None
            self.model = create_model(current_config, cond_stage_path=clip_path, lora_num=lora_num)
            self.last_config = current_config
            rebuilt = True

        ckpt_key = (ckpt_path, clip_path, controlnet_path, lora_ckpts)
        # A freshly built model always needs its weights, even if the
        # checkpoint paths are unchanged from the previous run.
        if rebuilt or self.last_ckpts != ckpt_key:
            load_state_dict_sd(self.model, ckpt_path)
            load_state_dict_cn(self.model, controlnet_path)
            if lora_num > 0:
                load_state_dict_lora(self.model, lora_ckpts)
            self.last_ckpts = ckpt_key
            if debug:
                print('\033[93m', f"Loading checkpoints: \n{ckpt_path}\n{clip_path}\n{controlnet_path}\n{lora_ckpts}", '\033[0m')
                print('\033[93m', f'Checkpoints loaded', '\033[0m')

        model = {
            'model': self.model.to(dtype),
            'lora_num': lora_num,
            'ckpts': self.last_ckpts,
        }

        return (model, ckpt_name, )
        
class Ctrlora_AIO_Preprocessor:
    """All-in-one condition-image preprocessor from the ctrlora gradio app."""

    @classmethod
    def INPUT_TYPES(s):
        det_choices = [
            'None', 'canny', 'hed', 'seg', 'depth', 'depth_large', 'normal', 'normal_large', 'openpose', 'hedsketch', 'grayscale', 'blur', 'pad',  # from unicontrol
            'lineart', 'lineart_coarse', 'lineart_anime', 'shuffle', 'mlsd',                                        # from controlnet v1.1
            'palette', 'pixel', 'illusion', 'densepose', 'lineart_anime_with_color_prompt',                         # proposed new conditions
        ]
        return {
            "required": {
                "image": ("IMAGE", ),
                "preprocessor": (det_choices, {"tooltip": "Do not select densepose, use comfyui_controlnet_aux's 'AIO Aux Preprocessor' node instead, it requires some librarys not easy to install."}),
                "resolution": ("INT", {"default": 512, "min": 128, "max": 1024, "steps": 1}),
                "pad_ratio": ("FLOAT", {"default": 0.5, "min": 0, "max": 1, "step": 0.01}),
                },
        }

    RETURN_TYPES = ("IMAGE",)
    RETURN_NAMES = ("image", )
    FUNCTION = "execute"
    CATEGORY = "UL Group/Image Generation"
    TITLE = "Ctrlora AIO Preprocessor"
    DESCRIPTION = "AIO Aux Preprocessor from ctrlora gradio app, most models read from custom_node (comfyui_controlnet_aux) except densepose、depth_large and normal_large."

    def execute(self, preprocessor, image, resolution=512, pad_ratio=0.5):
        """Run the selected annotator on `image` and return the detected map.

        Selecting 'None' passes the input image through unchanged.
        Annotators are imported lazily so unused ones cost nothing.
        """
        # Bug fix: the dropdown value is 'None' (capitalized); the previous
        # check compared against 'none', so selecting 'None' fell through the
        # branch chain and crashed on an unbound detector.
        if preprocessor != "None":
            image = tensor2numpy_cv2(image)
            # NOTE: the old `isinstance(preprocessor, XDetector)` guards were
            # dead code — `preprocessor` is a string — and have been removed.
            if preprocessor == 'canny':
                from .Ctrlora_Scripts.annotator.canny import CannyDetector
                preprocessor_pipe = CannyDetector()
                params = dict(low_threshold=100, high_threshold=200)
            elif preprocessor == 'hed':
                from .Ctrlora_Scripts.annotator.hed import HEDdetector
                preprocessor_pipe = HEDdetector()
                params = dict()
            elif preprocessor == 'seg':
                from .Ctrlora_Scripts.annotator.uniformer import UniformerDetector
                preprocessor_pipe = UniformerDetector()
                params = dict()
            elif preprocessor in ['depth', 'depth_large', 'normal', 'normal_large']:
                from .Ctrlora_Scripts.annotator.midas import MidasDetector
                preprocessor_pipe = MidasDetector(large=('large' in preprocessor))
                params = dict()
            elif preprocessor == 'openpose':
                from .Ctrlora_Scripts.annotator.openpose import OpenposeDetector
                preprocessor_pipe = OpenposeDetector()
                params = dict()
            elif preprocessor == 'hedsketch':
                from .Ctrlora_Scripts.annotator.hedsketch import HEDSketchDetector
                preprocessor_pipe = HEDSketchDetector()
                params = dict()
            elif preprocessor == 'grayscale':
                from .Ctrlora_Scripts.annotator.grayscale import GrayscaleConverter
                preprocessor_pipe = GrayscaleConverter()
                params = dict()
            elif preprocessor == 'blur':
                from .Ctrlora_Scripts.annotator.blur import Blurrer
                preprocessor_pipe = Blurrer()
                # Random odd kernel size (training-time augmentation scheme).
                ksize = np.random.randn() * 0.5 + 0.5
                ksize = int(ksize * (50 - 5)) + 5
                ksize = ksize * 2 + 1
                params = dict(ksize=ksize)
            elif preprocessor == 'pad':
                from .Ctrlora_Scripts.annotator.pad import Padder
                preprocessor_pipe = Padder()
                params = dict(top_ratio=pad_ratio, bottom_ratio=pad_ratio, left_ratio=pad_ratio, right_ratio=pad_ratio)
            elif preprocessor in ['lineart', 'lineart_coarse']:
                from .Ctrlora_Scripts.annotator.lineart import LineartDetector
                preprocessor_pipe = LineartDetector()
                params = dict(coarse=(preprocessor == 'lineart_coarse'))
            elif preprocessor in ['lineart_anime', 'lineart_anime_with_color_prompt']:
                from .Ctrlora_Scripts.annotator.lineart_anime import LineartAnimeDetector
                preprocessor_pipe = LineartAnimeDetector()
                params = dict()
            elif preprocessor == 'shuffle':
                from .Ctrlora_Scripts.annotator.shuffle import ContentShuffleDetector
                preprocessor_pipe = ContentShuffleDetector()
                params = dict()
            elif preprocessor == 'mlsd':
                from .Ctrlora_Scripts.annotator.mlsd import MLSDdetector
                preprocessor_pipe = MLSDdetector()
                thr_v = np.random.rand() * 1.9 + 0.1  # [0.1, 2.0]
                thr_d = np.random.rand() * 19.9 + 0.1  # [0.1, 20.0]
                params = dict(thr_v=thr_v, thr_d=thr_d)
            elif preprocessor == 'palette':
                from .Ctrlora_Scripts.annotator.palette import PaletteDetector
                preprocessor_pipe = PaletteDetector()
                params = dict()
            elif preprocessor == 'pixel':
                from .Ctrlora_Scripts.annotator.pixel import Pixelater
                preprocessor_pipe = Pixelater()
                n_colors = np.random.randint(8, 17)  # [8,16] -> 3-4 bits
                scale = np.random.randint(4, 9)  # [4,8]
                params = dict(n_colors=n_colors, scale=scale, down_interpolation=cv2.INTER_LANCZOS4)
            elif preprocessor == 'illusion':
                from .Ctrlora_Scripts.annotator.illusion import IllusionConverter
                preprocessor_pipe = IllusionConverter()
                params = dict()
            elif preprocessor == 'densepose':
                from .Ctrlora_Scripts.annotator.densepose import DenseposeDetector
                preprocessor_pipe = DenseposeDetector()
                params = dict()
            else:
                raise ValueError(f'Unknown preprocessor: {preprocessor}')

            with torch.no_grad():
                from .Ctrlora_Scripts.annotator.util import HWC3, resize_image
                input_image = HWC3(image)
                resized_image = resize_image(input_image, resolution)
                detected_map = preprocessor_pipe(resized_image, **params)
                # Midas returns (depth, normal); pick the requested map.
                if 'depth' in preprocessor:
                    detected_map = detected_map[0]
                elif 'normal' in preprocessor:
                    detected_map = detected_map[1]
                detected_map = HWC3(detected_map)
                image = numpy_cv2tensor(detected_map)

        return (image, )
        
class Ctrlora_Sampler:
    """DDIM sampler for the Ctrlora model produced by Ctrlora_Model_Loader."""

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "ctrlora_params": ("Ctrlora_Params",),
                "ctrlora_model": ("Ctrlora_Model", ),
                "prompt": ("STRING", {"default": "masterpiece, best quality, \n\n'General-long': masterpiece, best quality, high quality, award winning, award-winning, \n\n'Realistic': RAW photo, 8K UHD, DSLR, film grain, highres, high resolution, high detail, extremely detailed, soft lighting, award winning photography.", "multiline": True}),
                "n_prompt": ("STRING", {"default": "worst quality, low quality, NSFW, \n\n'General-long': worst quality, low quality, bad quality, normal quality, lowres, low resolution, JPEG artifacts, blurry, bad composition, cropped, mutilated, out of frame, duplicate, multiple views, multiple_views, tiling, ugly, morbid, distorted, disgusting, watermark, signature.\n\n'General-human': bad anatomy, wrong anatomy, bad proportions, gross proportions, deformed, deformed iris, deformed pupils, inaccurate eyes, cross-eye, cloned face, bad hands, mutation, mutated hands, mutation hands, mutated fingers, mutation fingers, fused fingers, too many fingers, extra fingers, extra digit, missing fingers, fewer digits, malformed limbs, inaccurate limb, extra limbs, missing limbs, floating limbs, disconnected limbs, extra arms, extra legs, missing arms, missing legs, error, bad legs, error legs, bad feet, long neck, disfigured, amputation, dehydrated, nude, thighs, cleavage.\n\n'Realistic': semi-realistic, CGI, 3D, render, sketch, drawing, comic, cartoon, anime, vector art.\n\n'2.5D': sketch, drawing, comic, cartoon, anime, vector art.\n\n'Painting': photorealistic, CGI, 3D, render.", "multiline": True}),
                "seed": ("INT", {"default": 88888888, "min": 0, "max": 4294967296}),
                "steps": ("INT", {"default": 20, "min": 1, "max": 10000, "tooltip": "Default ddim steps to 50."}),
                "strength": ("FLOAT", {"default": 1.00, "min": 0, "max": 2, "step": 0.01}),
                "cfg": ("FLOAT", {"default": 7.5, "min": 0.0, "max": 14.0, "step": 0.1, "round": 0.01, "tooltip": "Default ddim cfg to 7.5."}),
                "eta": ("FLOAT", {"default": 0, "min": 0, "max": 1, "step": 0.1}),
                "batch_size": ("INT", {"default": 1, "min": 1, "max": 10}),
                "guess_mode": ("BOOLEAN", {"default": False, "label_on": "yes", "label_off": "no"}),
                "device": (["auto", "cuda", "cpu", "mps", "xpu", "meta"],{"default": "auto"}), 
                "keep_model_loaded": ("BOOLEAN", {"default": True, "label_on": "yes", "label_off": "no", "tooltip": "Warning: do not delete model unless this node no longer needed, it will try release device_memory and ram. if checked and want to continue node generation, use ComfyUI-Manager `Free model and node cache` to reset node state or change parameter in Loader node to activate.\n注意：仅在这个节点不再需要时删除模型，将尽量释放系统内存和设备专用内存。如果删除后想继续使用此节点，使用ComfyUI-Manager插件的`Free model and node cache`重置节点状态或者更换模型加载节点的参数来激活。"}),
                "keep_model_device": ("BOOLEAN", {"default": True, "label_on": "comfy", "label_off": "device", "tooltip": "Keep model in comfy_auto_unet_offload_device (HIGH_VRAM: device, Others: cpu) or device_memory after generation.\n生图完成后，模型转移到comfy自动选择设备(HIGH_VRAM: device, 其他: cpu)或者保留在设备专用内存上。"}),
            },
        }

    RETURN_TYPES = ("IMAGE",)
    RETURN_NAMES = ("image",)
    FUNCTION = "Ctrlora_Sampler"
    CATEGORY = "UL Group/Image Generation"
    TITLE = "Ctrlora Sampler"
    DESCRIPTION = "ReadME from original ctrlora repo: We first train a Base ControlNet along with condition-specific LoRAs on base conditions with a large-scale dataset. Then, our Base ControlNet can be efficiently adapted to novel conditions by new LoRAs with as few as 1,000 images and less than 1 hour on a single GPU."

    def __init__(self):
        # Cached model and checkpoint fingerprint so repeated runs with the
        # same loader output skip re-transferring the model to the device.
        self.model = None
        self.ckpts = None

    def Ctrlora_Sampler(self, ctrlora_model, ctrlora_params, prompt, n_prompt, seed, steps, strength, cfg, eta, batch_size, guess_mode, device, keep_model_device, keep_model_loaded=False):
        """Run DDIM sampling conditioned on the control image(s) and prompts.

        Returns a (batch, H, W, C) image tensor tuple.
        """
        device = get_device_by_name(device)
        from pytorch_lightning import seed_everything
        from .Ctrlora_Scripts.cldm.ddim_hacked import DDIMSampler

        # Adopt the incoming model on first run or when checkpoints changed.
        if self.model is None or self.ckpts != ctrlora_model['ckpts']:
            self.model = ctrlora_model['model'].to(device)
            self.ckpts = ctrlora_model['ckpts']
        elif 'cpu' in self.model.device.type:
            # Model was offloaded after a previous run; move it back.
            self.model.to(device)

        dtype = self.model.dtype

        # One control image per active LoRA; plain base run uses one image.
        control_img_num = ctrlora_model['lora_num'] if ctrlora_model['lora_num'] > 1 else 1

        ddim_sampler = DDIMSampler(self.model)
        seed_everything(seed)

        with torch.no_grad():
            W, H = ctrlora_params['W'], ctrlora_params['H']
            cond = []
            un_cond = []
            for i, img in enumerate(ctrlora_params['images']):
                if (i + 1) <= control_img_num:
                    control = img.to(device)
                    control = torch.stack([control for _ in range(batch_size)], dim=0)
                    control = einops.rearrange(control, 'b h w c -> b c h w').clone()

                    # Match the model's dtype (e.g. half-precision inference).
                    control = control.to(dtype)

                    condition = {"c_concat": [control], "c_crossattn": [self.model.get_learned_conditioning([prompt] * batch_size)]}
                    un_condition = {"c_concat": None if guess_mode else [control], "c_crossattn": [self.model.get_learned_conditioning([n_prompt] * batch_size)]}

                    # Guess mode decays strength across the 13 control scales.
                    # Magic number. IDK why. Perhaps because 0.825**12<0.01 but 0.826**12>0.01
                    # (renamed the comprehension variable to avoid shadowing the outer loop index)
                    self.model.control_scales = [strength * (0.825 ** float(12 - scale_idx)) for scale_idx in range(13)] if guess_mode else ([strength] * 13)

                    cond.append(condition)
                    un_cond.append(un_condition)
            if control_img_num == 1:
                cond = cond[0]
                un_cond = un_cond[0]

            if ctrlora_model['lora_num'] > 1:
                # Keep only the weights for the active LoRA slots.
                lora_weights = ctrlora_params['lora_weights']
                self.model.lora_weights = list(lora_weights[:ctrlora_model['lora_num']])

            shape = (4, H // 8, W // 8)
            Comfy_ProgressBar = ProgressBar(steps)
            def callback(*_):
                Comfy_ProgressBar.update(1)
            # DDIM intermediates are not used; discard them.
            samples, _ = ddim_sampler.sample(steps, batch_size,
                                             shape, cond, verbose=False, eta=eta,
                                             unconditional_guidance_scale=cfg,
                                             unconditional_conditioning=un_cond,
                                             callback=callback,
                                             )

            samples = samples.to(dtype)
            x_samples = self.model.decode_first_stage(samples)
            # Latents -> uint8 HWC images in [0, 255].
            x_samples = (einops.rearrange(x_samples, 'b c h w -> b h w c') * 127.5 + 127.5).cpu().numpy().clip(0, 255).astype(np.uint8)
            results = [x_samples[i] for i in range(batch_size)]

            result = []
            for img in results:
                image = numpy_cv2tensor(img)
                result.append(image)

            result = torch.cat(result, dim=0)

            if keep_model_loaded:
                if keep_model_device:
                    # Offload per comfy's policy so VRAM is freed between runs.
                    self.model.to(unet_offload_device())
                    clean_up()
            else:
                # Drop all references so the memory can actually be reclaimed.
                del ctrlora_model['model']
                self.model = None
                clean_up()

        return (result, )
        
class Ctrlora_Condition_Params:
    """Bundle up to seven control images plus per-LoRA weights for the sampler."""

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "control_img1": ("IMAGE",),
                "img_resolution": ("INT", {"default": 512, "min": 128, "max": 3840, "step": 64, "tooltip": "The shorter side of target_image, the longer side depends on input image."}),
            },
            "optional": {
                "lora1_weight": ("FLOAT", {"default": 1.00, "min": 0, "max": 2, "step": 0.01}),
                "control_img2": ("IMAGE", ),
                "lora2_weight": ("FLOAT", {"default": 1.00, "min": 0, "max": 2, "step": 0.01}),
                "control_img3": ("IMAGE", ),
                "lora3_weight": ("FLOAT", {"default": 1.00, "min": 0, "max": 2, "step": 0.01}),
                "control_img4": ("IMAGE", ),
                "lora4_weight": ("FLOAT", {"default": 1.00, "min": 0, "max": 2, "step": 0.01}),
                "control_img5": ("IMAGE", ),
                "lora5_weight": ("FLOAT", {"default": 1.00, "min": 0, "max": 2, "step": 0.01}),
                "control_img6": ("IMAGE", ),
                "lora6_weight": ("FLOAT", {"default": 1.00, "min": 0, "max": 2, "step": 0.01}),
                "control_img7": ("IMAGE", ),
                "lora7_weight": ("FLOAT", {"default": 1.00, "min": 0, "max": 2, "step": 0.01}),
            }
        }

    RETURN_TYPES = ("Ctrlora_Params", "IMAGE", )
    RETURN_NAMES = ("ctrlora_params", "image", )
    FUNCTION = "params"
    CATEGORY = "UL Group/Image Generation"
    TITLE = "Ctrlora Params"

    def params(self, img_resolution, lora1_weight, lora2_weight, lora3_weight, lora4_weight, lora5_weight, lora6_weight, lora7_weight, control_img1, control_img2=None, control_img3=None, control_img4=None, control_img5=None, control_img6=None, control_img7=None):
        """Resize every supplied control image to a common (H, W) — derived
        from the first image and `img_resolution` — and return them stacked
        along the batch dimension together with the LoRA weights.
        """
        from .Ctrlora_Scripts.annotator.util import resize_image

        imgs = [control_img1, control_img2, control_img3, control_img4, control_img5, control_img6, control_img7]

        # Target size: first image resized so its shorter side is img_resolution.
        first_img = tensor2numpy_cv2(control_img1)
        H, W, C = resize_image(first_img, img_resolution).shape

        images = []
        for img in imgs:
            # Bug fix: tensors must be compared to None with `is`, not `!=`.
            if img is not None:
                img = tensor2numpy_cv2(img)
                # cv2.resize yields exactly (H, W), so the old center-crop
                # code after it was dead and has been removed.
                img = cv2.resize(img, (W, H), interpolation=cv2.INTER_LINEAR)
                images.append(numpy_cv2tensor(img))
        images = torch.cat(images, dim=0)

        ctrlora_params = {
            'images': images,
            'lora_weights': [lora1_weight, lora2_weight, lora3_weight, lora4_weight, lora5_weight, lora6_weight, lora7_weight],
            'H': H,
            'W': W,
        }

        return (ctrlora_params, images, )
        
        
# ComfyUI node registration: unique node identifier -> implementing class.
NODE_CLASS_MAPPINGS = {
    "UL_Image_Generation_Ctrlora_loader": Ctrlora_Model_Loader,
    "UL_Image_Generation_Ctrlora_AIO_Preprocessor": Ctrlora_AIO_Preprocessor,
    "UL_Image_Generation_Ctrlora_Sampler": Ctrlora_Sampler,
    "UL_Image_Generation_Ctrlora_Condition_Params": Ctrlora_Condition_Params,
}
        
def check_key(k):
    """Return True when state-dict key *k* belongs to the LoRA/switchable part
    of the control model (these keys are loaded by load_state_dict_lora and
    skipped by load_state_dict_cn)."""
    markers = ('lora_layer', 'zero_convs', 'middle_block_out', 'norm')
    return any(marker in k for marker in markers)


def load_state_dict_sd(model, sd_ckpt):
    """Load Stable Diffusion weights from *sd_ckpt* into *model*.

    Non-strict, so keys missing on either side are tolerated.
    """
    weights = load_torch_file(sd_ckpt, safe_load=True)
    model.load_state_dict(weights, strict=False)  # noqa
    del weights


def load_state_dict_cn(model, cn_ckpt):
    """Load base-ControlNet weights from *cn_ckpt* into *model*.

    Only `control_model.*` keys are kept; LoRA/switchable keys (per
    check_key) are excluded because load_state_dict_lora handles them.
    """
    full_sd = load_torch_file(cn_ckpt)
    filtered = {}
    for key, tensor in full_sd.items():
        if key.startswith('control_model') and not check_key(key):
            filtered[key] = tensor
    model.load_state_dict(filtered, strict=False)  # noqa
    del full_sd, filtered


def load_state_dict_lora(model, lora_ckpts):
    """Load each LoRA checkpoint into its own switchable slot.

    For slot *i*: switch the control model to that slot, load only the
    LoRA/switchable keys (per check_key), then copy them into the
    switchable storage.
    """
    for slot, ckpt_path in enumerate(lora_ckpts):
        raw_sd = load_torch_file(ckpt_path, safe_load=True)
        lora_sd = {key: val for key, val in raw_sd.items() if check_key(key)}
        model.control_model.switch_lora(slot)
        model.load_state_dict(lora_sd, strict=False)  # noqa
        model.control_model.copy_weights_to_switchable()
        del raw_sd, lora_sd


def get_config(lora_ckpt, lora_num=1):
    """Return the inference config path matching the LoRA rank embedded in
    *lora_ckpt*'s file name.

    Raises:
        ValueError: if lora_num < 1 or no known rank marker is found.
    """
    if lora_num < 1:
        raise ValueError('Unknown config')
    # First matching marker wins, mirroring the original if/elif order.
    for rank in ('rank32', 'rank64', 'rank128', 'rank256', 'rank512'):
        if rank in lora_ckpt:
            return os.path.join(CONFIG_DIR, f'inference/ctrlora_sd15_{rank}_1lora.yaml')
    raise ValueError('Unknown config')

def reformat_prompt(prompt):
    """Normalize a prompt into a single comma-separated line.

    Converts '[[', ']]' and newlines to commas, collapses whitespace and
    repeated commas, trims edges, then standardizes to ', ' separators.
    """
    import re
    substitutions = (
        (r'\[\[', ','),   # opening tag bracket -> separator
        (r']]', ','),     # closing tag bracket -> separator
        (r'\n', ','),     # newlines -> separator
        (r'\s+', ' '),    # collapse whitespace runs
        (r',\s+', ','),   # drop space after commas
        (r'\s+,', ','),   # drop space before commas
        (r',+', ','),     # collapse comma runs
    )
    for pattern, replacement in substitutions:
        prompt = re.sub(pattern, replacement, prompt)
    prompt = prompt.strip(',').strip()
    return re.sub(r',', ', ', prompt)

def get_config_base(cn_ckpt):
    """Return the base model config path matching the ControlNet checkpoint
    name *cn_ckpt*.

    Raises:
        ValueError: if the name matches no known ControlNet variant.
    """
    # Order matters: 'cn' is a substring of 'cnlite' and 'cnxs', so the
    # generic 'cn' marker must be tested last.
    marker_to_config = (
        ('ctrlora', 'ctrlora_finetune_sd15_full.yaml'),
        ('cnlite', 'cnlite_sd15.yaml'),
        ('cnxs', 'cnxs_sd15.yaml'),
        ('cn', 'cldm_v15.yaml'),
    )
    for marker, config_name in marker_to_config:
        if marker in cn_ckpt:
            return os.path.join(CONFIG_DIR, config_name)
    raise ValueError(f'Unknown cn_ckpt: {cn_ckpt}')