import torch
from diffusers import DPMSolverMultistepScheduler
from PIL import Image
from DeepCache import DeepCacheSDHelper
from prompt_utils import *
from diffusers import StableDiffusionXLControlNetPipeline, ControlNetModel, AutoencoderKL, EulerAncestralDiscreteScheduler, StableDiffusionXLPipeline
import cv2
import numpy as np
from logging_config import setup_logger
from model_config import MODEL_CONFIG, LORA_CONFIG, STYLE_CONFIG

logger = setup_logger(__name__)

# Module-level RNG used for all non-Anime styles; seeded so results are reproducible.
seed_value = 2025  # any integer works as the seed
generator = torch.Generator().manual_seed(seed_value)

class CartoonGenerator:
    """Generate stylized (cartoon / line-art) avatars from input images.

    Wraps an SDXL pipeline combined with a Canny-edge ControlNet, switchable
    LoRA adapters, DeepCache caching and FreeU enhancement.
    """

    def __init__(
        self,
        guidance_scale=2,
        base_strength=0.8,
        num_inference_steps=30,
        device="cuda",
    ):
        """Build the pipeline and store default generation parameters.

        Args:
            guidance_scale: Classifier-free guidance scale.
            base_strength: Denoising strength passed to the pipeline.
            num_inference_steps: Number of scheduler steps per image.
            device: Device the pipeline is moved to (e.g. "cuda").
        """
        logger.info("Initializing CartoonGenerator...")
        self.pipe = self.init_model(device)
        self.guidance_scale = guidance_scale
        self.base_strength = base_strength
        self.num_inference_steps = num_inference_steps
        self.negative_prompt = linedraw_negative_prompt
        self.prompt = linedraw_prompt
        # Fix: previously this attribute was only set by switch_lora(), so
        # calling generate_avatar() first raised AttributeError. Seed it from
        # the same default config switch_lora() falls back to.
        self.controlnet_conditioning_scale = STYLE_CONFIG["default"][
            "controlnet_conditioning_scale"
        ]
        logger.info("CartoonGenerator initialized successfully")

    def init_model(self, device):
        """Initialize the SDXL + ControlNet pipeline with LoRA, DeepCache and FreeU.

        Returns:
            A ready-to-use StableDiffusionXLControlNetPipeline on `device`.
        """
        base_config = MODEL_CONFIG["base_model"]
        pipe = StableDiffusionXLPipeline.from_single_file(
            base_config["path"],
            torch_dtype=torch.float16,
            use_safetensors=base_config["use_safetensors"],
            local_files_only=base_config["local_files_only"],
            enable_pag=base_config["enable_pag"],
            pag_applied_layers=base_config["pag_applied_layers"],
        )

        # Load the ControlNet. Build kwargs from the config dict, then fill in
        # defaults only when absent — previously torch_dtype/variant were passed
        # both explicitly and via **config, which would raise a duplicate-keyword
        # TypeError as soon as the config dict contained either key.
        controlnet_config = MODEL_CONFIG["controlnet"]
        controlnet_kwargs = {
            k: v for k, v in controlnet_config.items() if k != "path"
        }
        controlnet_kwargs.setdefault("torch_dtype", torch.float16)
        controlnet_kwargs.setdefault("variant", "fp16")
        controlnet = ControlNetModel.from_pretrained(
            controlnet_config["path"],
            **controlnet_kwargs,
        )

        # Rebuild as a ControlNet pipeline, reusing the already-loaded components.
        pipe = StableDiffusionXLControlNetPipeline(
            **pipe.components, controlnet=controlnet
        )
        # pipe.enable_xformers_memory_efficient_attention()

        # DeepCache: reuse cached UNet features every few steps to speed up inference.
        helper = DeepCacheSDHelper(pipe=pipe)
        helper.set_params(
            cache_interval=3,
            cache_branch_id=0,
        )
        helper.enable()
        # pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)

        # Enable FreeU diffusion-process enhancement.
        pipe.enable_freeu(s1=1.2, s2=1.5, b1=1.1, b2=1.2)
        # Use a faster multistep scheduler with the existing scheduler config.
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(
            pipe.scheduler.config
        )

        pipe.to(device)

        # Load the default LoRA adapter and fuse it into the UNet weights.
        default_lora = LORA_CONFIG["default"]
        pipe.load_lora_weights(
            default_lora["path"],
            weight_name=default_lora["weight_name"],
            adapter_name=default_lora["adapter_name"],
            local_files_only=True,
        )
        pipe.set_adapters([default_lora["adapter_name"]], adapter_weights=[1.0])
        pipe.fuse_lora()
        logger.info("Model initialization completed")
        return pipe

    def switch_lora(self, style):
        """Swap LoRA adapters and generation parameters for the given style.

        Unknown styles fall back to the "default" entries of STYLE_CONFIG /
        LORA_CONFIG.
        """
        self.pipe.unfuse_lora()
        self.pipe.unload_lora_weights()

        # Resolve style and LoRA configuration (fall back to defaults).
        style_config = STYLE_CONFIG.get(style, STYLE_CONFIG["default"])
        lora_config = LORA_CONFIG.get(style, LORA_CONFIG["default"])

        # Load the primary LoRA for this style.
        self.pipe.load_lora_weights(
            lora_config["path"],
            weight_name=lora_config["weight_name"],
            adapter_name=lora_config["adapter_name"],
            local_files_only=True,
        )

        adapters = [lora_config["adapter_name"]]
        weights = [1.0]

        # Optionally stack a secondary LoRA if the style defines one.
        if "additional_lora" in lora_config:
            additional = lora_config["additional_lora"]
            self.pipe.load_lora_weights(
                additional["path"],
                weight_name=additional["weight_name"],
                adapter_name=additional["adapter_name"],
                local_files_only=True,
            )
            adapters.append(additional["adapter_name"])
            weights.append(additional["weight"])

        # Activate and fuse the selected adapter set.
        self.pipe.set_adapters(adapters, adapter_weights=weights)
        self.pipe.fuse_lora()

        # Update per-style generation parameters.
        self.prompt = style_config["prompt"]
        self.guidance_scale = style_config["guidance_scale"]
        self.base_strength = style_config["base_strength"]
        self.controlnet_conditioning_scale = style_config[
            "controlnet_conditioning_scale"
        ]

        logger.info(f"LoRA style switched to {style} successfully")

    # NOTE(review): name is misspelled ("procecss") but kept for backward
    # compatibility with existing callers.
    def canny_procecss(self, image: Image):
        """Return a 3-channel Canny edge map of `image` as a PIL Image."""
        image = np.array(image)

        low_threshold = 100
        high_threshold = 200

        image = cv2.Canny(image, low_threshold, high_threshold)
        # Canny output is single-channel; replicate to 3 channels for ControlNet.
        image = image[:, :, None]
        image = np.concatenate([image, image, image], axis=2)
        image = Image.fromarray(image)

        return image

    def generate_avatar(self, image, height, width, style) -> Image:
        """Generate a stylized avatar guided by the Canny edges of `image`.

        On any error the original input image is returned (best-effort
        behavior preserved from the original implementation; the previous
        `raise` after `return` was unreachable dead code and has been removed).
        """
        try:
            canny_image = self.canny_procecss(image=image)
            # Anime uses a fixed global seed; all other styles use the shared
            # module-level generator (torch.manual_seed returns the default
            # Generator, matching the original behavior exactly).
            rng = torch.manual_seed(0) if style == "Anime" else generator
            result = self.pipe(
                prompt=self.prompt,
                image=canny_image,
                height=height,
                width=width,
                negative_prompt=self.negative_prompt,
                strength=self.base_strength,
                guidance_scale=self.guidance_scale,
                num_inference_steps=self.num_inference_steps,
                controlnet_conditioning_scale=self.controlnet_conditioning_scale,
                pag_scale=3.0,
                clip_skip=1,
                generator=rng,
            ).images[0]
            # Free cached GPU memory between generations.
            if torch.cuda.is_available():
                torch.cuda.empty_cache()
            logger.info("CartoonGenerator generation completed successfully")
            return result
        except Exception as e:
            logger.error(f"Error generating: {str(e)}", exc_info=True)
            if torch.cuda.is_available():
                torch.cuda.empty_cache()
            return image
