import os
from typing import Literal
import gradio as gr
#import sys
#sys.path.append('.')
if gr.NO_RELOAD:
    from diffusers import StableDiffusionXLPipeline
    from diffusers import EulerAncestralDiscreteScheduler
    from diffusers import StableDiffusionXLAdapterPipeline, T2IAdapter, EulerAncestralDiscreteScheduler, AutoencoderKL
    from diffusers.utils import load_image, make_image_grid
    from other_pipeline import SDXLLongPromptWeightingPipeline

    import torch
    import numpy as np
    from PIL import Image
    import logging
    import datetime
    

MODEL_PATH = "ckpts"  # directory holding model checkpoint files
CONFIG_PATH = "configs"  # directory holding model/scheduler configs
GENERATE_PATH = "generate_images" # directory where generated images are saved
SCHEDULER = []  # NOTE(review): appears unused in this chunk — confirm before removing


def create_logger(name, log_dir='./logs', level=20):
    """Create a logger that writes to both a timestamped file and the console.

    Args:
        name: Logger name passed to ``logging.getLogger``.
        log_dir: Directory for the log file; created if it does not exist.
        level: Logging level (default 20 == ``logging.INFO``).

    Returns:
        Tuple ``(logger, logname)`` where ``logname`` is the timestamp string
        used as the log file's base name.
    """
    logger = logging.getLogger(name)  # 1. logger
    logger.setLevel(level=level)
    # Simpler and race-free compared to the exists()/makedirs() dance.
    os.makedirs(log_dir, exist_ok=True)
    logname = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
    fh = logging.FileHandler(f'{log_dir}/{logname}.log', mode='w')  # 2. handlers
    sh = logging.StreamHandler()  # 2. handlers

    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S')  # 3. formatter
    fh.setFormatter(formatter)  # 4. attach formatter to handlers
    sh.setFormatter(formatter)
    logger.addHandler(fh)  # 5. attach handlers to logger
    logger.addHandler(sh)
    # NOTE(review): calling this twice with the same name accumulates
    # handlers (duplicate log lines) — confirm callers only call it once.
    return logger, logname



def get_scheduler(scheduler):
    '''Build a diffusers scheduler from its UI display name.

    Scheduler overview: https://blog.csdn.net/Liudef06/article/details/146003491

    Args:
        scheduler: Display name, "DPM++ 2M Karras" or "Euler a".

    Returns:
        A configured scheduler instance, or None for unrecognized names.
    '''
    from diffusers import DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler

    config_id = 'anime_illust_diffusion_xl'
    if scheduler == "DPM++ 2M Karras":
        return DPMSolverMultistepScheduler.from_pretrained(
            config_id,
            subfolder="scheduler",
            algorithm_type="dpmsolver++",
            solver_multistep_mode="2m",  # select DPM++ 2M
            use_karras_sigmas=True       # enable the Karras sigma schedule
        )
    elif scheduler == "Euler a":
        # BUG FIX: the original built this scheduler but never returned it,
        # so "Euler a" silently yielded None.
        return EulerAncestralDiscreteScheduler.from_pretrained(config_id, subfolder="scheduler")
    return None


def list_models():
    """List the checkpoint entries available to the UI.

    Returns:
        List of file/directory names inside the checkpoints directory.
    """
    # Use the module-level MODEL_PATH constant instead of repeating the
    # "ckpts" literal, so the path is defined in exactly one place.
    return os.listdir(MODEL_PATH)

def get_pipe(sd_model, vae, scheduler, mode='t2i', control: Literal['t2i-adapter', None] = None):
    """Build a diffusion pipeline for the requested mode.

    Args:
        sd_model: Checkpoint file name under MODEL_PATH.
        vae: Currently unused here — kept for caller compatibility.
        scheduler: Currently unused here — an Euler-a scheduler is always
            built from the local config; see get_scheduler for alternatives.
        mode: 't2i' (text-to-image) or 'i2i' (image-to-image).
        control: None for a plain pipeline, or 't2i-adapter' for an
            openpose T2I-Adapter pipeline.

    Returns:
        The constructed pipeline.

    Raises:
        ValueError: if the mode/control combination is unsupported.
    """
    config_id = "configs/sdxl"
    model_id = os.path.join(MODEL_PATH, sd_model)

    if mode in ('t2i', 'i2i') and control is None:
        euler_a = EulerAncestralDiscreteScheduler.from_pretrained(config_id, subfolder="scheduler")
        pipe = SDXLLongPromptWeightingPipeline.from_single_file(
            model_id,
            config=config_id,
            scheduler=euler_a,
            local_files_only=True,
            torch_dtype=torch.float16,
            use_safetensors=True,
            variant="fp16",
        )
        return pipe

    elif control == 't2i-adapter':
        # load adapter
        adapter = T2IAdapter.from_pretrained(
            "t2i-adapter-openpose-sdxl-1.0",
            torch_dtype=torch.float16,
            variant="fp16",  # BUG FIX: was misspelled "varient" and silently ignored
            local_files_only=True,
        )

        # load pipeline
        euler_a = EulerAncestralDiscreteScheduler.from_pretrained(config_id, subfolder="scheduler")
        pipe = StableDiffusionXLAdapterPipeline.from_single_file(
            model_id,
            adapter=adapter, scheduler=euler_a,
            torch_dtype=torch.float16,
            config=config_id,
            local_files_only=True)
        return pipe

    # Previously this fell through and returned None; fail loudly instead so
    # callers don't crash later on a None pipeline.
    raise ValueError(f"Unsupported combination: mode={mode!r}, control={control!r}")

def clean_gpu_cache():
    """Print CUDA memory stats and release cached blocks back to the driver.

    No-op on machines without CUDA, so the UI still works on CPU-only hosts.
    """
    if not torch.cuda.is_available():
        # Robustness fix: querying CUDA memory on a CPU-only host errors out.
        return
    allocated_memory = torch.cuda.memory_allocated()
    max_allocated_memory = torch.cuda.max_memory_allocated()
    print(f"自程序开始以来的最大显存使用量: {max_allocated_memory / (1024 ** 3):.2f} GB")
    print(f"当前已分配的显存: {allocated_memory / (1024 ** 3):.2f} GB")
    torch.cuda.empty_cache()

def resize_to_nearest_multiple_of_8(image: Image.Image | np.ndarray, multiple=8):
    """
    Resize an image down to the nearest multiple-of-``multiple`` dimensions.

    Accepts either a PIL image or a numpy array and returns the same type
    as the input (the two code paths of the original are unified here).

    Args:
        image: PIL image or numpy image array convertible via Image.fromarray.
        multiple: Both dimensions are rounded down to a multiple of this.

    Returns:
        The resized image, same type as the input.

    Raises:
        TypeError: for unsupported input types (previously returned None
            silently, which crashed callers later).
    """
    is_array = isinstance(image, np.ndarray)
    if is_array:
        image = Image.fromarray(image)
    elif not isinstance(image, Image.Image):
        raise TypeError(f"Expected PIL.Image or np.ndarray, got {type(image).__name__}")
    width, height = image.size
    new_size = ((width // multiple) * multiple, (height // multiple) * multiple)
    resized = image.resize(new_size, resample=Image.Resampling.LANCZOS)
    return np.array(resized) if is_array else resized

def get_mask(np1, np2):
    """
    Compute a per-pixel difference mask between two numpy images.

    Both images are first resized to multiple-of-8 dimensions. Pixels that
    differ get 1, identical pixels get 0.

    Args:
        np1, np2: numpy images of shape (H, W, C) — assumed equal size after
            resizing; TODO confirm callers guarantee matching inputs.

    Returns:
        A float32 torch tensor of shape (1, 1, H, W), or None when the
        images are identical.
    """
    np1 = resize_to_nearest_multiple_of_8(np1)
    np2 = resize_to_nearest_multiple_of_8(np2)
    # BUG FIX: subtracting uint8 arrays wraps around (0 - 1 == 255), which
    # made tiny differences look huge and defeated the >1 threshold below.
    # Promote to a signed dtype before taking the absolute difference.
    diff = np.abs(np1.astype(np.int16) - np2.astype(np.int16))
    print('np1:', np1.shape)
    print('np2:', np2.shape)
    if diff.sum() != 0:
        # A pixel counts as different when any channel exceeds the threshold.
        mask = (diff > 1).any(axis=2)  # shape: (H, W)
        mask = mask.astype(np.float32)[np.newaxis, ...]
        mask_tensor = torch.from_numpy(mask).unsqueeze(0)
        print(mask_tensor.shape)
    else:
        mask_tensor = None
    return mask_tensor