import os
os.environ['CUDA_VISIBLE_DEVICES']='0'

import torch,pdb
from diffusers import FluxControlPipeline, FluxPriorReduxPipeline
from diffusers.utils import load_image
from util_flux import process_img_1024,vertical_concat_images,horizontal_concat_images
from util_flux import resize_with_aspect
from PIL import Image
# from image_gen_aux import DepthPreprocessor
from controlnet_aux import CannyDetector
from itertools import product
from util_for_os import osj,ose

from MODEL_CKP import FLUX_CANNY

# pdb.set_trace()

# --- local model checkpoint paths ---
FLUX_REDUX='/home/shengjie/ckp/FLUX.1-Redux-dev'
FLUX_DEPTH='/home/shengjie/ckp/FLUX.1-Depth-dev'  # NOTE(review): unused in the visible code
FLUX='/data/models/FLUX___1-dev'  # NOTE(review): unused in the visible code

# NOTE(review): name misspells PREDICTION; only referenced by commented-out depth code below.
DEPTH_PREDCITION='/home/shengjie/ckp/depth-anything-large-hf'

# Garment part categories; each maps to its own example-image directory.
types = ['collar','sleeve','pockets']
# choose_type = types[0]
examples_dir = lambda t: f'/mnt/nas/shengjie/datasets/cloth_{t}_localimg'
# examples_dir2 = lambda t: f'/mnt/nas/shengjie/datasets/cloth_{t}_balanced'
# examples_dir3 = lambda t: f'/mnt/nas/shengjie/datasets/cloth_{t}_localimg_mask'
# save_dir = '/data/shengjie/synthesis_zhenzhi/'
other_dir = './redux_other'  # NOTE(review): only used if the alternate dir2 line below is re-enabled

# imagefiles = os.listdir(examples_dir)

# Redux prior: converts a reference image into FLUX prompt embeddings.
pipe_prior_redux = FluxPriorReduxPipeline.from_pretrained(
                                    FLUX_REDUX, 
                                    torch_dtype=torch.bfloat16).to("cuda")
# from lotus.app_infer_depth import load_pipe_g,get_depth_by_lotus_g
# depth_processor = load_pipe_g('d')
# depth_processor = DepthPreprocessor.from_pretrained(DEPTH_PREDCITION)
processor = CannyDetector()  # produces the Canny edge map used as the control signal

# Canny-conditioned FLUX control pipeline (checkpoint path imported from MODEL_CKP).
pipe = FluxControlPipeline.from_pretrained(FLUX_CANNY, torch_dtype=torch.bfloat16).to("cuda")

def get_whitemask_pos_and_size(mask_img: Image.Image):
    """
    Locate the white (non-zero) region of a mask image.

    Args:
        mask_img: PIL image used as a mask (single-channel expected).

    Returns:
        tuple: ((x, y) top-left position, (width, height) size) of the
        bounding box enclosing all non-zero pixels.

    Raises:
        ValueError: if the mask contains no non-zero pixels.
    """
    # getbbox() gives the bounding box of every non-zero pixel, or None
    # when the image is entirely zero.
    box = mask_img.getbbox()
    if not box:
        raise ValueError("掩码图像中没有有效区域")

    x0, y0, x1, y1 = box
    return (x0, y0), (x1 - x0, y1 - y0)

def smart_crop_with_extend(
    ori_img: Image.Image,
    pos,
    tar_size,
    extend_size: int = 50
):
    """
    Crop the target region expanded by ``extend_size`` pixels on every side,
    clamped so the crop never leaves the original image.

    Args:
        ori_img: source PIL image.
        pos: (x, y) top-left corner of the target region.
        tar_size: (width, height) of the target region.
        extend_size: padding in pixels added around the region.

    Returns:
        tuple: ((new_x, new_y), (new_width, new_height), crop_box, cropped_img)
        where crop_box is the (left, upper, right, lower) box actually used.

    Raises:
        ValueError: on invalid arguments or a degenerate crop region.
    """
    # --- argument validation (guard clauses) ---
    if not isinstance(ori_img, Image.Image):
        raise ValueError("ori_img 必须是PIL.Image对象")
    if len(pos) != 2 or any(p < 0 for p in pos):
        raise ValueError("pos 必须是有效的(x,y)坐标")
    if len(tar_size) != 2 or any(s <= 0 for s in tar_size):
        raise ValueError("tar_size 必须是有效的(width,height)尺寸")
    if extend_size < 0:
        raise ValueError("extend_size 不能为负数")

    img_w, img_h = ori_img.size
    tgt_x, tgt_y = pos
    tgt_w, tgt_h = tar_size

    # Shift the top-left corner outward by the padding, clamped at the origin.
    left = max(tgt_x - extend_size, 0)
    top = max(tgt_y - extend_size, 0)

    # Grow the extent by padding on both sides, clamped to the image border.
    out_w = min(tgt_w + 2 * extend_size, img_w - left)
    out_h = min(tgt_h + 2 * extend_size, img_h - top)

    if out_w <= 0 or out_h <= 0:
        raise ValueError("扩展后的裁剪区域尺寸无效")

    box = (left, top, left + out_w, top + out_h)
    # Position/size are in original-image coordinates.
    return (left, top), (out_w, out_h), box, ori_img.crop(box)


# Take `num` images from each of two categories, then generate every cross
# combination (itertools.product) of (control image, redux reference).
num = 5

# Same-category pairing: both picks come from the same type directory.
for t_id in range(len(types)):
    clo_list1 = []
    clo_list2 = []
    t1 , t2 = types[t_id] , types[t_id]
    # t1 , t2 = types[t_id] , types[ ((t_id)+1)%len(types) ]

    dir1 , dir2 = examples_dir(t1) , examples_dir(t2)
    # dir1 , dir2 = examples_dir(t1) , other_dir
    if dir1 == dir2:
        # Same directory: first `num` jpgs go to list1, the next `num` to list2.
        for entry in os.scandir(dir1):
            filename = entry.name
            if not filename.endswith('.jpg'):continue
            # break
            if len(clo_list1)!=num:
                clo_list1.append(
                    osj( dir1 , filename )
                )
            elif len(clo_list2)!=num:
                clo_list2.append(
                    osj( dir2 , filename )
                )
            else:
                break
    else:
        # Distinct directories: take the first `num` jpgs from each.
        for entry in os.scandir(dir1):
            filename = entry.name
            if not filename.endswith('.jpg'):continue
            # break
            if len(clo_list1)!=num:
                clo_list1.append(
                    osj( dir1 , filename )
                )
            else:break
        for entry in os.scandir(dir2):
            filename = entry.name
            if not filename.endswith('.jpg'):continue
            # break
            if len(clo_list2)!=num:
                clo_list2.append(
                    osj( dir2 , filename )
                )
            else:break

    # pdb.set_trace()
    
    def get_mask(size=(1024,1024),fill=(255,255,255)):
        # Solid white tile used as the empty top-left cell of the result grid.
        return Image.new('RGB',size,color=fill)
    # Result grid: row 0 = [white tile, control images (c1)]; each later row
    # starts with one redux reference (c2) and is filled with generations below.
    res_imgs = [
        [get_mask()]+[ process_img_1024(path) for path in clo_list1 ],
        *[[process_img_1024(path)] for path in clo_list2], 
    ]
    # pdb.set_trace()
    # Each generated image is appended to the row of its redux reference.
    idx = 0
    for c1,c2 in product(clo_list1,clo_list2):
        # (The block string below sketches the grid layout: columns = c1, rows = c2.)
        '''
        构建
        /        原图 c1-1  c1-2  c1-3 ...
        原图c2-1     合成1  合成2 合成3 ...
        c2-2        ...
        c2-3
        ...
        '''

        control_image_path = c1
        redux_image_path = c2

        # (disabled) paste the result back onto the original garment image
        # ori_path = osj(examples_dir2,filename)
        # mask_path = osj(examples_dir3,filename.replace('.jpg','.png'))
        # ori_img = Image.open( ori_path )
        # mask_img = Image.open( mask_path )
        # assert ori_img.size == mask_img.size,f'{ori_img.size} , {mask_img.size}'

        # pos , tar_size = get_whitemask_pos_and_size(mask_img)

        # pdb.set_trace()


        control_image = process_img_1024( control_image_path )
        redux_image = process_img_1024( redux_image_path )


        with torch.no_grad():
            # Experiment: blend redux embeddings of both garments instead of using one.
            # tmp_path = 'tmp_other.jpg'
            prompt_emb,pooled_prompt_emb = pipe_prior_redux(control_image,
                                                            return_dict=False) # attr 'prompt_embeds' torch.Size([1, 1241, 4096]) 
            prompt_emb2,pooled_prompt_emb2 = pipe_prior_redux(redux_image,return_dict=False) # attr 'prompt_embeds' torch.Size([1, 1241, 4096]) 
            # alpha weights the redux reference (c2); the control image gets 1-alpha.
            alpha = 0.8
            prompt_emb = (1-alpha) * prompt_emb + alpha * prompt_emb2
            pooled_prompt_emb = (1-alpha) * pooled_prompt_emb + alpha * pooled_prompt_emb2

        # Reload from path at aspect-preserving size for the Canny detector.
        control_image = resize_with_aspect(control_image_path)

        # control_image = processor(
        #     control_image
        # )[0].convert("RGB") # PIL 768 1024
        control_image = processor(control_image, 
                                  low_threshold=50, 
                                  high_threshold=200, 
                                  detect_resolution=1024, 
                                  image_resolution=1024)


        # print(control_image.size)

        with torch.no_grad():
            image = pipe(
                # prompt=prompt,
                control_image=control_image,
                height=control_image.height,
                width=control_image.width,
                num_inference_steps=8,
                guidance_scale=10.0,
                # generator=torch.Generator().manual_seed(42),
                # **pipe_prior_output,
                prompt_embeds=prompt_emb,
                pooled_prompt_embeds=pooled_prompt_emb,
            ).images[0]

        # idx%num picks the current c2 (inner product loop); +1 skips the header row.
        res_imgs[idx%num + 1].append(
            process_img_1024('',img_pil=image)
        )

        idx += 1 


        # Side-by-side debug strip: edge map, control source, redux ref, generation.
        concat_tmp_res = horizontal_concat_images( [ 
            process_img_1024('',img_pil=control_image) , 
            process_img_1024(control_image_path),
            redux_image ,   
            process_img_1024('',img_pil=image) ,
            ] )

        concat_tmp_res.save('tmp_redux2.jpg')

    # Assemble the full grid: concat each row horizontally, then stack rows.
    res_imgs_hori_concat = [
        horizontal_concat_images(ri) for ri in res_imgs
    ]
    res_imgs_verti_concat = vertical_concat_images(res_imgs_hori_concat)
    res_imgs_verti_concat.save(f'tmp_redux_{t_id}.jpg')
    # pdb.set_trace()