import os
# Pin the process to GPU 0; must be set before torch initializes CUDA.
os.environ['CUDA_VISIBLE_DEVICES']='0'

import torch,pdb
from diffusers import FluxControlPipeline, FluxPriorReduxPipeline
from diffusers.utils import load_image
from util_flux import process_img_1024,concat_half,horizontal_concat_images
from util_flux import resize_with_aspect
from PIL import Image
from image_gen_aux import DepthPreprocessor

# pdb.set_trace()

# Local checkpoint directories for the FLUX model family.
FLUX_REDUX='/home/shengjie/ckp/FLUX.1-Redux-dev'
FLUX_DEPTH='/home/shengjie/ckp/FLUX.1-Depth-dev'
FLUX='/data/models/FLUX___1-dev'

# NOTE(review): constant name is misspelled ("PREDCITION" -> "PREDICTION");
# kept as-is to avoid breaking references.
DEPTH_PREDCITION='/home/shengjie/ckp/depth-anything-large-hf'

# Garment part categories; the script processes one part type per run.
types = ['collar','sleeve','pockets']
choose_type = types[0]
# Dataset directories: local part crops, full balanced garments, and part masks.
examples_dir = f'/mnt/nas/shengjie/datasets/cloth_{choose_type}_localimg'
examples_dir2 = f'/mnt/nas/shengjie/datasets/cloth_{choose_type}_balanced'
examples_dir3 = f'/mnt/nas/shengjie/datasets/cloth_{choose_type}_localimg_mask'
# save_dir = '/data/shengjie/synthesis_zhenzhi/'

# imagefiles = os.listdir(examples_dir)

# Redux prior pipeline: turns a reference image into FLUX prompt embeddings.
pipe_prior_redux = FluxPriorReduxPipeline.from_pretrained(
                                    FLUX_REDUX, 
                                    torch_dtype=torch.bfloat16).to("cuda")
# from lotus.app_infer_depth import load_pipe_g,get_depth_by_lotus_g
# depth_processor = load_pipe_g('d')
# Depth estimator used to produce the control image for FLUX-Depth.
depth_processor = DepthPreprocessor.from_pretrained(DEPTH_PREDCITION)

# Depth-conditioned FLUX generation pipeline.
pipe = FluxControlPipeline.from_pretrained(FLUX_DEPTH, torch_dtype=torch.bfloat16).to("cuda")

def get_whitemask_pos_and_size(mask_img: 'Image.Image'):
    """Locate the bounding box of the foreground (white) region of a mask.

    NOTE: ``getbbox()`` bounds ALL non-zero pixels, so any non-black pixel
    (not only pure white) contributes to the result — callers should pass a
    clean binary mask.

    Args:
        mask_img: single-channel PIL mask image (any object exposing
            ``getbbox()`` is accepted).

    Returns:
        tuple: ``((x, y), (width, height))`` — top-left corner and size of
        the foreground bounding box.

    Raises:
        ValueError: if the mask contains no non-zero pixels.
    """
    # Bounding box of non-zero pixels as (left, upper, right, lower), or None.
    bbox = mask_img.getbbox()
    if not bbox:
        raise ValueError("掩码图像中没有有效区域")

    left, upper, right, lower = bbox
    position = (left, upper)
    size = (right - left, lower - upper)

    return position, size

def smart_crop_with_extend(
    ori_img: Image.Image,
    pos,
    tar_size,
    extend_size: int = 50
):
    """Crop a target region extended by ``extend_size`` pixels on each side,
    clamped so the crop never leaves the image bounds.

    Args:
        ori_img: source PIL image.
        pos: (x, y) top-left corner of the target region.
        tar_size: (width, height) of the target region.
        extend_size: number of pixels to extend outward on every side.

    Returns:
        tuple: ``(new_pos, new_size, crop_box, cropped_img)`` where
            new_pos: (x, y) of the extended region in original-image coords,
            new_size: (width, height) of the extended region,
            crop_box: (left, upper, right, lower) box used for the crop,
            cropped_img: the cropped PIL image.
        (The original docstring claimed three return values; the function has
        always returned four, including ``crop_box``.)

    Raises:
        ValueError: when any input is invalid or the clamped crop is empty.
    """
    # Input validation (error-message literals kept verbatim for existing logs).
    if not isinstance(ori_img, Image.Image):
        raise ValueError("ori_img 必须是PIL.Image对象")
    if len(pos) != 2 or any(p < 0 for p in pos):
        raise ValueError("pos 必须是有效的(x,y)坐标")
    if len(tar_size) != 2 or any(s <= 0 for s in tar_size):
        raise ValueError("tar_size 必须是有效的(width,height)尺寸")
    if extend_size < 0:
        raise ValueError("extend_size 不能为负数")

    ori_width, ori_height = ori_img.size

    x, y = pos
    width, height = tar_size

    # Extend the top-left corner, clamped at the image origin.
    new_x = max(x - extend_size, 0)
    new_y = max(y - extend_size, 0)

    # Extend width/height on both sides, clamped at the right/bottom edges.
    new_width = min(width + 2 * extend_size, ori_width - new_x)
    new_height = min(height + 2 * extend_size, ori_height - new_y)

    # Guard against a degenerate (empty) crop, e.g. pos outside the image.
    if new_width <= 0 or new_height <= 0:
        raise ValueError("扩展后的裁剪区域尺寸无效")

    crop_box = (new_x, new_y, new_x + new_width, new_y + new_height)
    cropped_img = ori_img.crop(crop_box)

    # Coordinates/size are relative to the original image.
    return (new_x, new_y), (new_width, new_height), crop_box, cropped_img


# Main driver: walk the local-part image dir, process files two at a time,
# generate a replacement part with depth-controlled FLUX + blended Redux
# embeddings, then paste it back into the full garment image in two stages.
clo_list = []
for entry in os.scandir(examples_dir):
    filename = entry.name
    if not filename.endswith('.jpg'):continue
    # break
    # Accumulate filenames; only act once a pair has been collected.
    clo_list.append(filename)
    if len(clo_list)==2:
        filename,filename2 = clo_list
        clo_list.clear()
        control_image_path = os.path.join(examples_dir,filename)
        # redux_image_path = os.path.join(examples_dir,filename2)
        # NOTE(review): redux source is hard-coded here, so filename2 is
        # currently unused.
        redux_image_path = 'tmp_other2.jpg'
    else:
        continue

    # Paste the generated part back onto the original garment image.
    from util_for_os import osj,ose
    ori_path = osj(examples_dir2,filename)
    mask_path = osj(examples_dir3,filename.replace('.jpg','.png'))
    ori_img = Image.open( ori_path )
    mask_img = Image.open( mask_path )
    assert ori_img.size == mask_img.size,f'{ori_img.size} , {mask_img.size}'

    # Where (and how large) the part region is inside the full garment image.
    pos , tar_size = get_whitemask_pos_and_size(mask_img)

    # pdb.set_trace()

    # concat_type = input('input concat type [h,v,c] : ')
    # redux_image = concat_half( control_image_path, redux_image_path,
    #                           concat_type=concat_type )

    control_image = process_img_1024( control_image_path )
    redux_image = process_img_1024( redux_image_path )


    with torch.no_grad():
        # Experiment: blend the Redux embeddings of the two images instead of
        # using the garment image's embedding alone.
        # tmp_path = 'tmp_other.jpg'
        prompt_emb,pooled_prompt_emb = pipe_prior_redux(control_image,
                                                        return_dict=False) # attr 'prompt_embeds' torch.Size([1, 1241, 4096]) 
        prompt_emb2,pooled_prompt_emb2 = pipe_prior_redux(redux_image,return_dict=False) # attr 'prompt_embeds' torch.Size([1, 1241, 4096]) 
        # Linear interpolation of embeddings, weighted toward the redux image.
        alpha = 0.8
        prompt_emb = (1-alpha) * prompt_emb + alpha * prompt_emb2
        pooled_prompt_emb = (1-alpha) * pooled_prompt_emb + alpha * pooled_prompt_emb2
# del pipe_prior_redux
# torch.cuda.empty_cache()

# control_image = get_depth_by_lotus_g(depth_processor,control_image,
#                                      None,'cuda','d') # PIL*2    922*1050
# control_image = output_g.resize(target_shape)

# pdb.set_trace()

    # Recompute the control image from disk at aspect-preserving size, then
    # convert it to a depth map for the depth-conditioned pipeline.
    control_image = resize_with_aspect(control_image_path)
    # next_control_image = resize_with_aspect(ori_path)

    control_image = depth_processor(
        control_image
    )[0].convert("RGB") # PIL 768 1024
    # next_control_image = depth_processor(
    #     next_control_image
    # )[0].convert("RGB") # PIL 768 1024

    # print(control_image.size)

    # Stage 1: generate the new part image, conditioned on the depth map and
    # the blended Redux embeddings (no text prompt).
    with torch.no_grad():
        image = pipe(
            # prompt=prompt,
            control_image=control_image,
            height=control_image.height,
            width=control_image.width,
            num_inference_steps=8,
            guidance_scale=10.0,
            # generator=torch.Generator().manual_seed(42),
            # **pipe_prior_output,
            prompt_embeds=prompt_emb,
            pooled_prompt_embeds=pooled_prompt_emb,
        ).images[0]
    # print(image.size)

    # Paste the stage-1 result into the masked region of the full garment.
    image_resized = image.resize( tar_size )
    ori_img.paste(image_resized, pos ,  )

    # Side-by-side debug strip: depth | part | redux ref | generated | pasted.
    concat_tmp_res = horizontal_concat_images( [ 
        process_img_1024('',img_pil=control_image) , 
        process_img_1024(control_image_path),
        redux_image ,   
        process_img_1024('',img_pil=image) ,
        process_img_1024('',img_pil=ori_img),
        ] )

    concat_tmp_res.save('tmp_redux2.jpg')

    # Stage 2 (the author's planning note below, in Chinese): since the edited
    # region is small relative to the whole image, a Fill pass would largely
    # ignore the prompt. Instead, crop the locally generated area extended
    # outward, repair that crop with another depth+redux pass, then paste it
    # back — depth comes from the crop, prompt embeddings from redux.
    ''' stage 2
    由于改动的部分相对整体太小
    如果使用Fill 其实也并不会去参考prompt
    尝试，对 local 部分，临时生成的内容，往外扩展50 截取出来做修复，再贴回去
    记作   local_crop = crop( ori_img , pos , tar_size , extend_size=50 )
                            new pos x  = max(x-ext , 0)
                            new pos y = max(y-ext , 0)
                            new_size =  min( tar_size+2*ext , ori width ), 
                                        min( tar_size+2*ext , ori height)
                            return new_pos,new_size,ori_img.crop( new pos , new size )
           ori.paste( process(local_crop).resize( new_size ) , 
                      new_pos  )
    next_control = depth( local crop )
    prompt = redux( local crop )
    '''
    new_pos,new_size,crop_box,local_crop = smart_crop_with_extend(ori_img,pos,tar_size,
                                                         extend_size=200)
    
    # Depth control comes from the UNTOUCHED original image's same crop box,
    # while the redux embedding comes from the stage-1-pasted crop.
    local_crop = resize_with_aspect('',img_pil=local_crop)
    next_control_image = depth_processor(
        resize_with_aspect('',img_pil=Image.open(ori_path).crop(crop_box))
    )[0].convert('RGB')
    # Feed the pasted crop through Redux again; depth constrains generation.
    with torch.no_grad():
        prompt_emb,pooled_prompt_emb = pipe_prior_redux(local_crop,return_dict=False)
    # print(next_control_image.size)
    with torch.no_grad():
        next_image = pipe(
            # prompt=prompt,
            control_image=next_control_image,
            height=next_control_image.height,
            width=next_control_image.width,
            num_inference_steps=8,
            guidance_scale=10.0,
            # generator=torch.Generator().manual_seed(42),
            # **pipe_prior_output,
            prompt_embeds=prompt_emb,
            pooled_prompt_embeds=pooled_prompt_emb,
        ).images[0]
    # print(next_image.size)
    # Debug strip for stage 2: depth | crop | regenerated crop.
    concat_tmp_res1 = horizontal_concat_images( [ 
        process_img_1024('',img_pil=next_control_image) , 
        process_img_1024('',img_pil=local_crop),
        process_img_1024('',img_pil=next_image) ,
        ] )
    # from util_for_blend import feather_blend,poisson_blend
    # Paste the repaired crop back (hard paste; blending variants are stubbed).
    ori_img.paste( next_image.resize( new_size ) , new_pos )
    # ori_img1 = feather_blend( ori_img , next_image , new_pos ,feather_radius=20)
    # ori_img2 = poisson_blend( ori_img , next_image , new_pos )

    # concat_tmp_res = horizontal_concat_images([ori_img1,ori_img2])
    # concat_tmp_res.save('tmp_redux2.jpg')

    # pdb.set_trace()

    concat_tmp_res2 = horizontal_concat_images( [ 
        process_img_1024(control_image_path),
        redux_image,
        process_img_1024('',img_pil=ori_img) , 
        ] )
    from util_flux import vertical_concat_images
    # Stack both stages' debug strips into one image, overwriting the stage-1 save.
    concat_tmp_res = vertical_concat_images(
        [
            concat_tmp_res1,
            concat_tmp_res2,
        ]
    )
    concat_tmp_res.save('tmp_redux2.jpg')

    # Intentional breakpoint: inspect each pair's result interactively.
    pdb.set_trace()

    # Paste back onto the original garment (superseded by the code above).
    # examples_dir2 = '/mnt/nas/shengjie/datasets/cloth_collar_balanced'
    # examples_dir3 = '/mnt/nas/shengjie/datasets/cloth_collar_localimg_mask'
    # from util_for_os import osj,ose
    # ori_path = osj(examples_dir2,filename)
    # mask_path = osj(examples_dir3,filename.replace('.jpg','.png'))
    # ori_img = Image.open( ori_path )
    # mask_img = Image.open( mask_path )
    # assert ori_img.size == mask_img.size,f'{ori_img.size} , {mask_img.size}'
    # image -> resize to white_mask.size
    # tar_position , tar_size = get_whitemask_pos_and_size( mask_img )
    # image paste to ori_img
    # image_resized = image.resize( tar_size )
    # ori_img.paste( pos , image_resized )