from utils.util_flux import process_img_1024, horizontal_concat_images
from PIL import Image, ImageOps, ImageFilter
import numpy as np
import torch,pdb



def get_depth(
    input_img,
    depth_processor=None,
    get_mask=None,
    mask_model=None,
    gaussian_score=20
):
    """Build a masked, interior-blurred depth map for ``input_img``.

    The image is normalized via ``process_img_1024``, a foreground mask is
    extracted with ``get_mask(mask_model, ...)``, and the depth estimate from
    ``depth_processor`` is blurred inside the mask (edges preserved by
    ``replace_lama_cleaner``) while everything outside the mask is blacked
    out. Returns an RGB PIL image.
    """
    img_pil = process_img_1024('', img_pil=input_img)

    mask_pil, _no_bg_img = get_mask(mask_model, img_pil)

    # depth_processor is expected to return a sequence of PIL-like images.
    depth_pil = depth_processor(img_pil)[0].convert('RGB')

    # No mask available: return the raw depth map unchanged.
    if mask_pil is None:
        return depth_pil.convert('RGB')

    mask_pil = process_img_1024(mask_pil, pad_color=(0, 0, 0)).convert('L')
    # Gaussian blur of the depth interior with mask-edge preservation.
    blurred_depth = replace_lama_cleaner(mask_pil, depth_pil, gaussian_score)
    # Black out everything outside the foreground mask.
    background = Image.new('RGB', depth_pil.size, 0)
    composed = Image.composite(
        background, blurred_depth, ImageOps.invert(mask_pil)
    )
    return composed.convert('RGB')


def replace_lama_cleaner(mask_image, depth_pil, gaussian_score=20,
                         edge_threshold=30, debug_path=None):
    """Blur ``depth_pil`` inside ``mask_image`` while keeping mask edges sharp.

    Edges are detected on the mask, dilated, and used to composite the
    original (masked) depth values over a Gaussian-blurred copy, so the
    mask boundary stays crisp while the interior is smoothed.

    Args:
        mask_image: PIL image; its white (>200) region marks the foreground.
        depth_pil: RGB PIL depth map, same size as ``mask_image``.
        gaussian_score: Gaussian blur radius applied to the depth interior.
        edge_threshold: binarization threshold (0-255) for the edge mask
            (previously a hard-coded constant).
        debug_path: when set, save a side-by-side debug strip to this path.
            Defaults to None so production runs no longer write
            "tmp_blur.jpg" unconditionally (the original always did).

    Returns:
        RGB PIL image: blurred interior, sharp edges, black background.
    """
    # 1. Extract edge information from the mask and binarize it.
    gray = mask_image.convert("L")
    edges = gray.filter(ImageFilter.FIND_EDGES)
    # Boost edge contrast before thresholding.
    edges = ImageOps.autocontrast(edges)
    edge_np = np.array(edges)
    mask_np = (edge_np > edge_threshold).astype(np.uint8) * 255
    mask = Image.fromarray(mask_np, mode="L")

    # Dilate the edge mask a little so the preserved edge band is wider.
    mask = mask.filter(ImageFilter.MaxFilter(7))

    # 2. Blur the whole depth map; the sharp copy is restored on edges below.
    blurred = depth_pil.filter(ImageFilter.GaussianBlur(radius=gaussian_score))

    # Inverted edge mask: 0 on edges, 255 elsewhere.
    inv_mask = ImageOps.invert(mask)

    # Fill the white region of the original mask with the corresponding
    # depth values; everything else stays black.
    fg = np.array(gray) > 200
    depth_np = np.array(depth_pil)
    fg_3d = np.repeat(fg[:, :, np.newaxis], 3, axis=2)
    merged = np.zeros_like(depth_np)
    merged[fg_3d] = depth_np[fg_3d]
    mask_image_merged = Image.fromarray(merged)

    # Composite: edges keep the sharp masked depth, interior takes the blur.
    result = Image.composite(blurred, mask_image_merged, inv_mask)

    # Optional debug dump, opt-in only.
    if debug_path is not None:
        try:
            horizontal_concat_images([
                mask_image, mask, inv_mask,
                mask_image_merged, result
            ]).save(debug_path)
        except Exception as e:
            # Never let a debug save abort the main pipeline.
            print(f"Warning: failed to save {debug_path}: {e}")

    return result


def get_result(
    img1,
    img2,
    depth_processor=None,
    redux_pipe=None,
    flux_pipe=None,
    get_mask=None,
    mask_model=None,
    alpha=0.3,
    control_alpha=0.2,
    gaussian_score=20,
    steps=20
):
    """Generate an image with ``flux_pipe``, depth-conditioned on ``img1``
    and style-conditioned (via redux embeddings) on ``img2``.

    Args:
        img1: structure/source image; its blurred depth map drives ControlNet.
        img2: reference image fed to ``redux_pipe`` for prompt embeddings.
        alpha: scale factor applied to the redux prompt embeddings.
        control_alpha: control strength passed to ``flux_pipe``.
        gaussian_score: blur radius used when building the depth map.
        steps: number of diffusion inference steps.

    Returns:
        PIL image with its background whitened via the foreground mask,
        or None when either input image is missing.
    """
    # Guard both inputs BEFORE any expensive work. The original only
    # checked img2 after already running get_depth on img1 (and never
    # checked img1 at all); get_result_ori guards both up front.
    if img1 is None or img2 is None:
        return None

    img1 = process_img_1024('', img_pil=img1)
    restored_depth = get_depth(
        img1,
        depth_processor=depth_processor,
        get_mask=get_mask,
        mask_model=mask_model,
        gaussian_score=gaussian_score
    )
    if restored_depth is None:
        return None

    redux_image = process_img_1024('', img_pil=img2)
    control_image = process_img_1024('', img_pil=restored_depth)

    with torch.no_grad():
        # redux_pipe returns (prompt_embeds, pooled_prompt_embeds),
        # e.g. prompt_embeds of shape [1, 1241, 4096].
        prompt_emb, pooled_prompt_emb = redux_pipe(redux_image, return_dict=False)
        # Zero out the last 25 token embeddings — presumably to weaken
        # trailing redux tokens; TODO confirm the intended effect.
        prompt_emb[:, -25:, :] = 0

        # Down-weight the redux conditioning by alpha.
        prompt_emb = alpha * prompt_emb

        image = flux_pipe(
            control_image=control_image,
            control_alpha=control_alpha,
            height=control_image.height,
            width=control_image.width,
            num_inference_steps=steps,
            guidance_scale=4.5,
            prompt_embeds=prompt_emb,
            pooled_prompt_embeds=pooled_prompt_emb,
        ).images[0]

        # Whiten the background of the generated image: keep the generated
        # pixels where the mask is white, paint white elsewhere.
        mask_pil, _no_bg_img = get_mask(mask_model, image)
        white_mask = Image.new('RGB', image.size, 'white')
        masked_img = Image.composite(
            white_mask, image, ImageOps.invert(mask_pil)
        )

    return masked_img

def get_result_ori(img1, img2,
                   depth_processor=None,
                   redux_pipe=None, flux_pipe=None,
                   get_mask=None,
                   mask_model=None,
                   steps=20):
    """Original variant of the generation pipeline: depth-condition
    ``flux_pipe`` on ``img1`` (without interior blurring, unlike
    ``get_result``) and feed ``img2`` to ``redux_pipe`` unscaled.

    Returns the generated PIL image with a whitened background, or None
    when either input image is missing.
    """
    if img1 is None or img2 is None:
        return None

    # --- depth preparation (raw depth, no Gaussian smoothing) ---
    src = process_img_1024('', img_pil=img1)
    fg_mask, _no_bg = get_mask(mask_model, src)

    # depth_processor is expected to return a sequence of PIL-like images.
    depth_rgb = depth_processor(src)[0].convert('RGB')

    if fg_mask is None:
        depth_masked = depth_rgb
    else:
        fg_mask = process_img_1024(fg_mask, pad_color=(0, 0, 0)).convert('L')
        # Black out everything outside the foreground mask.
        black = Image.new('RGB', depth_rgb.size, 0)
        depth_masked = Image.composite(
            black, depth_rgb, ImageOps.invert(fg_mask)
        )
    depth_masked = depth_masked.convert('RGB')
    # --- end depth preparation ---

    redux_image = process_img_1024('', img_pil=img2)          # redux conditioning
    control_image = process_img_1024('', img_pil=depth_masked)  # depth control

    with torch.no_grad():
        # redux_pipe returns (prompt_embeds, pooled_prompt_embeds),
        # e.g. prompt_embeds of shape [1, 1241, 4096].
        prompt_emb, pooled_prompt_emb = redux_pipe(redux_image, return_dict=False)

        image = flux_pipe(
            control_image=control_image,
            control_image2=None,
            height=control_image.height,
            width=control_image.width,
            num_inference_steps=steps,
            guidance_scale=4.5,
            prompt_embeds=prompt_emb,
            pooled_prompt_embeds=pooled_prompt_emb,
        ).images[0]

        # Whiten the background of the generated image using its own mask.
        out_mask, _no_bg2 = get_mask(mask_model, image)
        white = Image.new('RGB', image.size, 'white')
        result = Image.composite(white, image, ImageOps.invert(out_mask))

    return result

    