'''
数据预处理
find img1 img2
mask1, img1_rm = rmbg(img1)
target = img2
img2_pose = get img2 pose by img2 name from pose dir
mask2, img2_rm= rmbg(img2)
img2_env = get env from img2 by mask2 (imageOps remove region of black and remove white)
img2_ref = merge(img2_pose , img2_env)

input_img = concat( target, img2_ref, img1_rm ) 
    input_img_path = input_img.save(datasets_dir, save_name='{img1_name}__{img2_name}.jpg')
input_prompt = "refcontrolPoseEnv change pose and env to photo with reference from left side," + \
               "change person to photo with refence from right side"

build json:
input_img split train and val
{
    'train':[
        {
            'prompt':input_prompt,
            'input': input_img_path
        }, ...
    ],
    'val':[
        {
            'prompt':input_prompt,
            'input': input_img_path
        }, ...
    ],
}

# 逻辑 更新
data_dir = '/mnt/nas/shengjie/datasets/KontextRefControl_pose'
data_structure:
dataset/
├── control/   # pose maps
└── result/    # corresponding images

Files in control and result share the same names.
Example: dataset/control/0001.png ↔ dataset/result/0001.png

process_dir = data_dir
process_control_dir = osj(process_dir, 'control')
process_res_dir = osj(process_dir, 'result')

control is concat_img [img1,pose_img2] will be trained
result is  concat_img [img1,img2] final ground_truth img

由于 control 图 是两张合在一起的
因此 需要从 pose 图 推测 img2 的大小, pose 图 大部分是黑色，除了肢体关键点有颜色
img1, pose_img2 = process( control )
res_img1, res_img2 = crop_res_by_size( result, img1.size, pose_img2.size )
mask1, nobg1 = process_rmbg( res_img1 ) mask 中 人物部分白色其他黑色, nobg1 中 只有人物图像，没有环境图像
mask2, nobg2 = process_rmbg( res_img2 )
env2 = process_env( mask2 , res_img2 ) 从 真实图中 根据 mask 反向 提取 env 环境图, env2 中 此时 人物部分为白色
new_pose_img2 = merge(env2 , pose_img2) 合并 env2 和 pose_img2 图, 将env2中白色部分填充pose_img2的对应区域

new control = [nobg1, new_pose_img2]  new_pose_img2 中 携带 pose 和 env 信息
new result =  [nobg1, img2]

## control size
process with 1526 height 1024
process new control
process new result

datasets_dir = '/mnt/nas/shengjie/datasets/KontextRefControl_pose'
input_img = concat( new result, new control ) 
    input_img_path = input_img.save(datasets_dir, save_name='{img1_name}__{img2_name}.jpg')
input_prompt = "refcontrolPoseEnv change pose and env to photo with reference from left side," + \
               "change person to photo with refence from right side"

build json:
input_img split train and val
{
    'train':[
        {
            'prompt':input_prompt,
            'input': input_img_path
        }, ...
    ],
    'val':[
        {
            'prompt':input_prompt,
            'input': input_img_path
        }, ...
    ],
}
save_json_file = './data_poseenv_control_100.json'
save to json file
'''
import os,sys

# Shorthand aliases for the os.path helpers used throughout this script.
osa = os.path.abspath
osd = os.path.dirname
# Make the parent directory importable so the sibling modules below
# (demo_rmbg, demo_sam, utils.*) resolve when this file runs as a script.
cur_dir = osd(osa(__file__))
par_dir = osd(cur_dir)
sys.path.insert(0,par_dir)

# Pin all GPU work to device 2 (must be set before CUDA initializes).
os.environ['CUDA_VISIBLE_DEVICES']='2'

from demo_rmbg import load_rmbg,get_mask_by_rmbg
from demo_sam import load_sam2,get_mask_by_img_coor
from utils.util_for_os import osj,osb,ose
from utils.util_flux import process_img_1024,horizontal_concat_images
import utils.util_for_huggingface


import cv2,json,pdb
import numpy as np
from PIL import Image
from tqdm import tqdm

# model_rmbg = load_rmbg()


def get_mask_and_masked_img(
    get_mask,
    mask_model,
    input_img,
):
    """Run a segmentation callable on an image.

    Thin adapter: invokes ``get_mask(mask_model, input_img)`` and returns
    its (mask, background-removed image) pair unchanged.
    """
    mask, cleaned = get_mask(mask_model, input_img)
    return mask, cleaned

# Source dataset (paired control/result images) and output directory for
# the concatenated training images produced by process_data_by_sam().
data_dir = '/mnt/nas/shengjie/datasets/KontextRefControl_pose'
save_datasets_dir = '/mnt/nas/shengjie/datasets/KontextRefControl_poseenv'
os.makedirs(save_datasets_dir, exist_ok=True)
# Accumulated train/val records, dumped to `save_json_file` at the end.
will_save = {
    'train':[],
    'val':[],
}
save_json_file = './data_poseenv_control_100.json'

# Instruction prompt shared by every sample.
# NOTE(review): "refence" looks like a typo for "reference"; left unchanged
# because trained checkpoints may depend on this exact token sequence.
input_prompt = "[refcontrolPoseEnv] change pose and env to photo with reference from left side," + \
               "change person to photo with refence from right side"

process_dir = data_dir
process_control_dir = osj(process_dir, 'control')
process_result_dir = osj(process_dir, 'result')

# `names.txt` lists the image filenames; control/ and result/ share names.
control_names_path = osj( process_control_dir, 'names.txt' )
result_names_path = osj( process_result_dir, 'names.txt' )

assert ose( control_names_path ),control_names_path
assert ose( result_names_path ),result_names_path

with open( control_names_path ,encoding='utf-8') as f:
    control_names = f.readlines()
# Keep only .png entries, stripped of surrounding whitespace.
control_names = [name.strip() for name in control_names if name.strip().endswith('.png')]


def process_pose(img_path):
    """Split a concatenated control image into (photo, pose map).

    The control image is [img1 | pose_img2] side by side; the pose half is
    mostly pure black except for the colored body key-points. Scanning
    columns right-to-left, the first column whose exactly-black pixel count
    drops below 50% of the height is treated as the right edge of the
    photo, i.e. the split point.

    Returns (left_pil, right_pil) as RGB PIL images.
    Raises ValueError when the image cannot be read.
    """
    bgr = cv2.imread(img_path)
    if bgr is None:
        raise ValueError(f"无法读取图片: {img_path}")

    gray = cv2.cvtColor(bgr, cv2.COLOR_BGR2GRAY)
    height, width = gray.shape

    # Sentinel: `width` means "no split found yet".
    split_x = width
    for col in range(width - 1, -1, -1):
        # Count strictly-black (value 0) pixels in this column.
        dark_count = np.sum(gray[:, col] == 0)
        if dark_count < height * 0.5:
            # Mostly non-black column -> we have entered the photo half.
            split_x = col + 1
            break

    if split_x == width:
        # No boundary detected; fall back to the middle of the image.
        split_x = width // 2
        print('没有找到分割线',img_path)

    # Left = reference photo, right = pose map; convert BGR -> RGB PIL.
    left_pil = Image.fromarray(cv2.cvtColor(bgr[:, :split_x], cv2.COLOR_BGR2RGB))
    right_pil = Image.fromarray(cv2.cvtColor(bgr[:, split_x:], cv2.COLOR_BGR2RGB))

    return left_pil, right_pil
def crop_res_by_size(img2_path, size1):
    """Split a concatenated result image at the width of its left half.

    img2_path: path of the result image, assumed to be [res_img1 | res_img2]
               joined horizontally.
    size1:     (width, height) of the left image; its width is the split
               point.

    Returns (left_pil, right_pil) as RGB PIL images.
    Raises ValueError when the image cannot be read.
    """
    full = cv2.imread(img2_path)
    if full is None:
        raise ValueError(f"无法读取图片: {img2_path}")

    height, width = full.shape[:2]

    # The left part is assumed to be exactly as wide as size1.
    split_x = size1[0]

    # Guard against a split point at or past the right edge.
    if split_x >= width:
        split_x = width // 2
        print(f'分割点超出边界，使用中间位置: {img2_path}')

    # Convert each half from BGR to RGB PIL images.
    left_pil = Image.fromarray(cv2.cvtColor(full[:, :split_x], cv2.COLOR_BGR2RGB))
    right_pil = Image.fromarray(cv2.cvtColor(full[:, split_x:], cv2.COLOR_BGR2RGB))

    return left_pil, right_pil

def get_masked_img(img, mask):
    """Keep only the masked (white) region of *img* on a black canvas.

    img:  source image (PIL image or ndarray); RGBA input has its alpha
          channel dropped.
    mask: white (255) marks pixels to keep; everything else becomes black.

    Returns a three-channel RGB PIL image.
    """
    pixels = np.array(img)
    mask_arr = np.array(mask)
    # Binarize the mask so only 0 / 255 values remain.
    mask_arr = (mask_arr > 127).astype(np.uint8) * 255

    # Drop an alpha channel when present.
    if pixels.shape[2] == 4:
        rgb = pixels[:, :, :3]
    else:
        rgb = pixels.copy()

    # Black canvas; copy through only the pixels the mask marks as white.
    out = np.zeros_like(rgb)
    keep = mask_arr > 10
    out[keep] = rgb[keep]

    return Image.fromarray(out, 'RGB')

def process_env(mask, img):
    """Extract the environment (background) of *img* using a person mask.

    mask: person region white (255), background black (0).
    img:  the real photo.

    Returns a PIL image where background pixels are preserved and the
    person region is black.
    """
    mask_arr = np.array(mask)
    # Binarize to a strict 0/255 mask.
    mask_arr = (mask_arr > 127).astype(np.uint8) * 255

    pixels = np.array(img)

    # Start from an all-black canvas and copy only the background pixels
    # (mask value 0 = background, 255 = person).
    env = np.zeros_like(pixels)
    bg = mask_arr == 0
    env[bg] = pixels[bg]

    # The person area stays black.
    return Image.fromarray(env)

def merge(env2, mask2, pose_img2):
    """Paste the pose image's person region onto the environment image.

    env2:      environment image (person area black, background intact).
    mask2:     person region white (255), background black (0).
    pose_img2: pose map whose person-region pixels are copied over.

    Returns the merged PIL image.
    """
    env_arr = np.array(env2)
    mask_arr = np.array(mask2)
    # Binarize so the mask only holds 0 / 255.
    mask_arr = (mask_arr > 127).astype(np.uint8) * 255

    pose_arr = np.array(pose_img2)

    # Start from the environment and overwrite just the person region
    # with the corresponding pose-map pixels.
    out = env_arr.copy()
    person = mask_arr > 0
    out[person] = pose_arr[person]

    return Image.fromarray(out)

def process_data_by_sam():
    """Build the pose+env dataset using SAM2 person masks.

    For each name in ``control_names``: split the control image into the
    reference photo and the pose map, split the result image at the same
    point, mask out the person in each half with SAM2, merge the pose map
    with the target environment, concatenate [new_result | new_control]
    side by side, save the image, and record a train/val JSON entry
    (first 100 kept samples -> train). Interactive: each sample is
    previewed to tmp_pose.jpg and can be skipped from the console.
    """
    sam2 = load_sam2()
    # control/name.png pairs with result/name.png (identical filenames).
    for id,control_name in enumerate(control_names):
        res_img1_path = osj( process_control_dir, control_name )
        res_img2_path = osj( process_result_dir, control_name )

        # img1 is the reference person photo; pose_img2 drives the pose.
        img1, pose_img2 = process_pose( res_img1_path )
        res_img1, res_img2 = crop_res_by_size( res_img2_path, img1.size )
        # Normalize each piece to the pipeline's working resolution;
        # pose map is padded with black to match its background.
        img1 = process_img_1024('',img1)
        pose_img2 = process_img_1024('',pose_img2,pad_color=(0,0,0))
        res_img1 = process_img_1024('',res_img1,)
        res_img2 = process_img_1024('',res_img2,)

        # Preview used for the interactive keep/skip decision below.
        horizontal_concat_images([img1,pose_img2,res_img1,res_img2]).save('tmp_pose.jpg')
        # Manual curation: entering 'n' skips the current sample.
        user_input = input("是否继续处理该样本？(y/n): ").strip().lower()
        if user_input == 'n':
            continue

        # SAM2 masks (float in [0,1] -> uint8 0/255); nobg* keep only the
        # person on a black background.
        # mask1, _ = get_mask_and_masked_img( get_mask_by_rmbg,model_rmbg,res_img1 )
        mask1 = get_mask_by_img_coor(sam2,res_img1)
        mask1 = (mask1*255).astype(np.uint8)
        nobg1 = get_masked_img(res_img1 ,mask1)
        # mask2, _ = get_mask_and_masked_img( get_mask_by_rmbg,model_rmbg,res_img2 )
        mask2 = get_mask_by_img_coor(sam2 ,res_img2)
        mask2 = (mask2*255).astype(np.uint8)
        nobg2 = get_masked_img(res_img2 ,mask2)
        # horizontal_concat_images([mask1,nobg1,mask2,nobg2]).save('tmp_pose.jpg')

        # Background of the real target image with the person removed.
        env2 = process_env( mask2 , res_img2 )
        
        # Pose map enriched with the target environment.
        new_pose_img2 = merge(env2, mask2 , pose_img2)
        
        # horizontal_concat_images([env2,new_pose_img2]).save('tmp_pose.jpg')
        
        # new control = [person-only photo | pose+env map];
        # new result  = [person-only photo | real target image].
        new_control = horizontal_concat_images([nobg1, new_pose_img2],gap=0,line_width=0)
        new_result = horizontal_concat_images([nobg1, res_img2],gap=0,line_width=0)

        # new_control.save('tmp_pose.jpg')
        # new_result.save('tmp_pose.jpg')

        new_control_pil = process_img_1024('',img_pil=new_control,target_shape=(2048,1024))
        new_result_pil = process_img_1024('',img_pil=new_result,target_shape=(2048,1024))

        # new_control_pil.save('tmp_pose.jpg')
        # new_result_pil.save('tmp_pose.jpg')

        input_img = horizontal_concat_images( [new_result_pil, new_control_pil]
                                                    ,gap=0,line_width=0 )
        input_img.save('tmp_pose.jpg')
        input_img_save_path = osj( save_datasets_dir, control_name )
        input_img.save( input_img_save_path )

        # First 100 processed samples go to train, the rest to val.
        will_save['val' if id >= 100 else 'train'].append({
            'prompt':input_prompt,
            'input':input_img_save_path,
        })
        # break


    with open(save_json_file, 'w', encoding='utf-8') as f:
        json.dump(will_save, f, indent=2, ensure_ascii=False)

    # NOTE(review): the second half of this message is not an f-string, so
    # "{save_json_file}" prints literally — likely a bug.
    print(f"Saved {len(will_save['train'])} train samples and {len(will_save['val'])} "+\
            "val samples to {save_json_file}")

'''
Post-processing step: fill the black person background with white in the
reference-person tiles (new_control[0] and new_result[0]).

Images are read from the save dir produced by the previous stage.
'''
def get_masked_img_filled_white(img, mask):
    """Keep only the masked (white) region of *img* on a WHITE canvas.

    Same as ``get_masked_img`` except pixels outside the mask become
    white (255) instead of black.
    img:  source image; RGBA input has its alpha channel dropped.
    mask: white (255) marks pixels to keep.

    Returns a three-channel RGB PIL image.
    """
    pixels = np.array(img)
    mask_arr = np.array(mask)
    # Binarize the mask so only 0 / 255 values remain.
    mask_arr = (mask_arr > 127).astype(np.uint8) * 255

    # Drop an alpha channel when present.
    if pixels.shape[2] == 4:
        rgb = pixels[:, :, :3]
    else:
        rgb = pixels.copy()

    # White canvas; copy through only the masked pixels.
    out = np.full_like(rgb, 255)
    keep = mask_arr > 10
    out[keep] = rgb[keep]

    return Image.fromarray(out, 'RGB')
def process_again_invert_black_bg(ori_dir,save_dir):
    """Whiten the person-tile background of saved 4096x1024 concat images.

    Each source image is four 1024x1024 tiles laid out as
    [result_person, result_img2, control_person, control_pose].
    The reference-person tile (index 0, duplicated at index 2) is
    re-masked with RMBG and its black background is filled with white;
    the four tiles are then re-concatenated and written to *save_dir*
    under the same filename.

    ori_dir:  directory holding the images plus a `names.txt` index.
    save_dir: output directory (created if missing).
    """
    model_rmbg = load_rmbg()
    os.makedirs(save_dir, exist_ok=True)

    names_path = osj( ori_dir , 'names.txt' )
    assert ose(names_path), names_path

    with open(names_path, encoding='utf-8') as f:
        names = [line.strip() for line in f if line.strip().endswith('.png')]

    for name in tqdm(names):
        img_path = osj(ori_dir , name)

        img = Image.open(img_path)
        w, h = img.size
        assert w == 4096 and h == 1024, f"Unexpected image size: {img.size} for {img_path}"

        # Cut the concat image into its four 1024x1024 tiles.
        crops = []
        for i in range(4):
            left = i * 1024
            right = (i + 1) * 1024
            crop = img.crop((left, 0, right, 1024))
            crops.append(crop)

        # Extract the person mask from the first tile, then fill the
        # non-person (black) area with white.
        img = crops[0]
        mask, _ = get_mask_and_masked_img( get_mask_by_rmbg,model_rmbg,img )
        nobg = get_masked_img_filled_white(img ,mask)

        # The same person tile appears in both the result half (index 0)
        # and the control half (index 2); replace both.
        crops[0] = crops[2] = nobg

        input_img = horizontal_concat_images( crops, gap=0, line_width=0 )
        input_img.save('tmp_pose.jpg')  # debug preview of the last sample

        # BUG FIX: removed a leftover pdb.set_trace() here that paused the
        # loop on every image and made unattended batch runs impossible.

        input_img_save_path = osj( save_dir, name )
        input_img.save( input_img_save_path )

def process_json_file(img_dir,save_json_file_path):
    """Build the train/val JSON index for images already saved in *img_dir*.

    Reads ``names.txt`` in *img_dir*, assigns the first 100 entries to the
    train split and the rest to val, and writes
    ``{'train': [...], 'val': [...]}`` records of (prompt, image path) to
    *save_json_file_path*.
    """
    names_path = osj( img_dir , 'names.txt' )
    assert ose(names_path), names_path

    with open(names_path, encoding='utf-8') as f:
        names = [line.strip() for line in f if line.strip().endswith('.png')]

    will_save = {
        'train':[],
        'val':[],
        }

    # NOTE(review): sides are swapped relative to the module-level prompt
    # and "refence" typo is kept — the trained model may expect this text.
    input_prompt = "refcontrolPoseEnv change pose and env to photo with reference from right side," + \
                "change person to photo with refence from left side"

    for idx, name in tqdm(enumerate(names)):
        input_img_save_path = osj( img_dir , name )

        # First 100 entries -> train, remainder -> val.
        will_save['val' if idx >= 100 else 'train'].append({
                'prompt':input_prompt,
                'input':input_img_save_path,
            })

    with open(save_json_file_path, 'w', encoding='utf-8') as f:
        json.dump(will_save, f, indent=2, ensure_ascii=False)

    # BUG FIX: the tail of this message was a plain (non-f) string, so
    # "{save_json_file}" was printed literally — and it referenced the
    # wrong name; report the actual output path instead.
    print(f"Saved {len(will_save['train'])} train samples and "
          f"{len(will_save['val'])} val samples to {save_json_file_path}")

def build_dir_for_aitoolkit():
    """Lay out saved 4096x1024 concat images into ai-toolkit directories.

    For each image: the left 2048x1024 half is the target, the right
    2048x1024 half is the control. The first 100 names go to the train
    directories, the rest to the val directories. Each target gets a
    sibling .txt caption containing the module-level ``input_prompt``.
    """
    # "ori" dirs hold the control .png images; "tar" dirs hold the target
    # .png plus its prompt .txt (ai-toolkit folder layout).
    posenv_ori_dir = '/mnt/nas/shengjie/datasets/KontextRefControl_poseenv_white_ori' # control
    posenv_tar_dir = '/mnt/nas/shengjie/datasets/KontextRefControl_poseenv_white_tar' # folder
    posenv_val_ori_dir = '/mnt/nas/shengjie/datasets/KontextRefControl_poseenv_white_val_ori' # control
    posenv_val_tar_dir = '/mnt/nas/shengjie/datasets/KontextRefControl_poseenv_white_val_tar' # folder
    for out_dir in (posenv_ori_dir, posenv_tar_dir,
                    posenv_val_ori_dir, posenv_val_tar_dir):
        os.makedirs(out_dir, exist_ok=True)

    source_dir = '/mnt/nas/shengjie/datasets/KontextRefControl_poseenv_white'
    names_path = osj( source_dir , 'names.txt' )
    assert ose(names_path), names_path

    with open(names_path, encoding='utf-8') as f:
        names = [line.strip() for line in f if line.strip().endswith('.png')]

    for idx, name in tqdm(enumerate(names)):
        concat_posenv_img = Image.open(osj( source_dir, name ))
        w, h = concat_posenv_img.size
        # Left half = target image, right half = control image.
        img_tar_pil = concat_posenv_img.crop( (0, 0, 2048, 1024) )
        img_ori_pil = concat_posenv_img.crop( (w - 2048, h - 1024, w, h) )

        # First 100 names are the train split, the rest val.
        is_train = idx < 100
        ori_path = osj( posenv_ori_dir if is_train else posenv_val_ori_dir, name )
        tar_path = osj( posenv_tar_dir if is_train else posenv_val_tar_dir, name )
        txt_path = osj( posenv_tar_dir if is_train else posenv_val_tar_dir,
                        name.replace('.png', '.txt') )

        img_ori_pil.save( ori_path )
        img_tar_pil.save( tar_path )
        # Caption file for ai-toolkit; uses the module-level input_prompt.
        with open(txt_path, 'w', encoding='utf-8') as f:
            f.write(input_prompt)


def process_data():
    """Run the post-processing and JSON-index stages of the pipeline."""
    # Directory of white-background images (output of
    # process_again_invert_black_bg, currently commented out).
    save_datasets_dir_bg_white = '/mnt/nas/shengjie/datasets/KontextRefControl_poseenv_white'
    # process_again_invert_black_bg(save_datasets_dir,save_datasets_dir_bg_white)

    # Build the train/val JSON index from the saved images.
    json_out_path = './data_poseenv_control_100.json'
    process_json_file(save_datasets_dir_bg_white, json_out_path)

def split_data():
    """Entry point: lay the dataset out into ai-toolkit directories."""
    build_dir_for_aitoolkit()

if __name__=='__main__':
    # Current pipeline stage: directory layout for ai-toolkit training.
    split_data()