
import os,sys
osa = os.path.abspath
osd = os.path.dirname
cur_dir = osd(osa(__file__))
par_dir = osd(cur_dir)
sys.path.insert(0,par_dir)



import os,torch
import json,random
from tqdm import tqdm
import shutil
from PIL import Image

from utils.util_for_os import osj,ose
from utils.util_flux import process_img_1024
from utils.MODEL_CKP import RMBG

from demo_rmbg import  get_masked_img
def load_rmbg():
    """Load the RMBG background-removal segmentation model.

    Returns:
        The pretrained model, moved to ``cuda:0`` when CUDA is available
        (CPU otherwise) and switched to eval mode, since this script only
        runs inference through ``get_masked_img``.
    """
    from transformers import AutoModelForImageSegmentation
    model = AutoModelForImageSegmentation.from_pretrained(RMBG, trust_remote_code=True)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model.to(device)
    # Inference-only usage: disable dropout and use running batch-norm stats.
    model.eval()
    return model
'''
Plan:
1. Given a clothing (clo) dir and a human dir.
2. Sample 1k + 10 images from clo, and fetch the matching human images.
3. 1k for train and 10 for val.
   clo   -> target image
   human -> control image

Extra processing:
   clo -> rmbg (background removal) -> save
   Unify resolution to 1024 x 1024
'''

# ---- configuration -------------------------------------------------------
# clo_type='upper'
# clo_type='lower'
clo_type='dresses'
# Fixed prompt shared by every sample in this dataset.
prompt = f"[{clo_type}tryoff] tryoff {clo_type} clothing from control image"
sample_num = 1010  # 1000 train + 10 val

extractclo_dir = f'/mnt/nas/shengjie/datasets/DressCode_1024/{clo_type}/cloth'
extractclo_human_dir = f'/mnt/nas/shengjie/datasets/DressCode_1024/{clo_type}/image'
extractclo_names_path = osj(extractclo_dir, 'names.txt')

# Read names.txt, drop blank lines, shuffle, keep the first `sample_num`.
# NOTE(review): shuffle is unseeded, so each run produces a different sample.
with open(extractclo_names_path, "r") as names_file:
    all_names = [entry.strip() for entry in names_file if entry.strip()]
random.shuffle(all_names)
all_names = all_names[:sample_num]

# Keep only entries ending in .jpg (names.txt may list other file types).
jpg_names = [name for name in all_names if name.lower().endswith('.jpg')]

# Output dataset root: wiped and recreated from scratch on every run.
base_dir = f"/mnt/nas/shengjie/datasets/KontextRefControl_extractclo_{clo_type}"
if os.path.exists(base_dir):
    shutil.rmtree(base_dir)
# makedirs (not mkdir): also creates any missing parent directories,
# so the script does not crash on a fresh mount/machine.
os.makedirs(base_dir)

# Split sub-directories, laid out as <base>/<split>/{image,control_image}.
train_image_dir = os.path.join(base_dir, "train", "image")
train_control_dir = os.path.join(base_dir, "train", "control_image")
val_image_dir = os.path.join(base_dir, "val", "image")
val_control_dir = os.path.join(base_dir, "val", "control_image")

for split_dir in (train_image_dir, train_control_dir, val_image_dir, val_control_dir):
    os.makedirs(split_dir, exist_ok=True)


# Train/val split: first 1000 shuffled names go to train, the rest (<=10) to val.
train_num = 1000
train_names = jpg_names[:train_num]
val_names = jpg_names[train_num:]

def process_extractclo_split(names, image_dir, control_dir, meta_path):
    """Build one dataset split.

    For each name: run the clothing image through RMBG (white background),
    resize the paired human image to the target resolution, save both, and
    write one metadata.jsonl line per pair.

    Args:
        names: clothing file names (DressCode ``*_1.jpg`` convention).
        image_dir: output dir for processed clothing (target) images.
        control_dir: output dir for processed human (control) images.
        meta_path: path of the metadata.jsonl file to write.
    """
    records = []
    for name in tqdm(names, desc=f"Processing {meta_path}"):
        clo_path = osj(extractclo_dir, name)  # clothing image, suffix _1
        # NOTE(review): str.replace swaps EVERY '_1' occurrence; assumes the
        # id part of a DressCode name never contains '_1' — confirm.
        human_path = osj(extractclo_human_dir, name.replace('_1', '_0'))  # human, suffix _0

        # Clothing: background removed and composited onto white.
        _, clo_img = get_masked_img(clo_path, rmbg_model, 'white')
        # Human: resized/normalized by the shared 1024 helper.
        human_img = process_img_1024(human_path)

        # Output file names derived from the source name.
        stem, ext = os.path.splitext(name)
        image_name = f"{stem}_image{ext}"
        control_name = f"{stem}_control{ext}"

        human_img.save(osj(control_dir, control_name))  # control (human)
        clo_img.save(osj(image_dir, image_name))        # target (clothing)

        records.append({
            "file_name": f"image/{image_name}",
            "control_image": f"control_image/{control_name}",
            "prompt": prompt
        })

    # One JSON object per line (jsonl).
    with open(meta_path, "w") as f:
        f.writelines(json.dumps(item) + "\n" for item in records)

# Load the background-removal model once; shared by both splits.
rmbg_model = load_rmbg()

# Process the train and val splits with identical logic.
for split_names, split_image_dir, split_control_dir, split in (
    (train_names, train_image_dir, train_control_dir, "train"),
    (val_names, val_image_dir, val_control_dir, "val"),
):
    process_extractclo_split(
        split_names,
        split_image_dir,
        split_control_dir,
        osj(base_dir, f"{split}/metadata.jsonl")
    )

# Dataset-level description file.
# NOTE(review): "768*1024" disagrees with the header comment's 1024x1024 —
# confirm against process_img_1024's actual output size.
dataset_info = {
    "train_size": len(train_names),
    "val_size": len(val_names),
    "resolution": "768*1024", # h w
    "default_prompt": prompt
}
with open(osj(base_dir, "dataset_info.json"), "w") as f:
    json.dump(dataset_info, f, indent=2)

print("✅ JSONL 数据集生成完成！")
