
import os
import sys

# Shorthand aliases for the os.path helpers used throughout this script.
osa = os.path.abspath
osd = os.path.dirname

# Put this script's parent directory on sys.path so sibling packages
# (e.g. utils/) can be imported when the file is run directly.
cur_dir = osd(osa(__file__))
par_dir = osd(cur_dir)
sys.path.insert(0, par_dir)



import os
import json,random
from tqdm import tqdm
import shutil
from PIL import Image

from utils.util_for_os import osj,ose
from utils.util_flux import process_img_1024

'''
1, read names.txt
2, 前 4000 for train
   后 ~   for val
3, 读取 一张图 = image
   构建相同大小的mask, [ 纯白 | 纯黑 | 纯黑 ] = mask
   分别 save (train | val)/(image | mask_image)
'''

# Source dataset. Each image is three 768x1024 panels side by side (w=2304, h=1024).
# Alternative paired dataset: '/mnt/nas/shengjie/datasets/KontextRefControl_tryon'
tryon_dir = '/mnt/nas/shengjie/datasets/KontextRefControl_tryon_unpaired' # 4062

'''
分别遍历tryon dir 随机抽取其中 1k + 5 张图片（注：每张图片为 w=768*3 h=1024）
并且对每张图片构建相同大小的mask, [ 纯白 | 纯黑 | 纯黑 ] 样式是统一的，文件名与原图相同 存放到 mask_dir
'''
# Lazily-built shared mask (created on the first image, then reused).
mask = None

# Read names.txt, keeping only non-empty lines.
tryon_names_path = osj(tryon_dir, 'names.txt')
with open(tryon_names_path, "r") as f:
    all_names = [ln.strip() for ln in f if ln.strip()]

# Keep only .jpg entries, then shuffle.
# NOTE(review): no random seed is set, so the train/val split is not
# reproducible across runs — confirm this is intended.
jpg_names = [n for n in all_names if n.lower().endswith('.jpg')]
random.shuffle(jpg_names)

# Output dataset root.
# Alternative: "/mnt/nas/shengjie/datasets/FluxFill_tryon_4000"
base_dir = "/mnt/nas/shengjie/datasets/FluxFill_tryon_unpaired"

# Layout: (train | val)/(image | mask_image)
train_image_dir = os.path.join(base_dir, "train/image")
train_mask_dir = os.path.join(base_dir, "train/mask_image")
val_image_dir = os.path.join(base_dir, "val/image")
val_mask_dir = os.path.join(base_dir, "val/mask_image")
for _dir in (base_dir, train_image_dir, train_mask_dir, val_image_dir, val_mask_dir):
    os.makedirs(_dir, exist_ok=True)


# Fixed prompt attached to every sample.
prompt = "[tryon] let the person try on the middle garment and keep the right pose"

# Train/val split: with train_num = 0 every image goes to the val split.
train_num = 0
train_names = jpg_names[:train_num]
val_names = jpg_names[train_num:]

def process_tryon_split(names, image_dir, mask_dir, meta_path):
    """Copy each tryon image into *image_dir*, save a matching inpaint mask
    into *mask_dir*, and write one jsonl metadata line per sample to *meta_path*.

    The mask is white on the left third (the person panel to repaint) and
    black elsewhere. It is cached in the module-level ``mask`` and rebuilt
    only when an image with a different size is encountered.
    """
    # ``mask`` is the shared module-level cache; ``prompt`` is only read, so
    # it needs no global declaration.
    global mask
    meta_data = []
    for name in tqdm(names, desc=f"Processing {meta_path}"):
        img_path = osj(tryon_dir, name)
        # Context manager closes the underlying file handle (PIL opens files
        # lazily; without this, thousands of descriptors stay open).
        with Image.open(img_path) as img:
            w, h = img.size
            # Build mask: left 1/3 white, right 2/3 black. Rebuild if this
            # image's size differs from the cached mask (previously the
            # first image's mask was silently reused for every size).
            if mask is None or mask.size != (w, h):
                mask = Image.new("RGB", (w, h), (0, 0, 0))
                white_w = w // 3
                white_img = Image.new("RGB", (white_w, h), (255, 255, 255))
                mask.paste(white_img, (0, 0))

            # Output file names: <stem>_image.jpg / <stem>_mask.jpg
            stem, ext = os.path.splitext(name)
            image_name = f"{stem}_image{ext}"
            mask_name = f"{stem}_mask{ext}"

            img.save(osj(image_dir, image_name))
        mask.save(osj(mask_dir, mask_name))

        # Paths in metadata are relative to the split directory.
        meta_data.append({
            "file_name": f"image/{image_name}",
            "mask_image": f"mask_image/{mask_name}",
            "prompt": prompt,
        })
    # One JSON object per line (jsonl). ensure_ascii=False keeps any
    # non-ASCII prompt text readable (the current prompt is pure ASCII,
    # so output is unchanged).
    with open(meta_path, "w") as f:
        for item in meta_data:
            f.write(json.dumps(item, ensure_ascii=False) + "\n")

# 处理训练集
# Build both splits: images + masks + metadata.jsonl for each.
for split_names, split_image_dir, split_mask_dir, split in (
    (train_names, train_image_dir, train_mask_dir, "train"),
    (val_names, val_image_dir, val_mask_dir, "val"),
):
    process_tryon_split(
        split_names,
        split_image_dir,
        split_mask_dir,
        osj(base_dir, f"{split}/metadata.jsonl"),
    )

# Top-level dataset description for downstream consumers.
dataset_info = {
    "train_size": len(train_names),
    "val_size": len(val_names),
    "resolution": "2304*1024",
    "default_prompt": prompt,
}
with open(osj(base_dir, "dataset_info.json"), "w") as f:
    json.dump(dataset_info, f, indent=2)

print("✅ JSONL 数据集生成完成！")
