
# Make the project root importable so the local `utils` package resolves
# when this script is executed directly.
import os
import sys

osa = os.path.abspath   # shorthand aliases kept from the project convention
osd = os.path.dirname
cur_dir = osd(osa(__file__))   # directory holding this script
par_dir = osd(cur_dir)         # project root, one level up
sys.path.insert(0, par_dir)



import os
import json,random
from tqdm import tqdm
import shutil
from PIL import Image

from utils.util_for_os import osj,ose
from utils.util_flux import process_img_1024

# NOTE(review): the next three lines look like leftovers from a depth-control
# script — this file processes "puthere" pairs, not depth maps; confirm and drop.
# /mnt/nas/shengjie/datasets/dataset_depth_control_depth/
# control: ori depth
# file: restored depth
'''
Pipeline:
1. Read names.txt.
2. The first 940 shuffled names go to train (see train_num below),
   the remainder to val.
3. Each image is split down the middle:
   left half = target, right half = control image.
   Halves are saved under (train | val)/(image | control_image).
'''

# Source directory holding side-by-side (target|control) jpgs plus a names.txt index.
puthere_dir = '/mnt/nas/shengjie/datasets/puthere_data_process_949'
puthere_names_path = osj(puthere_dir, 'names.txt')
# Read names.txt, dropping blank lines.
with open(puthere_names_path, "r") as f:
    all_names = [line.strip() for line in f if line.strip()]

# Keep only file names ending in .jpg (case-insensitive).
jpg_names = [name for name in all_names if name.lower().endswith('.jpg')]
# Shuffle with a fixed seed so the train/val split is reproducible across
# runs; a private Random instance leaves the global RNG state untouched.
random.Random(0).shuffle(jpg_names)

# Root of the generated dataset.
base_dir = "/mnt/nas/shengjie/datasets/KontextRefControl_puthere_949"
os.makedirs(base_dir, exist_ok=True)

# Create the output layout: (train|val)/(image|control_image).
_split_dirs = {}
for _split in ("train", "val"):
    for _sub in ("image", "control_image"):
        _path = os.path.join(base_dir, _split, _sub)
        os.makedirs(_path, exist_ok=True)
        _split_dirs[(_split, _sub)] = _path

train_image_dir = _split_dirs[("train", "image")]
train_control_dir = _split_dirs[("train", "control_image")]
val_image_dir = _split_dirs[("val", "image")]
val_control_dir = _split_dirs[("val", "control_image")]


# Fixed prompt shared by every sample.
prompt = "[puthere] put it here"

# Train/val partition: the first 940 shuffled names train, the rest val.
train_num = 940
train_names = jpg_names[:train_num]
val_names = jpg_names[train_num:]

def process_puthere_split(names, image_dir, control_dir, meta_path):
    """Split each side-by-side image into target/control halves and save them.

    For every name the source image is split down the middle: the left half
    is the target, the right half is the control image. Both halves are
    normalized via process_img_1024 and saved; a metadata.jsonl describing
    the pairs is written to meta_path.

    Args:
        names: image file names relative to the module-level `puthere_dir`.
        image_dir: output directory for target (left-half) images.
        control_dir: output directory for control (right-half) images.
        meta_path: path of the metadata.jsonl file to write.
    """
    meta_data = []
    for name in tqdm(names, desc=f"Processing {meta_path}"):
        img_path = osj(puthere_dir, name)
        # Close the underlying file handle promptly instead of leaking it
        # until GC — this loop may open ~1000 files.
        with Image.open(img_path) as img:
            w, h = img.size
            # Left half = target, right half = control (crop loads the data,
            # so both halves stay valid after the source image is closed).
            img_left = img.crop((0, 0, w // 2, h))
            img_right = img.crop((w // 2, 0, w, h))
        img_left = process_img_1024('', img_pil=img_left)
        img_right = process_img_1024('', img_pil=img_right)

        # Derive per-half output names from the source file name.
        stem, ext = os.path.splitext(name)
        image_name = f"{stem}_image{ext}"
        control_name = f"{stem}_control{ext}"

        img_left.save(osj(image_dir, image_name))
        img_right.save(osj(control_dir, control_name))

        # Paths in the metadata are relative to the split directory.
        meta_data.append({
            "file_name": f"image/{image_name}",
            "control_image": f"control_image/{control_name}",
            "prompt": prompt
        })
    # Write one JSON object per line (jsonl).
    with open(meta_path, "w") as f:
        for item in meta_data:
            f.write(json.dumps(item) + "\n")

# Generate both partitions with the same routine.
for _names, _img_dir, _ctrl_dir, _meta in (
    (train_names, train_image_dir, train_control_dir,
     osj(base_dir, "train/metadata.jsonl")),
    (val_names, val_image_dir, val_control_dir,
     osj(base_dir, "val/metadata.jsonl")),
):
    process_puthere_split(_names, _img_dir, _ctrl_dir, _meta)

# Write a small summary file describing the generated dataset.
dataset_info = dict(
    train_size=len(train_names),
    val_size=len(val_names),
    resolution="1024*1024",
    default_prompt=prompt,
)
with open(osj(base_dir, "dataset_info.json"), "w") as f:
    json.dump(dataset_info, f, indent=2)

print("✅ JSONL 数据集生成完成！")
