# Bootstrap: make the parent directory importable so the `utils.*` modules
# below resolve when this script is run directly (not installed as a package).
import os,sys
osa = os.path.abspath   # shorthand for os.path.abspath
osd = os.path.dirname   # shorthand for os.path.dirname
cur_dir = osd(osa(__file__))  # directory containing this script
par_dir = osd(cur_dir)        # one level up: the project root
sys.path.insert(0,par_dir)

import argparse

def parse_args():
    """Parse command-line arguments.

    Returns:
        argparse.Namespace with a single attribute:
        cuda_id (str): CUDA device id string, e.g. "0" or "0,1".
    """
    parser = argparse.ArgumentParser(description='示例脚本：接收CUDA设备和端口参数')
    parser.add_argument(
        '-c', '--cuda_id',
        type=str,
        required=True,
        help='CUDA设备ID，例如 "0" 或 "0,1"（字符串类型）',
    )
    return parser.parse_args()

# Parse CLI args at import time: CUDA_VISIBLE_DEVICES must be set BEFORE
# torch / diffusers are imported further down, or the device mask is ignored.
args = parse_args()

import os,sys,pdb  # os/sys already imported above; pdb kept for interactive debugging
os.environ['CUDA_VISIBLE_DEVICES']=args.cuda_id

from utils.util_for_os import ose,osj

# Tagbliton/kontext_extract_clothes_lora
# Training-output directory that holds the `checkpoint-<id>` subdirectories.
par_lora_dir = '/mnt/nas/shengjie/depredux_output_rank16_0923'

def lora_dir(ckpt_id):
    """Return the checkpoint directory saved at training step `ckpt_id`.

    Was a lambda assigned to a name (PEP 8 E731) whose parameter `id`
    shadowed the builtin; a plain `def` keeps the call sites unchanged.
    """
    return f'{par_lora_dir}/checkpoint-{ckpt_id}'

# Filename of the LoRA weights inside each checkpoint directory.
lora_name = 'pytorch_lora_weights.safetensors'

# Validation dataset root and its metadata (one JSON object per line).
val_dir = '/mnt/nas/shengjie/datasets/KontextRefControl_depredux/val'
val_json_file = f'{val_dir}/metadata.jsonl'

def get_lora_path(lora_dir, lora_name):
    """Join a checkpoint directory and the weights filename into a full path."""
    return osj(lora_dir, lora_name)

import torch
from diffusers import FluxKontextPipeline
from PIL import Image

import gc

from utils.MODEL_CKP import FLUX_KONTEXT
from utils.util_flux import process_img_1024,vertical_concat_images,horizontal_concat_images

# Load the base FLUX Kontext pipeline once; per-checkpoint LoRA weights are
# swapped in later inside val_transformer.
pipe = FluxKontextPipeline.from_pretrained(FLUX_KONTEXT, 
                                           torch_dtype=torch.bfloat16)
# Alimama turbo LoRA for 8-step inference (currently disabled)
# pipe.load_lora_weights("/data/models/FLUX.1-Turbo-Alpha")                                           
pipe.to("cuda")

def val_transformer(pipeline,
                    jsonl_file,
                    start_id=6000,
                    step=500,
                    target_shape=(1776, 576)):
    """Validate every LoRA checkpoint from `start_id` up to the newest on disk.

    For each `checkpoint-<id>` under `par_lora_dir` (stepping by `step`), loads
    that checkpoint's LoRA into `pipeline`, generates one image per sample in
    `jsonl_file`, and saves a vertical (control | generated | target)
    concatenation under `par_lora_dir/val_<id>/`.

    Args:
        pipeline: a FluxKontextPipeline already moved to the target device.
        jsonl_file: path to metadata.jsonl; each line is a JSON object with
            keys "file_name" (target image), "control_image", and "prompt".
        start_id: first checkpoint id to validate (was hard-coded 6000).
        step: stride between checkpoint ids (was hard-coded 500).
        target_shape: (width, height) images are resized to (was hard-coded).
    """
    import json

    # The newest checkpoint on disk determines where the sweep ends.
    ckpt_ids = [int(d.split('-')[-1]) for d in os.listdir(par_lora_dir)
                if d.startswith('checkpoint')]
    end_id = max(ckpt_ids)

    # The validation set is identical for every checkpoint — read it once
    # instead of once per checkpoint iteration.
    val_data = []
    with open(jsonl_file, encoding='utf-8') as f:
        for line in f:
            val_data.append(json.loads(line.strip()))

    for ckpt_id in range(start_id, end_id + 1, step):  # `ckpt_id`: was `id`, which shadowed the builtin
        val_save_dir = os.path.join(par_lora_dir, f'val_{ckpt_id}')
        os.makedirs(val_save_dir, exist_ok=True)

        # Free whatever the previous checkpoint left behind before loading.
        torch.cuda.empty_cache()
        gc.collect()

        print('id start with ', ckpt_id)

        # Drop the previous checkpoint's adapter first; repeated
        # load_lora_weights calls otherwise accumulate adapters in diffusers.
        pipeline.unload_lora_weights()
        lora_path = get_lora_path(lora_dir(ckpt_id), lora_name)
        # NOTE: was `pipe.load_lora_weights(...)` — using the module-level
        # global instead of the `pipeline` parameter; fixed to the parameter.
        pipeline.load_lora_weights(lora_path)

        device = pipeline.device
        weight_dtype = pipeline.dtype
        autocast_ctx = torch.autocast(device.type, weight_dtype)

        for dict_data in val_data:
            prom = dict_data["prompt"]
            img_path = osj(val_dir, dict_data["control_image"])
            img_tar_path = osj(val_dir, dict_data["file_name"])

            img_ori_pil = process_img_1024(img_path, target_shape=target_shape)
            img_target_pil = process_img_1024(img_tar_path, target_shape=target_shape)

            with autocast_ctx:
                image = pipeline(
                    prompt=prom,
                    image=img_ori_pil,
                    num_inference_steps=30,
                    guidance_scale=2.5,
                    # Fixed seed so outputs are comparable across checkpoints.
                    generator=torch.Generator(device=device).manual_seed(42),
                    max_sequence_length=512,
                    height=img_ori_pil.height,
                    width=img_ori_pil.width,
                ).images[0]

            # Resize the generated image back to the control image's size.
            image = process_img_1024('', image,
                                     img_ori_pil.size,)

            concat_img = vertical_concat_images([
                img_ori_pil, image, img_target_pil
            ])
            # Quick-look copy, overwritten on every sample.
            concat_img.save('tmp_depredux.jpg')
            concat_img.save(os.path.join(
                val_save_dir,
                f'{ckpt_id}_{os.path.basename(img_path)}'))

        print('val end..........')

        


if __name__ == "__main__":
    # Sweep all saved checkpoints against the validation metadata.
    val_transformer(pipe, val_json_file)