# import torch

# from utils.MODEL_CKP import FLUX_KONTEXT
# from utils.util_for_os import ose,osj
# from diffusers import FluxKontextPipeline

# pipe = FluxKontextPipeline.from_pretrained(
#     FLUX_KONTEXT,
#     torch_dtype="bfloat16"
# )
# pipe.load_lora_weights("/data/models/FLUX.1-Turbo-Alpha")                                           

# par_lora_dir = '/mnt/nas/shengjie/resdepth_output_0922'
# lora_dir = lambda id: f'{par_lora_dir}/checkpoint-{id}'
# lora_name = 'pytorch_lora_weights.safetensors'

# id = 3000
# def get_lora_path(lora_dir , lora_name):
#     return osj( lora_dir , lora_name )
# lora_path = get_lora_path( lora_dir(id) , lora_name )

# pipe.load_lora_weights(lora_path)
# pipe = pipe.to(dtype=torch.bfloat16)

# 4. Save as a single file (sharding is handled automatically)
# pipe.save_pretrained(
#     "/mnt/nas/shengjie/merged_kontext_model",
#     safe_serialization=True,
#     max_shard_size="30GB",
# )

import os
from tqdm import tqdm
from safetensors.torch import load_file, save_file

def main():
    """Script entry point: merge the resdepth LoRA into the base Kontext
    checkpoint at full strength using the uniform 'blend' strategy."""
    merge_lora_with_checkpoint({
        'lora_file': 'kontext-resdepth-lora.safetensors',
        'checkpoint_file': 'flux1-kontext-dev.safetensors',
        'merge_type': 'blend',
        'merge_ratio': 1.0,
    })

# Main entry: merge LoRA weights into the base checkpoint weights.
def merge_lora_with_checkpoint(config):
    """Merge a LoRA safetensors file into a base checkpoint and save the result.

    Expected keys in ``config``:
        lora_file (str): file name of the LoRA weights inside the LoRA dir.
        checkpoint_file (str): file name of the base checkpoint.
        merge_type (str): 'blend' applies one global ratio to every layer;
            any other value uses per-layer ratios from ``merge_weights``.
        merge_ratio (float, optional): global blend ratio, also embedded in
            the output file name. Defaults to 1.0 when absent.
        merge_weights (dict): per-layer ratios, required only when
            ``merge_type`` is not 'blend'.
    """
    print(f"\nStarting the merge process with configuration: {config}")

    # Directories for the LoRA, the base checkpoint, and the merged output.
    # NOTE(review): hard-coded machine-local paths — consider making these configurable.
    lora_dir = "/home/shengjie/code/ComfyUI/models/loras"
    checkpoint_dir = "/home/shengjie/code/ComfyUI/models/diffusion_models"
    output_dir = "/data/models/Kontext-restoredepth/"

    # Build the full paths to both weight files.
    lora_path = os.path.join(lora_dir, config['lora_file'])
    checkpoint_path = os.path.join(checkpoint_dir, config['checkpoint_file'])

    # Make sure the output directory exists.
    os.makedirs(output_dir, exist_ok=True)

    # Load both sets of weights from disk.
    lora_data = load_file(lora_path)
    checkpoint_data = load_file(checkpoint_path)

    # Pick the merge strategy based on the configuration.
    if config['merge_type'] == 'blend':
        # Uniform merge: one ratio shared by every layer.
        merged_model = full_merge(lora_data, checkpoint_data, config['merge_ratio'])
    else:
        # Selective merge: each layer may have its own ratio.
        merged_model = selective_merge(lora_data, checkpoint_data, config['merge_weights'])

    # BUG FIX: the selective path previously required config['merge_ratio'] as
    # well (KeyError when only 'merge_weights' was supplied); default to 1.0
    # here since the ratio is only used to label the output file name.
    save_merged_model(merged_model, output_dir, config['lora_file'],
                      config['checkpoint_file'], config.get('merge_ratio', 1.0))

    print("Merge completed successfully!")

# Uniform merge: every layer is combined with the same blend ratio.
def full_merge(lora_data, checkpoint_data, ratio):
    """Blend ``lora_data`` into ``checkpoint_data`` with one global ``ratio``.

    Layers present in both dicts become ``checkpoint + ratio * lora``; layers
    found only in the checkpoint are copied through unchanged, and layers
    found only in the LoRA are stored scaled by ``ratio``.
    """
    all_keys = set(lora_data) | set(checkpoint_data)
    merged = {}

    for key in tqdm(all_keys, desc="Merging Layers", unit="layer"):
        in_ckpt = key in checkpoint_data
        in_lora = key in lora_data
        if in_ckpt and in_lora:
            # Both sides have the layer: blend them.
            merged[key] = checkpoint_data[key] + ratio * lora_data[key]
        elif in_ckpt:
            # Checkpoint-only layer: pass through as-is.
            merged[key] = checkpoint_data[key]
        else:
            # LoRA-only layer: keep it, scaled by the ratio.
            merged[key] = ratio * lora_data[key]

    return merged

# Per-layer merge: each layer may use its own blend ratio.
def selective_merge(lora_data, checkpoint_data, merge_weights):
    """Merge with per-layer ratios taken from ``merge_weights``.

    Layers listed in ``merge_weights`` become ``checkpoint + ratio * lora``
    (a missing side contributes 0). Unlisted layers are copied from the
    checkpoint when present, otherwise taken unscaled from the LoRA.
    """
    merged = {}
    all_keys = set(checkpoint_data) | set(lora_data)

    for key in tqdm(all_keys, desc="Selective Merging", unit="layer"):
        if key in merge_weights:
            # An explicit ratio was supplied for this layer.
            merged[key] = (checkpoint_data.get(key, 0)
                           + merge_weights[key] * lora_data.get(key, 0))
        else:
            # No ratio given: prefer the checkpoint weight, else the LoRA one.
            merged[key] = checkpoint_data.get(key, lora_data.get(key))

    return merged

# Persist the merged state dict to disk as a safetensors file.
def save_merged_model(merged_data, output_dir, lora_file, checkpoint_file, ratio):
    """Write ``merged_data`` into ``output_dir``, naming the file after both
    source files and the blend ratio expressed as an integer percentage."""
    base_ckpt, _ = os.path.splitext(checkpoint_file)
    base_lora, _ = os.path.splitext(lora_file)
    # File name encodes checkpoint, LoRA, and ratio (e.g. "..._r100.safetensors").
    out_name = f"{base_ckpt}_merged_with_{base_lora}_r{int(ratio * 100)}.safetensors"
    save_file(merged_data, os.path.join(output_dir, out_name))
    print(f"Model saved as: {out_name}")

# Run the merge only when executed as a script, not on import.
if __name__ == "__main__":
    main()