# -*- coding: UTF-8 -*-
"""
@Date    ：2025/10/15 18:35 
@Author  ：Liu Yuezhao
@Project ：bert 
@File    ：split_model.py
@IDE     ：PyCharm 
"""
import torch
import os
import glob


def print_structure(obj, indent=0):
    """Recursively print the key/type layout of a (possibly nested) dict.

    Args:
        obj: any object; dicts are descended into, anything else just has
            its type printed.
        indent (int): current nesting depth (two spaces per level).
    """
    pad = "  " * indent
    if not isinstance(obj, dict):
        print(f"{pad}{type(obj)}")
        return
    for key, value in obj.items():
        print(f"{pad}{key}: {type(value)}")
        if isinstance(value, dict):
            print_structure(value, indent + 1)


def split_checkpoint_by_size(model_path, tensor_key='model_state_dict', max_size_mb=50, output_prefix="chunk"):
    """
    Split a model checkpoint into size-bounded shards; metadata is saved separately.

    Args:
        model_path (str): path to the original checkpoint file.
        tensor_key (str): key under which the model state dict is stored.
        max_size_mb (int): maximum shard size in MB (counts parameter bytes only).
        output_prefix (str): prefix for all output files; shards are written as
            ``{output_prefix}_{i}.pth`` and metadata as ``{output_prefix}_meta.pth``.

    Raises:
        KeyError: if ``tensor_key`` is missing from the checkpoint.
    """
    print(f"Loading model from {model_path}...")
    checkpoint = torch.load(model_path, map_location='cpu')

    if tensor_key not in checkpoint:
        raise KeyError(f"Key '{tensor_key}' not found. Available keys: {list(checkpoint.keys())}")

    state_dict = checkpoint[tensor_key]
    # Everything except the weights goes into a separate metadata file.
    meta_data = {name: value for name, value in checkpoint.items() if name != tensor_key}

    if not meta_data:
        print("No metadata found.")
    else:
        meta_path = f"{output_prefix}_meta.pth"
        torch.save(meta_data, meta_path)
        meta_size_mb = os.path.getsize(meta_path) / (1024 * 1024)
        print(f"Saved {meta_path} ({meta_size_mb:.2f} MB) [metadata only]")

    # Greedily pack tensors into shards, flushing whenever adding the next
    # tensor would push a non-empty shard over the byte budget. A single
    # tensor larger than the budget still gets its own shard.
    byte_budget = max_size_mb * 1024 * 1024
    shards = []
    shard, shard_bytes = {}, 0
    for name, tensor in state_dict.items():
        nbytes = tensor.element_size() * tensor.nelement()
        if shard_bytes > 0 and shard_bytes + nbytes > byte_budget:
            shards.append(shard)
            shard, shard_bytes = {}, 0
        shard[name] = tensor
        shard_bytes += nbytes
    if shard:
        shards.append(shard)

    # Each shard file carries only the tensor_key entry, 1-indexed.
    for index, shard in enumerate(shards, start=1):
        shard_path = f"{output_prefix}_{index}.pth"
        torch.save({tensor_key: shard}, shard_path)
        param_mb = sum(t.element_size() * t.nelement() for t in shard.values()) / (1024 ** 2)
        file_mb = os.path.getsize(shard_path) / (1024 ** 2)
        print(f"Saved {shard_path} (params: {param_mb:.2f} MB, file: {file_mb:.2f} MB)")


def merge_state_dict_only(input_prefix, output_path, tensor_key="model_state_dict"):
    """
    Merge shard files back into a single, pure model state dict.

    The output file contains only the state dict itself (no metadata), i.e.
    it is equivalent to ``torch.save(model.state_dict(), output_path)``.

    Raises:
        FileNotFoundError: if no shard files match ``input_prefix``.
        KeyError: if a shard file lacks ``tensor_key``.
    """
    # Collect shard files, skipping the separate metadata file.
    all_parts = glob.glob(f"{input_prefix}_*.pth")
    chunk_files = [path for path in all_parts if not path.endswith("_meta.pth")]
    if not chunk_files:
        raise FileNotFoundError(f"No chunk files found for prefix: {input_prefix}")

    # Order shards by their numeric suffix so keys merge in split order.
    def shard_index(path):
        return int(path.split('_')[-1].split('.')[0])

    chunk_files.sort(key=shard_index)

    merged = {}
    for part_file in chunk_files:
        print(f"Loading {part_file}...")
        part = torch.load(part_file, map_location='cpu')
        if tensor_key not in part:
            raise KeyError(f"Key '{tensor_key}' missing in {part_file}")
        merged.update(part[tensor_key])

    # Persist the bare state dict with no surrounding metadata.
    torch.save(merged, output_path)
    size_mb = os.path.getsize(output_path) / (1024 ** 2)
    print(f"Saved pure state_dict to {output_path} ({size_mb:.2f} MB)")


def verify_state_dict_equal(original_path, pure_state_dict_path, tensor_key="model_state_dict"):
    """
    Verify that ``ckpt[tensor_key]`` from the original checkpoint exactly
    equals the full contents of the pure state-dict file.

    Raises:
        KeyError: if the original checkpoint lacks ``tensor_key``.
        TypeError: if the merged file is not a dict.
        AssertionError: on any key-set or tensor mismatch.
    """
    print("Verifying state_dict equality...")

    # Pull the reference state dict out of the original checkpoint.
    original_ckpt = torch.load(original_path, map_location='cpu')
    if tensor_key not in original_ckpt:
        raise KeyError(f"Original checkpoint missing key: {tensor_key}")
    orig_sd = original_ckpt[tensor_key]

    # The merged file is expected to BE the state dict, not wrap it.
    merged_sd = torch.load(pure_state_dict_path, map_location='cpu')
    if not isinstance(merged_sd, dict):
        raise TypeError("Merged file is not a state_dict (not a dict)")

    # Key sets must match exactly before comparing values.
    orig_keys, merged_keys = set(orig_sd.keys()), set(merged_sd.keys())
    if orig_keys != merged_keys:
        missing = orig_keys - merged_keys
        extra = merged_keys - orig_keys
        raise AssertionError(f"Keys mismatch!\nMissing: {missing}\nExtra: {extra}")

    # Element-wise exact comparison of every tensor.
    for key in orig_sd:
        if not torch.equal(orig_sd[key], merged_sd[key]):
            # Could relax to torch.allclose(..., atol=1e-6) if tiny float
            # drift from serialization-format changes must be tolerated.
            raise AssertionError(f"Tensor mismatch at key: {key}")

    print("✅ State dict is bit-for-bit identical!")


if __name__ == '__main__':
    model_path = "../../checkpoints/pretrain/best_mlm_model.pth"
    # Single source of truth for the merged output file: the original code
    # merged into "bert_MLM_model.pth" but then verified a non-existent
    # "merged_model.pth", so verification always failed.
    merged_path = "bert_MLM_model.pth"

    # Optional: print checkpoint structure
    # ckpt = torch.load(model_path, map_location='cpu')
    # print_structure(ckpt)

    # Split into <=20 MB shards plus a separate metadata file.
    split_checkpoint_by_size(
        model_path=model_path,
        tensor_key="model_state_dict",
        max_size_mb=20,
        output_prefix="model_part"
    )

    # Merge shards back into a pure state dict.
    merge_state_dict_only(
        input_prefix="model_part",
        output_path=merged_path,
        tensor_key="model_state_dict"
    )

    # Verify the merged file against the original checkpoint.
    verify_state_dict_equal(model_path, merged_path)

    # Optional: clean up shard files
    # for f in glob.glob("model_part_*.pth"):
    #     os.remove(f)
    #     print(f"Removed {f}")