import os
from glob import glob
import torch
from torch import nn
from safetensors import safe_open


def default_weight_loader(param: nn.Parameter, loaded_weight: torch.Tensor):
    """Copy ``loaded_weight`` into ``param`` in place.

    This is the fallback used when a parameter does not define its own
    ``weight_loader``. Copying in place (rather than rebinding) keeps the
    parameter object, its device, and its dtype intact.
    """
    # detach() yields a view of the same storage as .data; copy_ then
    # performs an element-wise in-place copy from the loaded tensor.
    param.detach().copy_(loaded_weight)


def load_model(model: nn.Module, path: str):
    """Load every ``*.safetensors`` file under ``path`` into ``model``.

    Supports "packed" modules (e.g. fused qkv or gate/up projections):
    when a checkpoint weight name contains a key from the model's
    ``packed_modules_mapping``, the name is rewritten to the fused
    parameter's name and the tensor is loaded as one shard of it via the
    parameter's custom ``weight_loader``.
    """
    # Mapping of checkpoint-name fragment -> (fused parameter fragment, shard id).
    # Empty dict when the model declares no packed modules.
    packed_modules_mapping = getattr(model, "packed_modules_mapping", {})
    for file in glob(os.path.join(path, "*.safetensors")):
        # Open on CPU so loading does not allocate GPU memory up front;
        # device placement is left to each parameter's weight_loader.
        with safe_open(file, "pt", "cpu") as f:
            for weight_name in f.keys():
                # weight_name is a checkpoint key,
                # e.g. "model.layers.0.attention.q_proj.weight"
                for k in packed_modules_mapping:
                    # Substring match: does this weight belong to a packed module?
                    if k in weight_name:
                        # Look up the real (fused) parameter fragment and shard id.
                        v, shard_id = packed_modules_mapping[k]
                        # Rewrite the checkpoint name to the fused parameter's name.
                        param_name: str = weight_name.replace(k, v)
                        # Fetch the actual nn.Parameter from the model.
                        param = model.get_parameter(param_name)
                        # Packed parameters must provide a sharded weight_loader;
                        # getattr with no default raises if one is missing.
                        weight_loader = getattr(param, "weight_loader")
                        # Load this tensor as one shard of the fused parameter.
                        weight_loader(param, f.get_tensor(weight_name), shard_id)
                        break
                else:
                    # for/else: no packed mapping matched — plain 1:1 load,
                    # honoring a custom weight_loader if the parameter has one.
                    param = model.get_parameter(weight_name)
                    weight_loader = getattr(param, "weight_loader", default_weight_loader)
                    weight_loader(param, f.get_tensor(weight_name)) # no shard id for unpacked weights
