import torch
"""
模型转换成 fp32
"""
def transfer_to_fp32(model_path, new_path):
    """Load a pickled torch model, cast every floating-point parameter and
    buffer to float32 in place, and save the converted model.

    Args:
        model_path: Path to the source checkpoint (a fully pickled module,
            not a bare state_dict — ``named_parameters`` is called on it).
        new_path: Destination path for the float32 checkpoint.
    """
    print("🔹 Loading model...")
    # NOTE(review): weights_only=False unpickles arbitrary Python objects;
    # only use this on checkpoints from a trusted source.
    model = torch.load(model_path, map_location="cpu", weights_only=False)

    # One shared helper replaces the two previously duplicated loops.
    _cast_named_tensors_to_fp32(model.named_parameters(), "parameter")
    _cast_named_tensors_to_fp32(model.named_buffers(), "buffer")

    print("💾 Saving float32 model...")
    torch.save(model, new_path)
    print("✅ Conversion done! Saved to", new_path)


def _cast_named_tensors_to_fp32(named_tensors, kind):
    """Cast each floating-point tensor in *named_tensors* to float32.

    Args:
        named_tensors: Iterable of ``(name, tensor)`` pairs, e.g. from
            ``Module.named_parameters()`` or ``Module.named_buffers()``.
        kind: Label used in the progress message ("parameter" or "buffer").
    """
    for name, tensor in named_tensors:
        # Skip integer/bool tensors (e.g. BatchNorm's num_batches_tracked).
        if tensor.is_floating_point():
            old_dtype = tensor.dtype
            # Rebinding .data swaps the underlying storage without
            # replacing the Parameter/buffer object registered on the module.
            tensor.data = tensor.data.float()
            print(f"Converted {kind} {name}: {old_dtype} -> {tensor.data.dtype}")

if __name__ == "__main__":
    model_path = "small.model"
    new_path   = "small-fp32.model"
    transfer_to_fp32(model_path, new_path)
