import torch
import onnxruntime as ort
import psutil
import os

def get_memory_usage():
    """Return the current process's resident set size (RSS) in MiB."""
    rss_bytes = psutil.Process(os.getpid()).memory_info().rss
    return rss_bytes / 2**20  # bytes -> MiB (2**20 == 1024 * 1024)

# Compare the memory cost of loading the PyTorch model vs. the ONNX model.
#
# BUG FIX (review): the original printed cumulative process RSS after each
# load, so the "PyTorch" number included the interpreter/import baseline and
# the "ONNX" number additionally included the PyTorch model still resident
# in memory. We now snapshot RSS before each load and report the *delta*
# attributable to that load alone.
baseline_mem = get_memory_usage()

# Load the PyTorch model on CPU only (no GPU allocation).
torch_model = torch.load("model.pth", map_location="cpu")
after_torch_mem = get_memory_usage()

# NOTE(review): the PyTorch model is intentionally kept alive here; the ONNX
# delta is measured against the post-PyTorch snapshot, so it is not polluted.
onnx_sess = ort.InferenceSession("BiFNet_48_33125_bak.onnx")
after_onnx_mem = get_memory_usage()

torch_mem = after_torch_mem - baseline_mem
onnx_mem = after_onnx_mem - after_torch_mem

print(f"Memory increase from loading PyTorch Model: {torch_mem:.2f} MB")
print(f"Memory increase from loading ONNX Model: {onnx_mem:.2f} MB")