import loguru
import torch
import time

from   accelerate import Accelerator
from   accelerate import load_checkpoint_in_model
from   accelerate import init_empty_weights
from   accelerate import load_checkpoint_and_dispatch
from   utils      import get_loader, get_model

logger = loguru.logger


model_directory = "model/accelerator.save_model"

#! Build the model skeleton without allocating/initializing any parameters
#! (weights live on the "meta" device until the checkpoint is dispatched below).
with init_empty_weights():
    model, optimizer, lr_scheduler = get_model()

#! version-1
# gpu-0:772MB 0.61s
# model = load_checkpoint_and_dispatch(model, checkpoint=model_directory,
#                                     #! device_map="auto": place modules automatically —
#                                     #! GPU memory first, then system RAM, then disk
#                                     device_map = {
#                                             "bert":0,                # "cuda:0"
#                                             "dropout":'cpu',         # load into system RAM
#                                             "classifier": "disk",    # offload to disk
#                                         },
#                                     offload_folder="offload_folder", # folder for the disk-offloaded shards
#                                 )

#! version-2
# gpu-0:772MB gpu-1:304MB 0.57s
# NOTE(review): original note said "gpu-0" twice; the device_map below puts
# dropout/classifier on device 1, so the second figure is presumably gpu-1.
model = load_checkpoint_and_dispatch(model, checkpoint=model_directory,
                                    #! device_map="auto" would place modules automatically —
                                    #! GPU memory first, then system RAM, then disk;
                                    #! here the split is pinned explicitly instead.
                                    device_map = {
                                            "bert"       : 0,           # "cuda:0"
                                            "dropout"    : 1,           # "cuda:1"
                                            "classifier" : 1,           # "cuda:1"
                                        }
                                )

#! Forward pass through a model spread across multiple devices; the hooks
#! installed by load_checkpoint_and_dispatch move activations between devices.
input_ids = torch.randint(0, 15, [2, 15])  # dummy batch of token ids, shape (2, 15)

start  = time.time()
logits = model(input_ids).logits

logger.info(f"time cost: {(time.time() - start):.4f}")

#! Keep the process alive so GPU memory usage can be inspected (e.g. nvidia-smi).
while True:
    time.sleep(1)   # sleep instead of busy-spinning so we don't peg a CPU core