import loguru
import torch
import time

from   accelerate import Accelerator
from   accelerate import load_checkpoint_in_model
from   accelerate import init_empty_weights
from   accelerate import load_checkpoint_and_dispatch
from   accelerate.utils import load_and_quantize_model, BnbQuantizationConfig
from   utils      import get_loader, get_model
from transformers import BertForSequenceClassification

logger = loguru.logger

# Build the dataloader; the first two return values of get_loader() are unused here.
_, _, dataloader = get_loader()

# fp8 / fp4 path: construct the architecture on the "meta" device —
# init_empty_weights() skips parameter allocation so real (quantized)
# weights can be loaded into the model afterwards.
with init_empty_weights():      #! parameters are NOT initialized at construction time
    model, optimizer, lr_scheduler = get_model()

# fp16 path (alternative to the meta-device construction above):
# model, optimizer, lr_scheduler = get_model()
# model.save_pretrained('model/save_pretrained')


#! 1. fp16 inference
# Load the saved checkpoint onto GPU 0.
# NOTE(review): the original comment said "load the float16 model" but
# torch_dtype is torch.float32 — confirm which dtype is actually intended
# (the benchmark notes below record both fp32 and fp16 runs).
model = BertForSequenceClassification.from_pretrained('model/save_pretrained', 
                                torch_dtype=torch.float32, device_map={'': 0})

#! 2. fp8 (int8 via bitsandbytes) inference — uncomment to benchmark:
# bnb_quantization_config = BnbQuantizationConfig(load_in_8bit=True,llm_int8_threshold=6)


#! 3. fp4 (nf4 via bitsandbytes) inference — uncomment to benchmark:
# bnb_quantization_config = BnbQuantizationConfig(load_in_4bit=True, bnb_4bit_quant_type='nf4',
#                         bnb_4bit_compute_dtype=torch.bfloat16, bnb_4bit_use_double_quant=True)

# Loads checkpoint weights into the (empty) model, quantizing per the config:
# load_and_quantize_model(model, weights_location='model/save_pretrained',
#                         device_map={'': 0},bnb_quantization_config=bnb_quantization_config)


# Benchmark: one inference pass over the dataloader, reporting wall time.
start = time.time()
with torch.no_grad():  # inference only — hoisted out of the loop, no autograd bookkeeping
    for data in dataloader:
        model(**data.to('cuda:0'))

# CUDA kernels launch asynchronously: without a synchronize, time.time()
# only measures kernel *submission*. Wait for the GPU to drain so the
# elapsed time covers the actual compute.
if torch.cuda.is_available():
    torch.cuda.synchronize()

# Observed results (one epoch on an A10):
#   fp32: 4.48s, 930MB
#   fp16: 2.63s, 598MB
#   fp8:  5.18s, 578MB
#   fp4:  3.28s, 472MB
logger.info(f"time cost: {(time.time() - start):.4f}s")

# Keep the process alive so GPU memory can be inspected (e.g. nvidia-smi);
# sleep instead of `pass` to avoid busy-spinning a CPU core at 100%.
while True:
    time.sleep(1)