import loguru
import time

from   accelerate import Accelerator
from   utils import get_loader, get_model

logger = loguru.logger
# NOTE(review): get_loader/get_model live in local utils — their exact return
# contents are not visible here. The first two loader returns (presumably
# train/val splits or tokenizer-like objects) are deliberately discarded.
_, _, dataloader = get_loader()
model, optimizer, lr_scheduler = get_model()

#! 1. Use gradient accumulation over 4 steps.
#! 2. Benchmark time and GPU memory under different precisions: 'no','fp8','fp16','bf16'.
"""
bs=32, mixed_precision=fp16, time cost: 6.9s,  memory 4070MB;
bs=32, mixed_precision=no,   time cost: 13.8s, memory 4848MB;
bs=32, mixed_precision=bf16, time cost: 7.2s,  memory 4070MB;
"""
# fp16 chosen based on the benchmark above: ~2x faster and less memory than fp32.
accelerator = Accelerator(gradient_accumulation_steps=4, mixed_precision="fp16")

# Hand the training objects to Accelerate so it can apply device placement,
# mixed-precision wrapping, and (under distributed launches) the appropriate
# parallel wrappers; the returned objects replace the originals.
prepared = accelerator.prepare(dataloader, model, optimizer, lr_scheduler)
dataloader, model, optimizer, lr_scheduler = prepared
now = time.time()
LOG_INTERVAL = 1  # log every batch (was the magic `idx % 1`); raise to log less often

for idx, data in enumerate(dataloader):
    # Inside accumulate(), gradients are summed for `gradient_accumulation_steps`
    # iterations; the wrapped optimizer's step()/zero_grad() are no-ops until
    # the sync step, so calling them every iteration is correct.
    with accelerator.accumulate(model):
        optimizer.zero_grad()
        out = model(**data)
        accelerator.backward(out.loss)

        # Clip the global gradient norm to 1.0: if the norm of all parameter
        # gradients exceeds the threshold, they are rescaled so the total norm
        # equals it — guards against exploding gradients. Only meaningful on
        # sync steps, when the fully accumulated gradient is available.
        if accelerator.sync_gradients:
            accelerator.clip_grad_norm_(model.parameters(), 1.0)
        optimizer.step()
        lr_scheduler.step()

    if idx % LOG_INTERVAL == 0:
        # Read the lr directly from param_groups — serializing the whole
        # optimizer state_dict per step just to fetch one float is wasteful.
        lr = optimizer.param_groups[0]['lr']

        labels = data['labels']
        preds = out['logits'].argmax(1)  # predicted class ids, not raw logits
        acc = (labels == preds).sum().item() / len(labels)

        logger.info(f"idx = {idx:3d},  loss = {out.loss.item():.4f}, lr = {lr:.4f}, acc = {acc:.4f}")

logger.warning(f"time delta = {time.time() - now}")

"""
# 运行方式
1. python quick_start.py
2. accelerate launch --cpu quick_start.py      # CPU运行
3. accelerate launch --config_file /root/.cache/huggingface/accelerate/default_config.yaml  quick_start.py --args_for_script
4. accelerate launch --multi_gpu quick_start.py
5. accelerate launch --multi_gpu --mixed_precision=fp16 quick_start.py
6.
7. accelerate launch --help
"""