# 单机多卡训练

import os
import torch
import loguru
import random
import datetime
from   utils            import get_loader, get_model
from   accelerate       import Accelerator
from   accelerate.utils import DummyOptim, DummyScheduler
from   transformers     import get_scheduler

# Pin every RNG source so repeated runs are reproducible.
SEED = 100
logger = loguru.logger

random.seed(SEED)
torch.manual_seed(SEED)
torch.cuda.manual_seed_all(SEED)

# Data pipeline and model come from project-local helpers; get_loader returns
# several values but only the final DataLoader is needed here.
*_, loader = get_loader(text_lens=100)
model, optimizer, scheduler = get_model()


# Distributed training context: gradients are accumulated over 4 micro-batches
# and computation runs in fp16 mixed precision.
accelerator = Accelerator(gradient_accumulation_steps=4, mixed_precision="fp16")

# Report the process topology. RANK / LOCAL_RANK are injected by the launcher
# (e.g. `accelerate launch`); both print as None for a plain single-process run.
print('rank       = ',                        os.environ.get('RANK', None))
print('local_rank = ',                        os.environ.get('LOCAL_RANK', None))
print('accelerator.distributed_type      = ', accelerator.distributed_type)
print('accelerator.is_local_main_process = ', accelerator.is_local_main_process)
print('accelerator.is_main_process       = ', accelerator.is_main_process)

#! When training with DeepSpeed via a ds_config.json, the optimizer and
#! scheduler must be specified as the dummy variants (DummyOptim /
#! DummyScheduler, imported above) rather than real ones.

# model, optimizer, data_loader, scheduler = accelerator.prepare(model, optimizer, data_loader, scheduler)
model , optimizer, loader, scheduler = accelerator.prepare(model , optimizer, loader, scheduler)

# --- training loop -----------------------------------------------------------
# NOTE(review): the Accelerator was created with gradient_accumulation_steps=4,
# but the original loop stepped the optimizer on every batch, so accumulation
# never actually took effect. Wrapping each step in accelerator.accumulate()
# lets Accelerate skip optimizer/scheduler steps until 4 batches have been seen.
now = datetime.datetime.now()
for i, data in enumerate(loader):
    with accelerator.accumulate(model):
        optimizer.zero_grad()
        out = model(**data)  # assumes the batch dict matches the model's forward signature
        accelerator.backward(out.loss)

        # Clip only on steps where gradients are actually synchronized;
        # clipping partially-accumulated gradients would distort their norm.
        if accelerator.sync_gradients:
            accelerator.clip_grad_norm_(model.parameters(), 1.0)
        optimizer.step()
        scheduler.step()

    # Per-step metrics (the original computed lr/acc but never logged them).
    lr     = optimizer.state_dict()['param_groups'][0]['lr']
    labels = data['labels']
    logits = out['logits'].argmax(1)
    acc    = (labels == logits).sum().item() / len(labels)
    logger.info(
        f"idx = {i:3d},  loss = {out.loss.item():.4f}, "
        f"lr = {lr:.2e}, acc = {acc:.4f}, device = {accelerator.device}"
    )

print(datetime.datetime.now() - now)

# Persist only from the global main process; is_main_process already implies
# is_local_main_process, so the original double check was redundant.
accelerator.wait_for_everyone()
if accelerator.is_main_process:
    print('model.save_pretrained(...)')

#! 1. 单卡训练
# python train_sn_mgpu.py 

#! 2. 多卡训练  
# NOTE: the original command was missing a space after --multi_gpu
# ("--multi_gputrain_sn_mgpu.py"), which is why it appeared not to take effect.
# 使用两张卡 (use two GPUs):
# accelerate launch --num_processes=2 --multi_gpu train_sn_mgpu.py

# 所有使用卡
# accelerate launch --multi_gpu train_sn_mgpu.py 