# ddp_demo.py
import source
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
import os
import argparse
from torch import distributed, optim

from ai.audio_dataset.classify import data_handler
from ai.audo_model.classify import xvector, ecapa_tdnn
from ai.config.config import GxlNode
from project.wav_classify.my_runner import GxlRunnerGxl
from ai.utils import utils_model
os.environ['OMP_NUM_THREADS'] = "1"  # avoid CPU oversubscription when every rank spawns OpenMP threads

parser = argparse.ArgumentParser()
# Without a default (or an explicit value) the script fails with a
# "gpus not found"-style error, hence the full default device list.
parser.add_argument('--gpu_id', type=str, default='0,1,2,3,4,5,6,7')
config_parser = parser.parse_args()

# Must be set before any CUDA context is created so torch only sees these devices.
os.environ['CUDA_VISIBLE_DEVICES'] = config_parser.gpu_id
torch.distributed.init_process_group(backend="nccl")

# FIX: get_rank() returns the *global* rank; on a multi-node job that is not a
# valid local device index. Prefer the LOCAL_RANK env var set by torchrun /
# torch.distributed.launch, falling back to the global rank (which is correct
# for the single-node case this script originally assumed).
local_rank = int(os.environ.get('LOCAL_RANK', torch.distributed.get_rank()))
# Because CUDA_VISIBLE_DEVICES restricts the visible devices, torch renumbers
# them 0,1,2,... regardless of the physical ids passed via --gpu_id, so the
# local rank is the right index here (not gpu_id.split(',')[local_rank]).
torch.cuda.set_device(local_rank)

# --------------------------------------------------------- start your code
# Experiment setup: configuration, model, optimization, and data.
config = GxlNode.get_config_from_yaml('./config.yaml')
utils_model.set_random_seed(config.random.seed)

# 19 output classes — presumably matches the label set of the json data; confirm.
net = ecapa_tdnn.GxlTDNN(19)
loss_func = nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters(), lr=config.optim.lr)
lr_schedule = optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer, T_0=5, T_mult=2)

train_dataset = data_handler.get_dataset_by_json('./output/same_train_30000.json', config)
data_dev_iter = data_handler.get_iter_by_json('./output/same_dev_1000.json', config)
# -------------------------------------------------------- end your code


# The sampler shards the dataset so each rank iterates a distinct slice;
# shuffle=True (the default) shuffles globally before sharding into batches.
sampler1 = DistributedSampler(train_dataset)
# NOTE(review): for cross-epoch reshuffling the runner must call
# sampler1.set_epoch(epoch) at the start of every epoch — confirm GxlRunnerGxl does.
loader1 = DataLoader(train_dataset, batch_size=config.dataset.batch_size, sampler=sampler1,
                     collate_fn=data_handler.collate_fn, drop_last=True, num_workers=10, pin_memory=True)

if torch.cuda.is_available():
    net.cuda()
    # FIX: pin this process's replica to its own device. Without device_ids,
    # DDP has to infer the device placement, which is fragile when multiple
    # GPUs are visible to the process (as they are here).
    model = torch.nn.parallel.DistributedDataParallel(
        net, device_ids=[local_rank], output_device=local_rank)
else:
    # CPU fallback, preserved from the original. Note the nccl backend
    # initialized above requires CUDA; this path only works with gloo.
    model = torch.nn.parallel.DistributedDataParallel(net)

runner_man = GxlRunnerGxl(model, optimizer, loss_func,
                          loader1, config, data_dev_iter, scheduler=lr_schedule, multi=True, local_rank=local_rank, )
runner_man.run(config.train.epochs)
