# ddp_demo.py
import torch
import torch.nn as nn
from torch.optim import SGD
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
import os
import argparse
from ai.run import runner
from ai import store_model
from ai import store_data
from torch import distributed

# Limit OpenMP threads per process so the DDP workers don't oversubscribe CPU cores.
os.environ['OMP_NUM_THREADS'] = "1"

parser = argparse.ArgumentParser()
parser.add_argument('--gpu_id', type=str, default='0,1,2,3')
parser.add_argument('--batchSize', type=int, default=64)
parser.add_argument('--epochs', type=int, default=15)
parser.add_argument('--dataset-size', type=int, default=1024)
parser.add_argument('--num-classes', type=int, default=10)
config = parser.parse_args()

# Restrict the visible GPUs before any CUDA context is created.
os.environ['CUDA_VISIBLE_DEVICES'] = config.gpu_id
torch.distributed.init_process_group(backend="nccl")

# FIX: get_rank() returns the *global* rank, which only equals the node-local
# rank on a single-node job; using it as a CUDA device index breaks multi-node
# runs. Prefer the LOCAL_RANK env var set by torchrun / torch.distributed.launch,
# falling back to the global rank so single-node behavior is unchanged.
local_rank = int(os.environ.get("LOCAL_RANK", torch.distributed.get_rank()))
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)

# --------------------------------------------------------- start your code

# Which model to train; also forwarded to the runner (presumably for
# checkpoint naming — confirm against runner.RunnerGxl).
model_name = store_model.ModelName.MNIST
# Instantiate the model — reuse the constant instead of repeating the enum
# literal here and again below (the original duplicated it).
model = store_model.model(model_name)
# Fetch the training and validation datasets.
train_dataset, valid_dataset = store_data.DataStore('mnist').get_dataset()
# Plain SGD with momentum; under DDP gradients are averaged across ranks,
# so each rank keeps its own optimizer over the replicated parameters.
optimizer = SGD(model.parameters(), lr=0.1, momentum=0.9)
# -------------------------------------------------------- end your code


# DistributedSampler partitions the dataset across ranks so each GPU sees a
# distinct shard. NOTE(review): for proper cross-epoch shuffling the runner
# must call sampler1.set_epoch(epoch) at the start of every epoch — confirm
# runner.RunnerGxl does this.
sampler1 = DistributedSampler(train_dataset)
loader1 = DataLoader(train_dataset, batch_size=config.batchSize, sampler=sampler1)
# FIX: validation should be deterministic — DistributedSampler shuffles by
# default, so disable shuffling for the validation shard.
sampler2 = DistributedSampler(valid_dataset, shuffle=False)
loader2 = DataLoader(valid_dataset, batch_size=config.batchSize, sampler=sampler2)
loss_func = nn.CrossEntropyLoss()

# Move the model onto this rank's GPU (the exact device chosen above), then
# wrap it for distributed training.
if torch.cuda.is_available():
    model = model.to(device)
# FIX: pin DDP to this process's single device. Without device_ids DDP must
# infer the placement, which is only appropriate for single-process
# multi-device modules; the recommended one-process-per-GPU form is
# device_ids=[local_rank] with output_device=local_rank. (The NCCL backend
# requires CUDA, so local_rank always names a real GPU here.)
model = torch.nn.parallel.DistributedDataParallel(
    model, device_ids=[local_rank], output_device=local_rank
)

runner_o = runner.RunnerGxl(model, optimizer, loss_func, loader1, loader2,
                            multi=True, local_rank=local_rank, model_name=model_name)
runner_o.run(config.epochs)
