# -*- coding:utf8 -*-
# @Time : 2021/10/12 8:45 下午
# @Author : WanJie Wu

import os
import time
import torch
import argparse
import numpy as np
import torch.nn as nn
from tqdm import tqdm
import torch.optim as optim
import torch.distributed as dist
from torch.utils.data import DataLoader, Dataset
from torch.utils.data.distributed import DistributedSampler
from torch.nn.parallel import DistributedDataParallel as DDP

# Pin GPU enumeration to PCI bus order so indices are stable across tools,
# then restrict this process to physical GPUs 2 and 3 (seen as cuda:0/cuda:1).
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "2,3"

# Fix RNG seeds so data generation and weight init are reproducible.
np.random.seed(100)
torch.manual_seed(100)
# GPUs visible to this process (after CUDA_VISIBLE_DEVICES filtering above).
USABLE_GPU_COUNT = torch.cuda.device_count()


class MyModel(nn.Module):
    """Tiny two-layer MLP head: 10 -> 10 -> ReLU -> 2 logits."""

    def __init__(self):
        super(MyModel, self).__init__()
        self.net1 = nn.Linear(10, 10)
        self.relu = nn.ReLU()
        self.net2 = nn.Linear(10, 2)

    def forward(self, x):
        # First projection + activation, then the 2-way classification head.
        hidden = self.relu(self.net1(x))
        return self.net2(hidden)


class DummyDataset(Dataset):
    """Synthetic dataset: random 10-dim features with binary labels.

    ``mode="train"`` produces 100 samples; any other mode produces 10.
    """

    def __init__(self, mode="train"):
        self.num = 100 if mode == "train" else 10
        self.input_x = torch.randn(self.num, 10)
        self.input_y = torch.randint(low=0, high=2, size=(self.num, ))

    def __len__(self):
        return self.num

    def __getitem__(self, index):
        sample = {
            "input_x": self.input_x[index],
            "input_y": self.input_y[index],
        }
        return sample


def dummy_data(args, batch_size=4):
    """Build the training DataLoader over a fresh train-mode DummyDataset.

    Args:
        args: parsed CLI args; ``args.is_distributed`` chooses between a
            plain shuffled loader and a ``DistributedSampler``-backed one.
        batch_size: samples per batch.

    Returns:
        A ``DataLoader``; sharded per-rank when running distributed.
    """
    datasets = DummyDataset()
    if not args.is_distributed or USABLE_GPU_COUNT == 1:
        data_sampler = None
        shuffle = True
    else:
        # Let DistributedSampler default num_replicas/rank to
        # dist.get_world_size()/dist.get_rank(). The previous hard-coded
        # num_replicas=USABLE_GPU_COUNT over-shards whenever a process
        # drives more than one GPU (world_size < visible GPU count, a
        # configuration spmd_main explicitly supports).
        data_sampler = DistributedSampler(dataset=datasets)
        # DataLoader forbids shuffle=True with a sampler; the sampler
        # already shuffles per epoch.
        shuffle = False
    train_loader = DataLoader(
        dataset=datasets,
        batch_size=batch_size,
        shuffle=shuffle,
        num_workers=0,
        sampler=data_sampler
    )
    return train_loader


def demo_basic(args, model, device_ids):
    """Run one pass over the training data on ``device_ids[0]``.

    Plain SGD + cross-entropy loop; works for both the bare model and the
    DDP-wrapped one since both expose ``parameters()`` and ``__call__``.
    """
    optimizer = optim.SGD(model.parameters(), lr=0.001)
    criterion = nn.CrossEntropyLoss()
    train_loader = dummy_data(args, batch_size=4)
    device = device_ids[0]
    for batch in tqdm(train_loader, desc="训练进度条"):
        optimizer.zero_grad()
        inputs = batch["input_x"].to(device)
        targets = batch["input_y"].to(device)
        loss = criterion(model(inputs), targets).mean()
        loss.backward()
        optimizer.step()


def spmd_main(args):
    """Entry point: single-GPU training, or NCCL DDP when distributed.

    The distributed branch expects the launcher (torchrun /
    torch.distributed.launch) to have set MASTER_ADDR, MASTER_PORT, RANK
    and WORLD_SIZE in the environment before this process starts.

    Args:
        args: parsed CLI args with ``local_rank``, ``local_world_size``
            and ``is_distributed`` attributes.

    Raises:
        Exception: if no GPU is visible to this process.
    """
    if USABLE_GPU_COUNT == 0:
        raise Exception("must have gpu when training model...")

    if USABLE_GPU_COUNT == 1 or not args.is_distributed:
        # Single-GPU path: no process group, train directly on cuda:0.
        print("单GPU训练...")
        device_ids = [0]
        model = MyModel().cuda(device_ids[0])
        demo_basic(args, model, device_ids)
    else:
        # Multi-GPU DDP path.
        print("多GPU训练...")
        # Rendezvous settings must already be in the environment; raises
        # KeyError here (before init) if the launcher did not provide them.
        env_dict = {
            key: os.environ[key]
            for key in ("MASTER_ADDR", "MASTER_PORT", "RANK", "WORLD_SIZE")
        }
        print(f"[{os.getpid()}] Initializing process group with: {env_dict}")
        # Must happen before any dist.get_rank()/get_world_size() call below.
        dist.init_process_group(backend="nccl")
        print(
            f"[{os.getpid()}] world_size = {dist.get_world_size()}, "
            + f"rank = {dist.get_rank()}, backend={dist.get_backend()}"
        )
        # Visible GPUs must divide evenly among the local processes.
        assert USABLE_GPU_COUNT % args.local_world_size == 0
        gpus_per_process = USABLE_GPU_COUNT // args.local_world_size  # GPUs spanned by each process
        # Contiguous slice of local GPU indices owned by this process.
        device_ids = list(range(args.local_rank * gpus_per_process, (args.local_rank + 1) * gpus_per_process))
        print(
            f"[{os.getpid()}] rank = {dist.get_rank()}, "
            + f"world_size = {dist.get_world_size()}, gpus_per_process = {gpus_per_process}, device_ids = {device_ids}"
        )
        # Parameters live on the first GPU of the slice; DDP is handed the
        # whole slice (single-process multi-device mode when len > 1).
        model = MyModel().cuda(device_ids[0])
        ddp_model = DDP(model, device_ids)
        demo_basic(args, ddp_model, device_ids)
        dist.destroy_process_group()


if __name__ == "__main__":
    # CLI flags; --local_rank is supplied per-process by the DDP launcher.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("--local_rank", type=int, default=0)
    arg_parser.add_argument("--local_world_size", type=int, default=2)
    arg_parser.add_argument("--is_distributed", type=int, default=1)
    spmd_main(arg_parser.parse_args())
