import os
import sys
import time
import socket
import random
import numpy as np
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data
import matplotlib.pyplot as plt
import torch.multiprocessing as mp
from torch.multiprocessing import Process
from torchvision import datasets, transforms
from torch.nn.parallel import DistributedDataParallel as DDP
from tqdm import tqdm

# Register CJK-capable fonts so the Chinese plot title below renders
# correctly (matplotlib's default font lacks those glyphs).
plt.rcParams['font.sans-serif'] = ['KaiTi', 'FangSong', 'SimHei']
plt.rcParams['font.size'] = 12
# Use the ASCII hyphen for minus signs; the Unicode minus glyph is missing
# from the CJK fonts selected above.
plt.rcParams['axes.unicode_minus'] = False


class Net(nn.Module):
    """LeNet-5 style CNN for 28x28 single-channel MNIST images (10 classes)."""

    def __init__(self):
        super(Net, self).__init__()
        # 28x28 -> conv5 -> 24x24 -> pool -> 12x12 -> conv5 -> 8x8 -> pool -> 4x4
        self.conv1 = nn.Conv2d(1, 6, (5, 5))
        self.pool1 = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, (5, 5))
        self.pool2 = nn.MaxPool2d(2, 2)
        self.fc1 = nn.Linear(16 * 4 * 4, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        """Return raw class logits of shape (N, 10) for input of shape (N, 1, 28, 28)."""
        x = F.relu(self.conv1(x))
        x = self.pool1(x)
        x = F.relu(self.conv2(x))
        x = self.pool2(x)
        x = x.view(-1, 16 * 4 * 4)
        x = F.relu(self.fc1(x))
        # BUG FIX: F.dropout defaults to training=True, so the original applied
        # dropout even in eval mode; tie it to the module's training flag.
        x = F.dropout(x, p=0.3, training=self.training)
        x = F.relu(self.fc2(x))
        x = F.dropout(x, p=0.1, training=self.training)
        x = self.fc3(x)
        return x


def make_dataset(rank):
    """Build per-rank MNIST train/test loaders for distributed training.

    Args:
        rank: this process's rank, passed to the DistributedSampler so each
            process sees a disjoint shard of the data.

    Returns:
        (train_sampler, train_loader, test_loader). The train sampler is
        returned so the caller can invoke set_epoch() to reshuffle shards.
    """
    # Shared normalization pipeline (MNIST mean/std); previously duplicated
    # verbatim for the train and test sets.
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,)),
    ])
    # NOTE(review): download=False assumes ./data already contains MNIST —
    # confirm, otherwise this raises on a fresh checkout.
    train_set = datasets.MNIST('./data', train=True, download=False,
                               transform=transform)
    test_set = datasets.MNIST('./data', train=False, download=False,
                              transform=transform)

    size = dist.get_world_size()
    train_sampler = torch.utils.data.distributed.DistributedSampler(
        train_set, num_replicas=size, rank=rank)
    test_sampler = torch.utils.data.distributed.DistributedSampler(
        test_set, num_replicas=size, rank=rank)

    batch_size = 32
    train_batch_sampler = torch.utils.data.BatchSampler(train_sampler, batch_size, drop_last=True)

    train_loader = torch.utils.data.DataLoader(train_set, batch_sampler=train_batch_sampler,
                                               num_workers=1, pin_memory=True)
    test_loader = torch.utils.data.DataLoader(test_set, sampler=test_sampler,
                                              num_workers=1, pin_memory=True, batch_size=batch_size)
    return train_sampler, train_loader, test_loader


def local_train(ddp_model, train_sampler, rank, train_loader, cost, local_epoches, size):
    """Train the model for `local_epoches` epochs on this rank's data shard.

    Args:
        ddp_model: model (possibly DDP-wrapped) trained in place.
        train_sampler: DistributedSampler; set_epoch() is called each epoch
            so every rank draws a different shard ordering per epoch.
        rank: this process's rank (rank 0 shows a tqdm progress bar; also the
            CUDA device index on the single-process path).
        train_loader: training DataLoader for this rank.
        cost: list mutated in place — receives the averaged running loss
            every `log_interval` mini-batches, for the caller's plot.
        local_epoches: number of epochs to run.
        size: world size; size == 1 selects the single-GPU (.cuda) path.
    """
    optimizer = optim.Adam(ddp_model.parameters(), lr=0.0001)
    criterion = nn.CrossEntropyLoss()
    # 60000 MNIST training images, batch size 32, log 5 times per epoch.
    # FIX: integer division — the original float modulus `(i+1) % 125.0`
    # silently never fired whenever 60000/(32*5*size) was not an exact integer.
    log_interval = 60000 // (32 * 5 * size)
    ddp_model.train()  # make sure dropout is active while training
    for epoch in range(local_epoches):
        train_sampler.set_epoch(epoch)  # different shard ordering per epoch
        running_loss = 0.0
        total_loss = 0
        # FIX: bind the (possibly tqdm-wrapped) iterator to a *local* name;
        # the original rebound `train_loader`, so from the second epoch on
        # rank 0 wrapped tqdm inside tqdm.
        loader = tqdm(train_loader, file=sys.stdout) if rank == 0 else train_loader
        for i, data in enumerate(loader, 0):
            inputs, labels = data
            if size == 1:
                inputs = inputs.cuda(rank)
                labels = labels.cuda(rank)
            optimizer.zero_grad()
            outputs = ddp_model(inputs)
            # The loss itself must not be all_reduced; synchronization happens
            # via gradient/model averaging (DDP or average_gradients).
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
            if log_interval and (i + 1) % log_interval == 0:
                cost.append(running_loss / log_interval)
                total_loss += running_loss
                running_loss = 0
        dist.barrier()  # keep per-rank progress output from interleaving
        print("[Rank:{}][epoch:{}]  loss:{}".format(rank, epoch + 1, total_loss))


def local_eval(ddp_model, test_loader, rank, size):
    """Evaluate classification accuracy on this rank's test shard.

    Args:
        ddp_model: model to evaluate.
        test_loader: iterable yielding (inputs, labels) batches.
        rank: this process's rank / CUDA device index when size == 1.
        size: world size; size == 1 selects the single-GPU (.cuda) path.

    Returns:
        Accuracy in percent over this rank's shard (0.0 for an empty loader).
    """
    was_training = ddp_model.training
    ddp_model.eval()  # FIX: disable dropout/BN updates during evaluation
    correct = 0
    total = 0
    with torch.no_grad():
        for inputs, labels in test_loader:
            if size == 1:
                inputs = inputs.cuda(rank)
                labels = labels.cuda(rank)
            outputs = ddp_model(inputs)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    if was_training:
        ddp_model.train()  # restore the caller's mode
    accuracy = 100 * correct / total if total else 0.0  # guard empty loader
    # FIX: report the actual shard size — each rank only evaluates
    # 10000/world_size images, so the hard-coded "10000" message was wrong.
    print("Accuracy on the {} test images:{}%".format(total, accuracy))
    return accuracy


def average_gradients(model):
    """All-reduce and average gradients across all ranks (synchronous SGD).

    Call after backward() and before optimizer.step() when the model is NOT
    wrapped in DDP (DDP already averages gradients internally).
    """
    world_size = float(dist.get_world_size())
    for param in model.parameters():
        if param.grad is None:
            continue  # e.g. frozen parameters never receive a gradient
        # FIX: dist.reduce_op is long deprecated (removed in recent PyTorch);
        # use dist.ReduceOp, as the rest of this file already does.
        dist.all_reduce(param.grad.data, op=dist.ReduceOp.SUM)
        param.grad.data /= world_size


def run(rank, size):
    """Distributed synchronous SGD example: train, evaluate, plot the loss.

    Args:
        rank: this process's rank (also the CUDA device index when size == 1).
        size: world size.
    """
    cost = []  # running-loss history, filled by local_train, plotted below
    local_epoches = 10
    global_epoches = 1
    ddp_model = Net()
    if size == 1:
        # Single-process path: train on GPU `rank`.
        ddp_model = ddp_model.cuda(rank)
        ddp_model = DDP(ddp_model, device_ids=[rank], output_device=rank)
    else:
        # FIX: the multi-process CPU path previously skipped DDP entirely (and
        # average_gradients was never called), so gradients were never
        # synchronized and each rank trained an independent model — the
        # opposite of "synchronous SGD". CPU DDP over gloo needs no device_ids.
        ddp_model = DDP(ddp_model)
    t1 = time.perf_counter()  # wall-clock time for the whole run

    for i in range(global_epoches):
        train_sampler, train_loader, test_loader = make_dataset(rank)
        local_train(ddp_model, train_sampler, rank, train_loader, cost, local_epoches, size)
        local_eval(ddp_model, test_loader, rank, size)

    if rank == 0:
        print("Total_timecost : {}".format(time.perf_counter() - t1))
        plt.plot(cost)
        plt.xlabel('mini-batches(per {})'.format((60000/(32*5*size))))
        plt.ylabel('running_loss')
        plt.title('[Rank{}]running_loss的变化趋势'.format(rank))
        plt.show()  # NOTE: blocks until the plot window is closed
    dist.destroy_process_group()  # release the process group before exit


def init_processes(rank, size, fn, backend='gloo'):
    """Initialize the distributed environment, then run `fn(rank, size)`.

    Args:
        rank: process index (injected by mp.spawn as the first argument).
        size: world size.
        fn: entry point executed once the process group is ready.
        backend: torch.distributed backend; gloo supports CPU training.
    """
    # Discover this host's outbound IPv4 address: connect() on a UDP socket
    # sends no packets, it only selects the local interface/address.
    # NOTE(review): this requires a routable network; for a single-node
    # mp.spawn run, '127.0.0.1' would work offline — confirm whether
    # multi-node use is intended before simplifying.
    # FIX: use a context manager so the socket is closed (it leaked before).
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
        s.connect(("8.8.8.8", 80))
        os.environ['MASTER_ADDR'] = s.getsockname()[0]
    os.environ['MASTER_PORT'] = '29500'
    dist.init_process_group(backend, rank=rank, world_size=size)
    fn(rank, size)


if __name__ == "__main__":
    # World size: number of worker processes spawned on this node.
    size = 3
    # mp.spawn prepends the process index (rank) to `args`, so each child
    # process executes init_processes(rank, size, run).
    mp.spawn(init_processes,  # per-process entry point
             args=(size, run),  # extra arguments after the injected rank
             nprocs=size,  # number of processes on this node
             join=True)  # block until all spawned processes finish
