# Class mean detection method for full connection layer
import os
import sys
import time
import socket
import random
import numpy as np
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data
import matplotlib.pyplot as plt
import torch.multiprocessing as mp
from torch.multiprocessing import Process
from torchvision import datasets, transforms
from torch.nn.parallel import DistributedDataParallel as DDP
from tqdm import tqdm

# If Chinese figure titles cannot be rendered correctly, add the settings below:
# register CJK-capable fonts and keep the minus sign renderable with them.
plt.rcParams['font.sans-serif'] = ['KaiTi', 'FangSong', 'SimHei']
plt.rcParams['font.size'] = 12
plt.rcParams['axes.unicode_minus'] = False


class Net(nn.Module):
    """LeNet-5-style CNN for 28x28 single-channel images, 10 output classes.

    Attribute names (conv1/conv2/fc1/fc2/fc3) are part of the model's
    public surface: callers inspect parameters by name (e.g. 'fc3.weight'),
    so they must not be renamed.
    """

    def __init__(self):
        super(Net, self).__init__()
        # Two conv+pool stages: 1x28x28 -> 6x12x12 -> 16x4x4
        self.conv1 = nn.Conv2d(1, 6, (5, 5))
        self.pool1 = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, (5, 5))
        self.pool2 = nn.MaxPool2d(2, 2)
        # Classifier head: 256 -> 120 -> 84 -> 10 logits
        self.fc1 = nn.Linear(16 * 4 * 4, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        """Return raw class logits of shape (batch, 10)."""
        out = self.pool1(F.relu(self.conv1(x)))
        out = self.pool2(F.relu(self.conv2(out)))
        out = out.view(-1, 16 * 4 * 4)  # flatten feature maps per sample
        out = F.relu(self.fc1(out))
        out = F.relu(self.fc2(out))
        return self.fc3(out)  # no softmax: CrossEntropyLoss expects logits


def make_dataset(rank, batch_size=32):
    """Build MNIST loaders, partitioning the training set across ranks.

    Args:
        rank: this process's rank, used by the DistributedSampler.
        batch_size: mini-batch size for every loader (was hard-coded; now a
            backward-compatible keyword with the original default of 32).

    Returns:
        (train_sampler, train_loader, test_loader, train_loader_0, test_loader_0)
        where train_loader is rank-partitioned and the *_0 loaders iterate the
        full (unpartitioned) datasets.

    Note: requires an initialized process group (uses dist.get_world_size()).
    """
    # Single transform pipeline shared by train and test (was duplicated).
    transform = transforms.Compose([transforms.ToTensor(),
                                    transforms.Normalize((0.1307, ), (0.3081, ))])
    train_set = datasets.MNIST('../data', train=True, download=True, transform=transform)
    test_set = datasets.MNIST('../data', train=False, download=True, transform=transform)

    size = dist.get_world_size()

    # Unpartitioned loaders over the full datasets.
    train_loader_0 = torch.utils.data.DataLoader(train_set, batch_size=batch_size, shuffle=True, num_workers=1)
    test_loader_0 = torch.utils.data.DataLoader(test_set, batch_size=batch_size, shuffle=True, num_workers=1)

    # Rank-partitioned training loader; drop_last keeps batch counts equal
    # across ranks (needed for lock-step collectives during training).
    train_sampler = torch.utils.data.distributed.DistributedSampler(train_set, num_replicas=size, rank=rank)
    train_batch_sampler = torch.utils.data.BatchSampler(train_sampler, batch_size, drop_last=True)
    train_loader = torch.utils.data.DataLoader(train_set, batch_sampler=train_batch_sampler,
                                               num_workers=1, pin_memory=True)
    test_loader = torch.utils.data.DataLoader(test_set, num_workers=1, pin_memory=True, batch_size=batch_size)
    return train_sampler, train_loader, test_loader, train_loader_0, test_loader_0


def local_train(ddp_model, train_sampler, rank, train_loader, cost, local_epoches, size):
    """Train the model locally and record the per-class mean of fc3 weights.

    Performs `local_epoches` epochs of Adam training with a label-flipping
    attack (every label 1 is rewritten to 4). After each epoch the final FC
    layer's weight matrix is reduced to one column (the row-wise mean) and
    collected; the columns are concatenated into the returned matrix.

    Args:
        ddp_model: the (possibly DDP-wrapped) network to train.
        train_sampler: DistributedSampler — re-seeded each epoch.
        rank: this process's rank (rank 0 shows a tqdm progress bar).
        train_loader: rank-partitioned training loader.
        cost: list the caller owns; periodic average losses are appended.
        local_epoches: number of training epochs.
        size: world size; size == 1 means the single-process GPU path.

    Returns:
        Tensor of shape (10, local_epoches): one class-mean column per epoch.
    """
    optimizer = optim.Adam(ddp_model.parameters(), lr=0.0001)
    criterion = nn.CrossEntropyLoss()
    class_means = []  # renamed from `list` — it shadowed the builtin
    # Mini-batches between loss logs (loop-invariant, hoisted; stays a float
    # so the `%` comparison below behaves exactly as before).
    log_interval = 60000 / (32 * 5 * size)
    for epoch in range(local_epoches):
        train_sampler.set_epoch(epoch)  # reshuffle so each rank sees different data each epoch
        running_loss = 0.0
        total_loss = 0
        if rank == 0:
            train_loader = tqdm(train_loader, file=sys.stdout)
        for i, data in enumerate(train_loader, 0):
            inputs, labels = data
            for j in range(len(labels)):  # label-flipping attack: 1 -> 4
                if labels[j] == 1:
                    labels[j] = 4
            if size == 1:
                inputs = inputs.cuda(rank)
                labels = labels.cuda(rank)
            optimizer.zero_grad()
            outputs = ddp_model(inputs)

            # Loss cannot simply be summed across ranks after backward; instead
            # average it here so the reported value is the global mean.
            loss = criterion(outputs, labels)
            if size > 1:
                loss = loss / size
                dist.all_reduce(loss, op=dist.ReduceOp.SUM)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
            if (i + 1) % log_interval == 0:
                cost.append(running_loss / log_interval)
                total_loss += running_loss
                running_loss = 0
        dist.barrier()  # keep per-rank prints from interleaving
        print("[Rank:{}][epoch:{}]  loss:{}".format(rank, epoch + 1, total_loss))
        # Extract the final FC layer's weight matrix; the parameter name is
        # prefixed with 'module.' when the model is DDP-wrapped.
        weight_matrix = []
        for name, parm in ddp_model.named_parameters():
            if name in ('module.fc3.weight', 'fc3.weight'):
                # detach(): a view sharing storage but requiring no grad.
                weight_matrix = parm.detach()
        # Row-wise mean via matrix-vector product with a constant 1/n vector.
        mean = torch.ones([weight_matrix.size()[1], 1])  # 84
        mean = mean / weight_matrix.size()[1]
        if size == 1:
            mean = mean.cuda(rank)
        class_mean = weight_matrix.mm(mean)  # shape (10, 1)
        class_means.append(class_mean)
    class_mean_all = torch.cat(class_means, dim=1)  # shape (10, local_epoches)
    print(class_mean_all.size())
    return class_mean_all


def local_eval(ddp_model, test_loader, rank, size):
    """Evaluate the model on the test loader and print overall accuracy.

    When size == 1 (single-process GPU path) batches are moved to this
    rank's device first. No value is returned; accuracy is only printed.
    """
    n_correct = 0
    n_seen = 0
    with torch.no_grad():
        for inputs, labels in test_loader:
            if size == 1:
                inputs = inputs.cuda(rank)
                labels = labels.cuda(rank)
            logits = ddp_model(inputs)
            predictions = logits.data.argmax(dim=1)
            n_seen += labels.size(0)
            n_correct += int((predictions == labels).sum())
    print("Accuracy on the 10000 test images:{}%".format(100 * n_correct / n_seen))


def run(rank, size):
    """Per-process entry point: train, evaluate, and plot class means.

    Builds the model (DDP-wrapped on GPU when size == 1), runs local
    training and evaluation, then plots the per-epoch class means of the
    final FC layer for this rank and tears down the process group.
    """
    cost = []  # running-loss history, filled in by local_train
    local_epoches = 10
    ddp_model = Net()
    if size == 1:
        # Single-process GPU path: move to this rank's device and wrap in DDP.
        ddp_model = ddp_model.cuda(rank)
        ddp_model = DDP(ddp_model, device_ids=[rank], output_device=rank)
    started = time.perf_counter()  # wall-clock timer for the whole run

    train_sampler, train_loader, test_loader, train_loader_0, test_loader_0 = make_dataset(rank)
    class_mean_all = local_train(ddp_model, train_sampler, rank, train_loader, cost, local_epoches, size)
    local_eval(ddp_model, test_loader, rank, size)

    plt.figure()
    print("Total_timecost : {}".format(time.perf_counter() - started))

    # One curve per class; values scaled by 1000 for visibility, and the
    # tensor row converted to a plain list for pyplot.
    for cls in range(10):
        plt.plot((class_mean_all[cls] * 1000).tolist(), label='class {} 类均值'.format(cls), linewidth=1.0)
    plt.legend(loc="best")
    plt.xlabel('epoch')
    plt.ylabel('全连接层类均值')
    plt.title('client {}'.format(rank))
    plt.show()
    dist.destroy_process_group()  # release the process group


def init_processes(rank, size, fn, backend='gloo'):
    """Initialize the distributed environment, then run fn(rank, size).

    The master address is discovered by "connecting" a UDP socket toward a
    public IP (no packet is actually sent for a UDP connect) and reading the
    local address the OS chose — i.e. the LAN adapter's IPv4 address.

    Args:
        rank: this process's rank.
        size: total world size.
        fn: callable invoked as fn(rank, size) once the group is up.
        backend: torch.distributed backend name (default 'gloo').
    """
    # Fix: the socket was previously never closed (fd leak); the context
    # manager guarantees it is released even if connect() raises.
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
        s.connect(("8.8.8.8", 80))
        master_addr = s.getsockname()[0]
    os.environ['MASTER_ADDR'] = master_addr
    os.environ['MASTER_PORT'] = '29500'
    dist.init_process_group(backend, rank=rank, world_size=size)
    fn(rank, size)


if __name__ == "__main__":
    size = 3  # world size; currently adjustable up to at most 5
    mp.spawn(init_processes,  # entry function run in each spawned process
             args=(size, run),  # extra args: spawn passes (rank, size, run)
             nprocs=size,  # number of processes on this node
             join=True)  # block until all processes finish
