import torch
import torchvision
import torchvision.transforms as transforms
import torch.utils.data.dataloader as dataloader
from torch.utils.data import Subset
import torch.nn as nn
import torch.optim as optim
from torch.nn.parameter import Parameter

import matplotlib.pyplot as plt

from hyl.federalstudyencryptor import FederalStudyEncryptor


def train_and_test(model, train_loader, epochs=20, lr=0.01):
    """Train *model* on *train_loader*, then return its accuracy (in percent)
    on the module-level ``test_loader``.

    Args:
        model: ``nn.Module`` mapping flattened 28x28 images (784-d) to logits.
        train_loader: DataLoader yielding ``(images, labels)`` batches.
        epochs: number of passes over the training data (default 20).
        lr: learning rate for the Adam optimizer (default 0.01).

    Returns:
        Classification accuracy on ``test_loader`` as a percentage (float).

    NOTE(review): relies on the module-level globals ``device`` and
    ``test_loader`` defined in the ``__main__`` section — confirm they exist
    before calling this from another module.
    """
    loss_func = nn.CrossEntropyLoss()  # cross-entropy loss for classification
    optimizer = optim.Adam(model.parameters(), lr=lr)  # Adam; SGD also works
    # optimizer = optim.SGD(model.parameters(), lr=lr)
    for epoch in range(epochs):
        for images, labels in train_loader:
            images = images.reshape(-1, 28 * 28).to(device)  # flatten to 784-d vectors
            labels = labels.to(device)
            output = model(images)

            loss = loss_func(output, labels)
            optimizer.zero_grad()
            loss.backward()  # backpropagate to compute parameter gradients
            optimizer.step()  # apply the update to the model parameters

    # Evaluation: eval() disables train-only layers, and no_grad() stops
    # autograd from building (and retaining) graphs for every test batch.
    model.eval()
    correct = 0
    total = 0
    with torch.no_grad():
        for images, labels in test_loader:
            images = images.reshape(-1, 28 * 28).to(device)
            labels = labels.to(device)
            output = model(images)
            # torch.max over dim=1: the index of the largest logit per row
            # is the predicted class.
            _, predicted = torch.max(output, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    accuracy = 100 * correct / total
    return accuracy


class FederalCalculator:
    """Aggregates the parties' (noise-masked) model parameters.

    Every parameter received here has already been masked with additive
    noise (the demo's stand-in for homomorphic encryption), so this class
    only knows how to compute averages — it never sees the true values.
    """

    @staticmethod
    def combineParams(para_A, para_B, para_C, *extra):
        """Average the fc1 and fc2 weight tensors across all parties.

        Each argument is a parameter list as produced by
        ``list(model.parameters())``: index 0 is the fc1 weight and index 2
        the fc2 weight (indices 1 and 3 are the biases, which are ignored
        here, matching the original aggregation scheme).

        Args:
            para_A, para_B, para_C: parameter lists of the first three parties.
            *extra: parameter lists of any additional parties (generalizes
                the original fixed 3-party averaging; passing exactly three
                arguments behaves identically to before).

        Returns:
            Tuple ``(com_para_fc1, com_para_fc2)`` — element-wise means of
            the fc1 and fc2 weight tensors.
        """
        parties = (para_A, para_B, para_C) + extra
        n = len(parties)
        com_para_fc1 = sum(p[0].data for p in parties) / n
        com_para_fc2 = sum(p[2].data for p in parties) / n
        return com_para_fc1, com_para_fc2


"""
一个子集是一个类，子集只知道自己的噪音和平均噪音是多少。
"""
class MnistSubset:
    def __init__(self, start, end):
        self.__start = start
        self.__end = end
        self.__train_set = Subset(train_set, range(start, end))
        self.__train_loader = dataloader.DataLoader(dataset=self.__train_set, batch_size=1000, shuffle=False)
        self.__accuracies = []
        encryptor.registerSubset(self)

    # 使用噪音进行加密
    def __encrypt(self, raw_params):
        noise = encryptor.getNoise(self)
        for param in raw_params:
            param += noise
        return raw_params

    # 解密
    def __decrypt(self, encrypt1, encrypt2):
        offset = encryptor.getAvgNoise(self)
        raw1 = encrypt1 - offset
        raw2 = encrypt2 - offset
        return raw1, raw2

    # 将准确率的图画出来
    def plot(self):
        plt.plot(self.__accuracies)

    def train_and_encrypt1(self):
        class NeuralNet(nn.Module):
            def __init__(self):
                super(NeuralNet, self).__init__()
                self.fc1 = nn.Linear(input_num, hidden_num)  # 服从正态分布的权重w
                self.fc2 = nn.Linear(hidden_num, output_num)
                nn.init.normal_(self.fc1.weight)
                nn.init.normal_(self.fc2.weight)
                nn.init.constant_(self.fc1.bias, val=0)  # 初始化bias为0
                nn.init.constant_(self.fc2.bias, val=0)
                self.relu = nn.ReLU()  # Relu激励函数

            def forward(self, x):
                x = self.fc1(x)
                x = self.relu(x)
                y = self.fc2(x)
                return y
        model = NeuralNet().to(device)
        accuracy = train_and_test(model, self.__train_loader)
        print("The accuracy of [{}, {}]: {}%".format(self.__start, self.__end, accuracy))
        self.__accuracies.append(accuracy)
        raw_params = list(model.parameters())
        model.requires_grad_(False)
        return self.__encrypt(raw_params)


    def train_and_encrypt2(self, com_para_fc1, com_para_fc2):
        class NeuralNet(nn.Module):
            def __init__(self, com_para_fc1, com_para_fc2):
                super(NeuralNet, self).__init__()
                self.fc1 = nn.Linear(input_num, hidden_num)
                self.fc2 = nn.Linear(hidden_num, output_num)
                self.fc1.weight = Parameter(com_para_fc1)
                self.fc2.weight = Parameter(com_para_fc2)
                nn.init.constant_(self.fc1.bias, val=0)
                nn.init.constant_(self.fc2.bias, val=0)
                self.relu = nn.ReLU()

            def forward(self, x):
                x = self.fc1(x)
                x = self.relu(x)
                y = self.fc2(x)
                return y

        raw1, raw2 = self.__decrypt(com_para_fc1, com_para_fc2)
        model = NeuralNet(raw1, raw2).to(device)
        accuracy = train_and_test(model, self.__train_loader)
        print("The accuracy of [{}, {}]: {}%".format(self.__start, self.__end, accuracy))
        self.__accuracies.append(accuracy)
        raw_params = list(model.parameters())
        model.requires_grad_(False)
        return self.__encrypt(raw_params)


if __name__ == '__main__':
    mnist_path = "./data"
    # mnist_path = "../MachineLearning/mnist"
    input_num = 784   # 28*28 flattened pixels
    hidden_num = 12   # hidden layer width
    output_num = 10   # digit classes
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Shared noise source: each party registers itself and later retrieves
    # its own noise (to encrypt) and the average noise (to decrypt).
    encryptor = FederalStudyEncryptor(3)
    encryptor.genNoises()

    # NOTE(review): download=False assumes the MNIST files already exist
    # under mnist_path — set download=True for a first run.
    train_set = torchvision.datasets.MNIST(root=mnist_path, train=True, transform=transforms.ToTensor(), download=False)
    subset1 = MnistSubset(0, 1000)
    subset2 = MnistSubset(1000, 2000)
    subset3 = MnistSubset(2000, 3000)

    test_set = torchvision.datasets.MNIST(root=mnist_path, train=False, transform=transforms.ToTensor(), download=False)
    test_set = Subset(test_set, range(0, 2000))
    # Evaluate in large batches: the previous default batch_size=1 made every
    # evaluation 2000 separate forward passes. Shuffling does not affect the
    # accuracy computation, so it is disabled.
    test_loader = dataloader.DataLoader(dataset=test_set, batch_size=1000, shuffle=False)

    # Round 0: every party trains from scratch and submits encrypted weights.
    para_A = subset1.train_and_encrypt1()
    para_B = subset2.train_and_encrypt1()
    para_C = subset3.train_and_encrypt1()

    for i in range(10):
        print("Round %d federating" % (i + 1))
        # Average the (still noise-masked) fc1/fc2 weights of all parties...
        com_para_fc1, com_para_fc2 = FederalCalculator.combineParams(para_A, para_B, para_C)

        # ...then each party decrypts, continues training, and re-encrypts.
        para_A = subset1.train_and_encrypt2(com_para_fc1, com_para_fc2)
        para_B = subset2.train_and_encrypt2(com_para_fc1, com_para_fc2)
        para_C = subset3.train_and_encrypt2(com_para_fc1, com_para_fc2)
    subset1.plot()
    subset2.plot()
    subset3.plot()
    plt.title('Federal Training')   # figure title
    plt.xlabel('Epoch')             # x-axis label
    plt.ylabel('Accuracy')          # y-axis label
    plt.show()
