# 要添加一个新单元，输入 '# %%'
# 要添加一个新的标记单元，输入 '# %% [markdown]'
# 我发现写到最后，还是学弟写的那种样子。好麻烦啊，感觉应该一天就能写完，但自己在硬拖。


# %%
import torch
import copy
from torch import nn, optim
from dnn_model import Net
from torch.utils import data 


# 训练相关的参数本来应该是服务器发送过来的，包括模型、参数啥的。训练数据应该是本地的。
# 这里为了方便，只模拟联邦学习的效果和逻辑，不模拟联邦学习的真实过程。
# 1. 数据由服务器分成独立同分布，或者非独立同分布。然后分配给各个客户端。而非客户端在本地读取。
# 2. 训练的模型、参数在客户端配置。
# 3. 客户端完成训练过程、服务端完成聚合过程。客户端和服务端只发送和传输模型和优化器的权重参数。完成聚合和分配的通信逻辑。

class FLClient:
    """Federated-learning client.

    Holds a local copy of the model, an optimizer and the local data, and
    implements the client-side (local update) step of several FL algorithms:
    plain FedAvg (`train`), FedProx (`train_prox`), personalized-layer meta
    learning (`train_meta`/`test_meta`), MAML-style updates (`train_maml`,
    `train_maml_by_grad`, `test_maml`), Reptile (`train_reptile`,
    `train_reptile_by_grad`) and per-FedAvg-style local training (`train_per`).
    Only model / optimizer state dicts (or gradients) are exchanged with the
    server, via the `load_*` / `get_*` methods.
    """
    # Receives the parameters and performs the training — mainly the
    # parameters of a single training round.
    # Every algorithm should be able to return gradients, i.e. the gradient is
    # computed on the client; computing it on the server would also be fine.
    def __init__(self,datapair,client_params):
        """Configure device, model, loss, optimizer and data loaders.

        datapair: tuple (x, y) — x is array-like convertible to FloatTensor
            (a channel dim is added via unsqueeze), y is a numpy array of
            integer class labels.
        client_params: dict that may contain "prox_mu" (FedProx proximal
            weight), "epochs" and "batch_size"; defaults are 0.0, 1 and 128.
        """
        # Training device, training data, model and loss function ----------------------------------------------------------
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        self.model = Net().to(self.device)
        self.loss_func = nn.CrossEntropyLoss()
        # Optimizer
        # Adam optimizer (its internal state would also need server-side aggregation)
        # self.optimizer = optim.Adam(self.model.parameters(),lr=0.001)
        # Plain SGD optimizer (stateless, so no optimizer-state aggregation needed)
        self.optimizer = torch.optim.SGD(self.model.parameters(),lr=0.01)
        # Adam optimizer with an L2 regularization term
        # self.optimizer = optim.Adam(self.model.parameters(),lr=0.001,weight_decay=0.000001)
        # SGD optimizer with an L2 regularization term
        # self.optimizer = torch.optim.SGD(self.model.parameters(),lr=0.01,weight_decay=0.000001)

        # Weight of the FedProx proximal term used by train_prox
        if("prox_mu" in client_params):
            self.prox_mu = client_params["prox_mu"]
        else:
            self.prox_mu = 0.0
        # Training parameters ---------------------------------------------------------------------------------------
        if("epochs" in client_params):
            self.epochs = client_params["epochs"]
        else:
            self.epochs =1

        if("batch_size" in client_params):
            self.batch_size = client_params["batch_size"]
        else:
            self.batch_size=128


        # Meta-learning: fraction of the local data used as the support set
        # (the remainder becomes the query set)
        self.support_ratio = 0.8

        # Number of leading parameter tensors treated as shared "meta" layers;
        # the remaining tensors are the personalized/local layers
        self.meta_layer = 4

        # Local-update parameters; effectively represented by `epochs` already,
        # kept here for now (update_steps/inner_lr/outter_lr are currently unused
        # by the methods below — NOTE(review): confirm before removing)
        self.update_steps = 5
        self.inner_lr =0.01
        self.outter_lr = 0.5

        # Preprocess the training data: convert to tensors and move them to the
        # target device
        self.train_dl = self.process_data(datapair)
        self.support_dl,self.query_dl = self.process_data_meta(datapair)
        # How many iterations (mini-batches) one round contains
        self.iteration  = self.epochs*(len(self.train_dl))

        print("FLClient Config:","----device:",self.device,"----iteration",self.iteration,"----epochs:",self.epochs,"----batch_size:",self.batch_size)


    def process_data(self,datapair):
        """Build a shuffled DataLoader over the full local dataset.

        Adds a channel dimension to x (so images presumably arrive as
        (N, H, W) — TODO confirm against the data-generation side) and moves
        both tensors to self.device up front.
        """
        x,y = datapair
        assert len(x) == len(y)
        # Convert the data to tensors; kept on-device to speed things up
        x_train_tensor = torch.unsqueeze(torch.FloatTensor(x),1).to(self.device)
        y_train_tensor = torch.from_numpy(y).to(self.device)
        # print(x_train_tensor.dtype,y_train_tensor.dtype)
        train_ds = data.TensorDataset(x_train_tensor,y_train_tensor)
        train_dl = data.DataLoader(train_ds,batch_size=self.batch_size,shuffle=True)
        return train_dl

    # Note: both the support and the query set are loaded as ONE batch each,
    # i.e. one full-batch gradient step per pass.
    def process_data_meta(self,datapair):
        """Split the local data into support/query DataLoaders.

        The first `support_ratio` fraction of samples (in given order, no
        shuffling before the split) becomes the support set; the rest is the
        query set. Each loader uses a single batch covering its whole set.
        Returns (support_dl, query_dl).
        """
        x,y = datapair
        assert len(x) == len(y)
        sq_point  = int(len(y)*self.support_ratio)
        # Convert the data to tensors; kept on-device to speed things up
        x_support_tensor = torch.unsqueeze(torch.FloatTensor(x[:sq_point]),1).to(self.device)
        y_support_tensor = torch.from_numpy(y[:sq_point]).to(self.device)
        x_query_tensor = torch.unsqueeze(torch.FloatTensor(x[sq_point:]),1).to(self.device)
        y_query_tensor = torch.from_numpy(y[sq_point:]).to(self.device)
        # print(x_train_tensor.dtype,y_train_tensor.dtype)
        support_ds = data.TensorDataset(x_support_tensor,y_support_tensor)
        support_dl = data.DataLoader(support_ds,batch_size=len(support_ds),shuffle=True)
        query_ds  = data.TensorDataset(x_query_tensor,y_query_tensor)
        query_dl = data.DataLoader(query_ds,batch_size=len(query_ds),shuffle=True)
        return support_dl,query_dl

    def load_model(self,model_state_dict,optimizer_state_dict):
        """Replace the full model and optimizer state with server-sent dicts."""
        self.model.load_state_dict(model_state_dict)
        self.optimizer.load_state_dict(optimizer_state_dict)

    # meta variant of load_model: only a subset of parameters arrives
    def load_model_meta(self,model_state_dict,optimizer_state_dict):
        """Merge a PARTIAL model state dict (the shared meta layers) into the
        current local model, keeping local-only parameters untouched, then
        load the optimizer state."""
        # The server sends only part of the parameters; build a fresh full
        # state dict with those entries overwritten before loading.
        new_par = copy.deepcopy(self.model.state_dict())
        for name in model_state_dict:
            new_par[name]= model_state_dict[name]
        self.model.load_state_dict(new_par)
        self.optimizer.load_state_dict(optimizer_state_dict)

    # Evaluate the trained model and return the accuracy
    def test(self):
        """Return accuracy (percent) of the current model.

        NOTE(review): this evaluates on the TRAINING loader (self.train_dl),
        i.e. it measures training accuracy, not held-out accuracy — confirm
        that this is intentional.
        """
        self.model.eval()
        correct = 0
        total =0
        with torch.no_grad():
            for features,labels in self.train_dl:
                outputs = self.model(features)
                # print(outputs.shape)
                _,predicted = torch.max(outputs,1)
                total += labels.size(0)
                correct += (predicted == labels).sum().item()

        accuracy = 100*correct/total
        return accuracy


    # Train the current local model; returns (running_loss, running_accuracy)
    def train(self):
        """Plain local training (FedAvg client step).

        Runs `self.epochs` passes over train_dl with the configured optimizer.
        Returns (mean batch loss, accuracy as a fraction in [0, 1]).
        """
        self.model.train()
        loss_sum = 0.
        loss_times = 0.
        correct_sum = 0.
        total_size = 0
        for epoch in range(self.epochs):
            for batch_x,batch_y in self.train_dl:
                # Forward pass; also accumulate accuracy statistics
                outputs = self.model(batch_x)
                _,predicted = torch.max(outputs,1)
                correct_sum += (predicted == batch_y).sum().item()
                total_size += len(batch_y)
                # Compute the loss
                loss = self.loss_func(outputs,batch_y.long())
                loss_sum +=loss.item()
                loss_times+=1
                # Backward pass
                self.optimizer.zero_grad()
                loss.backward()
                # Gradient-descent step
                self.optimizer.step()

        # print(self.optimizer.state_dict())
        return loss_sum/loss_times,correct_sum/total_size

    # Train the current local model; returns (running_loss, running_accuracy).
    # Adds a proximal update term. Implementation options considered:
    # 1. Changing the optimizer directly is cumbersome; instead try to modify
    #    the loss function and check whether that meets the requirement.
    # 2. Alternatively modify the weights directly.
    # 3. Alternatively subclass a modified optimizer.
    def train_prox(self):
        """FedProx local training: plain training plus a proximal penalty
        (prox_mu / 2) * ||w - w_round_start||^2 added to every batch loss.

        Returns (mean batch loss incl. the proximal term, accuracy fraction).
        """
        # Snapshot the parameters at round start; they anchor the proximal term
        old_parm_list = []
        for parm in self.model.parameters():
            old_parm_list.append(torch.clone(parm).detach())


        # param_state['old_init'] = torch.clone(p.data).detach()
        self.model.train()
        loss_sum = 0.
        loss_times = 0.
        correct_sum = 0.
        total_size = 0
        for epoch in range(self.epochs):
            for batch_x,batch_y in self.train_dl:
                # Forward pass; accumulate accuracy statistics
                outputs = self.model(batch_x)
                _,predicted = torch.max(outputs,1)
                correct_sum += (predicted == batch_y).sum().item()
                total_size += len(batch_y)
                loss = self.loss_func(outputs,batch_y.long())

                # Fold the proximal regularization term into the loss so the
                # normal backward pass handles it. Verified that the graph
                # built up via += still back-propagates correctly.
                for i,parm in enumerate(self.model.parameters()):
                    loss += (1.0/2.0*self.prox_mu * (parm-old_parm_list[i])**2).sum()

                loss_sum +=loss.item()
                loss_times+=1
                # Backward pass
                self.optimizer.zero_grad()
                loss.backward()
                # Gradient-descent step
                self.optimizer.step()
        # print(self.optimizer.state_dict())
        return loss_sum/loss_times,correct_sum/total_size



    # Train the current local model; returns (running_loss, running_accuracy)
    def train_meta(self):
        """Personalized-layer meta training.

        Phase 1: freeze the first `meta_layer` parameter tensors (the shared
        meta layers) and train the remaining local layers on the support set
        for `epochs` passes. Phase 2: invert the freezing and take one pass
        over the query set, updating only the meta layers. All layers are
        unfrozen afterwards. Note that each phase replaces self.optimizer with
        a fresh SGD(lr=0.01) over the currently trainable parameters.
        Returns (mean batch loss across both phases, query-set accuracy
        fraction).
        """
        self.model.train()
        loss_sum = 0.
        loss_times = 0.
        correct_sum = 0.
        total_size = 0
        # Freeze the meta layers, train the local layers. Possible optimization:
        # keep two optimizers built in __init__, leave requires_grad True on all
        # parameters and let each optimizer update only its subset.
        for i,parm in enumerate(self.model.parameters()):
            if(i<self.meta_layer):
                parm.requires_grad_(False)
            else:
                parm.requires_grad=True
        self.optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, self.model.parameters()),lr=0.01)

        for epoch in range(self.epochs):
            # Gradient descent on the support set
            for batch_x,batch_y in self.support_dl:
                # Forward pass
                outputs = self.model(batch_x)

                # Compute the loss
                loss = self.loss_func(outputs,batch_y.long())
                loss_sum +=loss.item()
                loss_times+=1
                # Backward pass
                self.optimizer.zero_grad()
                loss.backward()

                # Gradient-descent step
                self.optimizer.step()

        # Unfreeze the meta layers, freeze the local layers, and take one
        # gradient-descent pass on the query set
        for i,parm in enumerate(self.model.parameters()):
            if(i<self.meta_layer):
                parm.requires_grad=True
            else:
                parm.requires_grad=False
        self.optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, self.model.parameters()),lr=0.01)

        for batch_x,batch_y in self.query_dl:
            # Forward pass
            outputs = self.model(batch_x)
            _,predicted = torch.max(outputs,1)
            correct_sum += (predicted == batch_y).sum().item()
            total_size += len(batch_y)
            # Compute the loss
            loss = self.loss_func(outputs,batch_y.long())
            loss_sum +=loss.item()
            loss_times+=1
            # Backward pass
            self.optimizer.zero_grad()
            loss.backward()

            # Gradient-descent step
            self.optimizer.step()
        # print(self.optimizer.state_dict())
        # Unfreeze all layers again
        for parm in self.model.parameters():
            parm.requires_grad=True
        self.optimizer = torch.optim.SGD(self.model.parameters(),lr=0.01)

        return loss_sum/loss_times,correct_sum/total_size

    # Meta evaluation: fine-tune locally first, then test
    def test_meta(self):
        """Fine-tune the local (non-meta) layers on the support set for
        epochs+10 passes, then return accuracy (percent) on the query set.
        Replaces self.optimizer with fresh SGD instances as a side effect."""
        self.model.train()
        # Freeze the meta layers, train the local layers. Possible optimization:
        # keep two optimizers built in __init__, leave requires_grad True on all
        # parameters and let each optimizer update only its subset.
        for i,parm in enumerate(self.model.parameters()):
            if(i<self.meta_layer):
                parm.requires_grad_(False)
            else:
                parm.requires_grad=True
        self.optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, self.model.parameters()),lr=0.01)

        for epoch in range(self.epochs+10):
            # Gradient descent on the support set
            for batch_x,batch_y in self.support_dl:
                # Forward pass
                outputs = self.model(batch_x)

                # Compute the loss
                loss = self.loss_func(outputs,batch_y.long())
                # Backward pass
                self.optimizer.zero_grad()
                loss.backward()

                # Gradient-descent step
                self.optimizer.step()
        # Unfreeze all layers again
        for parm in self.model.parameters():
            parm.requires_grad=True
        self.optimizer = torch.optim.SGD(self.model.parameters(),lr=0.01)

        self.model.eval()
        correct = 0
        total =0
        with torch.no_grad():
            for features,labels in self.query_dl:
                outputs = self.model(features)
                # print(outputs.shape)
                _,predicted = torch.max(outputs,1)
                total += labels.size(0)
                correct += (predicted == labels).sum().item()

        accuracy = 100*correct/total
        return accuracy


    # Train the current local model; returns (running_loss, running_accuracy)
    def train_maml(self):
        """MAML-style local update.

        Inner loop: `epochs` passes of gradient descent on the support set.
        Outer step: compute the query-set gradient on the fine-tuned weights,
        then restore the round-start weights BEFORE optimizer.step(), so the
        query gradient is applied to the original parameters (load_state_dict
        copies values in place, which presumably leaves the already-computed
        .grad buffers intact — verify against the PyTorch version in use).
        Returns (mean batch loss over both loops, query-set accuracy fraction).
        """
        self.model.train()
        loss_sum = 0.
        loss_times = 0.
        correct_sum = 0.
        total_size = 0
        # Keep a copy of the round-start parameters
        model_state_dict = copy.deepcopy(self.model.state_dict())
        for epoch in range(self.epochs):
            # Gradient descent on the support set
            for batch_x,batch_y in self.support_dl:
                # Forward pass
                outputs = self.model(batch_x)

                # Compute the loss
                loss = self.loss_func(outputs,batch_y.long())
                loss_sum +=loss.item()
                loss_times+=1
                # Backward pass
                self.optimizer.zero_grad()
                loss.backward()

                # Gradient-descent step
                self.optimizer.step()

        # One gradient-descent pass on the query set
        for batch_x,batch_y in self.query_dl:
            # Forward pass
            outputs = self.model(batch_x)
            _,predicted = torch.max(outputs,1)
            correct_sum += (predicted == batch_y).sum().item()
            total_size += len(batch_y)
            # Compute the loss
            loss = self.loss_func(outputs,batch_y.long())
            loss_sum +=loss.item()
            loss_times+=1
            # Backward pass
            self.optimizer.zero_grad()
            loss.backward()

            # Gradient-descent step; reload the initial parameter state first
            self.model.load_state_dict(model_state_dict)
            self.optimizer.step()
        # print(self.optimizer.state_dict())
        return loss_sum/loss_times,correct_sum/total_size

    # Gradient-returning version of MAML
    def train_maml_by_grad(self):
        """MAML variant that exposes the outer gradient instead of stepping.

        Same support-set inner loop as train_maml, but for the query set it
        stores torch.autograd.grad(...) in self.grad_dict and performs no
        parameter update — the server is expected to apply it.
        NOTE(review): despite the name, grad_dict here is a TUPLE of tensors
        (ordered like model.parameters()), not a dict; and the round-start
        `model_state_dict` copy taken below is never used in this method.
        Returns (mean batch loss, query-set accuracy fraction).
        """
        self.model.train()
        loss_sum = 0.
        loss_times = 0.
        correct_sum = 0.
        total_size = 0
        # Keep a copy of the round-start parameters
        model_state_dict = copy.deepcopy(self.model.state_dict())
        for epoch in range(self.epochs):
            # Gradient descent on the support set
            for batch_x,batch_y in self.support_dl:
                # Forward pass
                outputs = self.model(batch_x)

                # Compute the loss
                loss = self.loss_func(outputs,batch_y.long())
                loss_sum +=loss.item()
                loss_times+=1
                # Backward pass
                self.optimizer.zero_grad()
                loss.backward()

                # Gradient-descent step
                self.optimizer.step()

        # One pass on the query set, capturing the gradient instead of stepping
        for batch_x,batch_y in self.query_dl:
            # Forward pass
            outputs = self.model(batch_x)
            _,predicted = torch.max(outputs,1)
            correct_sum += (predicted == batch_y).sum().item()
            total_size += len(batch_y)
            # Compute the loss
            loss = self.loss_func(outputs,batch_y.long())
            loss_sum +=loss.item()
            loss_times+=1
            # Backward pass (gradient captured, no optimizer step)
            self.optimizer.zero_grad()
            self.grad_dict = torch.autograd.grad(loss,self.model.parameters(),create_graph=True, retain_graph=True)
        # print(self.optimizer.state_dict())
        return loss_sum/loss_times,correct_sum/total_size

    # Evaluate after fine-tuning
    def test_maml(self):
        """Fine-tune ALL parameters on the support set for epochs+10 passes,
        then return accuracy (percent) on the query set."""
        self.model.train()
        # (Unlike test_meta, nothing is frozen here; the whole model is tuned.)
        for epoch in range(self.epochs+10):
            # Gradient descent on the support set
            for batch_x,batch_y in self.support_dl:
                # Forward pass
                outputs = self.model(batch_x)

                # Compute the loss
                loss = self.loss_func(outputs,batch_y.long())
                # Backward pass
                self.optimizer.zero_grad()
                loss.backward()

                # Gradient-descent step
                self.optimizer.step()
        # Evaluation
        self.model.eval()
        correct = 0
        total =0
        with torch.no_grad():
            for features,labels in self.query_dl:
                outputs = self.model(features)
                # print(outputs.shape)
                _,predicted = torch.max(outputs,1)
                total += labels.size(0)
                correct += (predicted == labels).sum().item()

        accuracy = 100*correct/total
        return accuracy


    # Train the current local model; returns (running_loss, running_accuracy)
    def train_reptile(self):
        """Reptile local update: plain training over train_dl for `epochs`
        passes. No query-set step — the outer_lr scaling happens on the
        server, as with MAML.
        NOTE(review): the round-start `model_state_dict` copy below is unused
        in this method (train_reptile_by_grad is the variant that uses it).
        Returns (mean batch loss, accuracy fraction).
        """
        self.model.train()
        loss_sum = 0.
        loss_times = 0.
        correct_sum = 0.
        total_size = 0
        # Keep a copy of the round-start parameters
        model_state_dict = copy.deepcopy(self.model.state_dict())
        for epoch in range(self.epochs):
            # Gradient descent over the local data
            for batch_x,batch_y in self.train_dl:
                # Forward pass; accumulate accuracy statistics
                outputs = self.model(batch_x)
                _,predicted = torch.max(outputs,1)
                correct_sum += (predicted == batch_y).sum().item()
                total_size += len(batch_y)

                # Compute the loss
                loss = self.loss_func(outputs,batch_y.long())
                loss_sum +=loss.item()
                loss_times+=1
                # Backward pass
                self.optimizer.zero_grad()
                loss.backward()

                # Gradient-descent step
                self.optimizer.step()

        # Reptile needs no query-set gradient step; the outer_lr scaling is
        # done on the server, just like for MAML.

        # print(self.optimizer.state_dict())
        return loss_sum/loss_times,correct_sum/total_size

    # Train the current local model; returns (running_loss, running_accuracy)
    def train_reptile_by_grad(self):
        """Reptile variant that exposes the parameter delta as the 'gradient'.

        Trains like train_reptile, then stores
        self.grad_dict[name] = (new - round_start) per state-dict entry; the
        server applies it with outer_lr scaling. Returns (mean batch loss,
        accuracy fraction).
        """
        self.model.train()
        loss_sum = 0.
        loss_times = 0.
        correct_sum = 0.
        total_size = 0
        # Keep a copy of the round-start parameters
        model_state_dict = copy.deepcopy(self.model.state_dict())
        for epoch in range(self.epochs):
            # Gradient descent over the local data
            for batch_x,batch_y in self.train_dl:
                # Forward pass; accumulate accuracy statistics
                outputs = self.model(batch_x)
                _,predicted = torch.max(outputs,1)
                correct_sum += (predicted == batch_y).sum().item()
                total_size += len(batch_y)
                # Compute the loss
                loss = self.loss_func(outputs,batch_y.long())
                loss_sum +=loss.item()
                loss_times+=1
                # Backward pass
                self.optimizer.zero_grad()
                loss.backward()

                # Gradient-descent step
                self.optimizer.step()

        # Reptile needs no query-set step; instead report the parameter delta,
        # which the server rescales with outer_lr (as with MAML).
        self.grad_dict={}
        new_state_dict = self.model.state_dict()
        for name in model_state_dict:
            self.grad_dict[name]=new_state_dict[name]-model_state_dict[name]

        # print(self.optimizer.state_dict())
        return loss_sum/loss_times,correct_sum/total_size



    # Train the current local model; returns (running_loss, running_accuracy)
    def train_per(self):
        """Personalized local training: plain training over train_dl for
        `epochs` passes (identical loop to `train`, kept as a separate entry
        point for the personalized-FL flow).
        Returns (mean batch loss, accuracy fraction)."""
        self.model.train()
        loss_sum = 0.
        loss_times = 0.
        correct_sum = 0.
        total_size = 0

        for epoch in range(self.epochs):
            # Gradient descent over the local data
            for batch_x,batch_y in self.train_dl:
                # Forward pass; accumulate accuracy statistics
                outputs = self.model(batch_x)
                _,predicted = torch.max(outputs,1)
                correct_sum += (predicted == batch_y).sum().item()
                total_size += len(batch_y)
                # Compute the loss
                loss = self.loss_func(outputs,batch_y.long())
                loss_sum +=loss.item()
                loss_times+=1
                # Backward pass
                self.optimizer.zero_grad()
                loss.backward()

                # Gradient-descent step
                self.optimizer.step()
        return loss_sum/loss_times,correct_sum/total_size


    # Return the current model state
    def get_model_state_dict(self):
        """Return the model's state dict (live references, not a copy)."""
        return self.model.state_dict()

    # Return the current optimizer state
    def get_optim_state_dict(self):
        """Return the optimizer's state dict."""
        return self.optimizer.state_dict()

    def get_model_state_dict_meta(self):
        """Return only the first `meta_layer` state-dict entries (the shared
        meta layers), keyed as in the full state dict."""
        state_dict =  self.model.state_dict()
        meta_dict = {key:state_dict[key] for i,key in enumerate(state_dict)  if i < self.meta_layer}
        return meta_dict

    def get_grad_dict(self):
        """Deep-copy and return the gradient produced by the last
        train_*_by_grad call (a dict for reptile, a tensor tuple for maml).
        Only valid after one of those methods has run."""
        return copy.deepcopy(self.grad_dict)
