# To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# NOTE(review): translated leftover author note: "By the end this still looks like what my junior wrote. It felt like a one-day job, but I kept dragging it out."
import copy
import torch
from torch.utils import data
from trainers.fedbase.FedClientBase import FedClientBase

import logging

# logging.basicConfig(level = logging.INFO,format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logging.basicConfig()
logger = logging.getLogger()
logger.setLevel(logging.INFO)


class FedAmpClient(FedClientBase):
    """Federated client whose local update adds a proximal penalty.

    The local objective is the task loss plus (amp_lambda / 2) * ||w - w0||^2,
    where w0 are the parameters received from the server — the same update
    form as FedProx, used here for the FedAMP personalization strategy.
    """

    def __init__(self, datapair, model):
        """Create a client object and initialize its local state.

        Args:
            datapair (tuple(ndarray, ndarray)): (inputs, labels) local data.
            model (torch.nn.Module): model instance matching the server's
                architecture.
        """
        super(FedAmpClient, self).__init__(datapair, model)
        logger.info("FedAmpClient init----")

    def set_parameters(self, client_params):
        """Load training hyper-parameters sent by the server.

        Base-class parameters (batch size, epochs, data splitting, ...) are
        handled by ``super().set_parameters``; this method only reads the
        FedAMP-specific keys.

        Args:
            client_params (dict): training configuration. Recognized keys:
                - "support_ratio" (float, default 0.7): fraction of local
                  data assigned to the support (training) split.
                - "amp_lambda" (float, default 0.0): weight of the proximal
                  regularization term used in :meth:`train`.
        """
        # Base training parameters (including data loading).
        super().set_parameters(client_params)
        # FedAMP strategy parameters — dict.get replaces the original
        # "if key in dict / else" chains with the idiomatic single lookup.
        self.support_ratio = client_params.get("support_ratio", 0.7)
        self.amp_lambda = client_params.get("amp_lambda", 0.0)

        logger.info("FedAmpClient set_parameters:support_ratio={}\t amp_lambda={}\t".format(self.support_ratio, self.amp_lambda))

    # TODO: the effect of batch_size still needs testing.
    # TODO: shuffle the data before splitting — FEMNIST-style per-user data
    # may be ordered, so a sequential split can bias support vs. query.
    def process_data(self):
        """Split local data into support/query sets and build DataLoaders.

        The support set feeds ``self.train_dl`` in mini-batches of
        ``self.batch_size``; the query set feeds ``self.test_dl`` as a single
        full batch (one-batch evaluation).
        """
        x, y = self.datapair
        assert len(x) == len(y)
        sq_point = int(len(y) * self.support_ratio)
        # torch.as_tensor replaces the deprecated torch.FloatTensor(...)
        # constructor; it also reuses the underlying numpy buffer when the
        # dtype already matches (the subsequent .to(device) may still copy).
        x_support_tensor = torch.as_tensor(x[:sq_point], dtype=torch.float32).to(self.device)
        y_support_tensor = torch.as_tensor(y[:sq_point]).to(self.device)
        x_query_tensor = torch.as_tensor(x[sq_point:], dtype=torch.float32).to(self.device)
        y_query_tensor = torch.as_tensor(y[sq_point:]).to(self.device)
        support_ds = data.TensorDataset(x_support_tensor, y_support_tensor)
        self.train_dl = data.DataLoader(support_ds, batch_size=self.batch_size, shuffle=True)
        query_ds = data.TensorDataset(x_query_tensor, y_query_tensor)
        # One full batch for evaluation. max(1, ...) avoids DataLoader's
        # ValueError when the query split is empty; shuffling a single full
        # batch is pointless, so it is disabled.
        self.test_dl = data.DataLoader(query_ds, batch_size=max(1, len(query_ds)), shuffle=False)

    def train(self):
        """Run local training with a FedProx-style proximal regularizer.

        The penalty keeps the updated model from drifting too far from the
        parameters received from the server.

        Returns:
            tuple(float, float): (mean loss over processed batches,
            accuracy in percent). Returns (0.0, 0.0) when no batch was
            processed (zero epochs or empty support split).
        """
        # Snapshot the incoming parameters; they anchor the proximal term.
        old_parm_list = [torch.clone(parm).detach() for parm in self.model.parameters()]

        self.model.train()
        loss_sum = 0.0
        loss_times = 0
        correct_sum = 0.0
        total_size = 0
        for epoch in range(self.epochs):
            for batch_x, batch_y in self.train_dl:
                # Forward pass; accuracy below is measured on these
                # pre-update outputs.
                outputs = self.model(batch_x)
                loss = self.loss_func(outputs, batch_y.long())

                # Proximal term (lambda/2) * ||w - w_old||^2 summed over all
                # parameter tensors. Adding it into ``loss`` lets autograd
                # produce the regularization gradient during backward().
                for i, parm in enumerate(self.model.parameters()):
                    loss += ((1.0 / 2.0) * self.amp_lambda * (parm - old_parm_list[i]) ** 2).sum()

                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()

                _, predicted = torch.max(outputs, 1)
                correct_sum += (predicted == batch_y).sum().item()
                total_size += len(batch_y)
                loss_sum += loss.item()
                loss_times += 1

        if loss_times == 0:
            # No batches processed — avoid ZeroDivisionError.
            return 0.0, 0.0
        return loss_sum / loss_times, 100 * correct_sum / total_size

    def test(self):
        """Evaluate the current model on the local query (test) split.

        Returns:
            float: accuracy in percent on the query set.
        """
        self.model.eval()
        correct = 0
        total = 0
        with torch.no_grad():
            for features, labels in self.test_dl:
                outputs = self.model(features)
                _, predicted = torch.max(outputs, 1)
                total += labels.size(0)
                correct += (predicted == labels).sum().item()

        accuracy = 100 * correct / total
        return accuracy