from copy import deepcopy
import numpy as np
import torch.utils.data
from torch import nn, optim
from tqdm import tqdm

from server_client import copy_model_params


class Client(object):
    """A federated-learning client.

    Holds a local copy of the global model, trains it on this client's own
    data, and computes the per-layer parameter delta (local - global) that the
    server aggregates.
    """

    def __init__(self, conf, model, device, train_loader, id=1):
        self.client_id = id  # client identifier
        self.conf = conf  # configuration dict (expects at least 'lr' and 'weight')
        self.local_model = deepcopy(model)  # client-local copy of the model
        self.train_loader = train_loader  # loader over this client's training data
        self.grad_update = dict()  # parameter deltas produced by the last local training round
        self.weight = conf['weight']  # aggregation weight used when updating the global model
        self.device = device  # device to train on
        self.local_model.to(self.device)  # move the local model to the training device

    def train(self, model, local_epoch=1):
        """Run one federated round.

        :param model: current global model from the server; the local model is
            synced from it before training, and the delta is computed against it after.
        :param local_epoch: number of local training epochs.
        """
        self._before_train(model)
        self._local_train(local_epoch)
        self._after_train(model)

    def _before_train(self, model):
        self._load_global_model(model)

    # Overwrite the local model's parameters with the server's global model.
    def _load_global_model(self, model):
        copy_model_params(self.local_model, model)

    def _local_train(self, local_epochs):
        """Train the local model for ``local_epochs`` epochs on this client's data."""
        # Loss function and optimizer.
        criterion = nn.CrossEntropyLoss()
        optimizer = optim.Adam(self.local_model.parameters(), lr=self.conf['lr'])
        # Local training loop.
        self.local_model.train()
        for epoch in range(local_epochs):
            # BUGFIX: reset the running loss every epoch. Previously it was
            # initialized once outside this loop, so later epochs' reported
            # averages included all earlier epochs' losses.
            running_loss = 0.0
            for data in tqdm(self.train_loader,
                             desc=f"client_{self.client_id}_local_epoch_{epoch}",
                             unit="batch"):
                inputs, labels = data
                inputs, labels = inputs.to(self.device), labels.to(self.device)  # move batch to device
                optimizer.zero_grad()

                outputs = self.local_model(inputs)
                loss = criterion(outputs, labels)
                loss.backward()
                optimizer.step()

                running_loss += loss.item()
            avg_loss = running_loss / len(self.train_loader)
            print(f'Epoch {epoch + 1}, Loss: {avg_loss}')

    def _after_train(self, model):
        self._cal_update_weights(model)

    def _cal_update_weights(self, old_model):
        """Compute per-layer deltas (local - global) into ``self.grad_update``.

        :param old_model: the global model the local model was synced from.
        """
        # Hoisted out of the loop: state_dict() rebuilds its dict on every call.
        old_params = old_model.state_dict()
        weight_updates = dict()
        for layer_name, params in self.local_model.state_dict().items():
            # Direct indexing (not .get) so an architecture mismatch raises a
            # clear KeyError instead of silently subtracting None.
            weight_updates[layer_name] = params - old_params[layer_name]
        # Store the update for the server to aggregate.
        self.grad_update = weight_updates
