
import copy
import json

import numpy as np
import torch

from draw.draw_main import draw_model
from models.Models import get_model
from utils.utils import DatasetSplit, test
from torch.utils.data import DataLoader
from tqdm import tqdm
import os


class BaseAggregation():
    """Base class for FedAvg-style federated aggregation.

    Maintains one global model plus an independent local copy per client,
    runs local training on a randomly sampled subset of clients each round,
    averages local weights into the global model, evaluates it, and logs /
    checkpoints the results.
    """

    def __init__(self, config, train_dataset, test_dataset, user_groups, traindata_cls_counts):
        # Per-round global metrics; per-client metrics of the current round.
        self.loss_data = []
        self.client_loss_data = []

        self.config = config
        self.train_dataset = train_dataset
        self.test_dataset = test_dataset
        self.test_loader = torch.utils.data.DataLoader(
            test_dataset, batch_size=self.config.local_bs,
            shuffle=False, num_workers=4)
        # user_groups: presumably maps client_id -> indices of that client's
        # training-data shard — confirm against the partitioning code.
        self.user_groups = user_groups
        self.traindata_cls_counts = traindata_cls_counts
        # Initialize the global model and one independent copy per client.
        self.global_model = get_model(self.config)
        self.net = [copy.deepcopy(self.global_model) for _ in range(self.config.num_users)]

    def update_weights(self, choose_user_ids=None):
        """Return the element-wise average of the selected clients' weights.

        :param choose_user_ids: indices of the client models to average;
            ``None`` averages over all clients (original behavior).
        :return: a state dict holding the averaged parameters.
        """
        if choose_user_ids is None:
            choose_user_ids = range(len(self.net))
        ids = list(choose_user_ids)
        with torch.no_grad():
            # Fetch each state dict once, not once per parameter key.
            states = [self.net[i].state_dict() for i in ids]
            w_avg = copy.deepcopy(states[0])
            for key in w_avg.keys():
                for state in states[1:]:
                    w_avg[key] += state[key]
                if 'num_batches_tracked' in key:
                    # BatchNorm counters are integer tensors; true_divide
                    # avoids integer-division problems on them.
                    w_avg[key] = w_avg[key].true_divide(len(states))
                else:
                    w_avg[key] = torch.div(w_avg[key], len(states))
        return w_avg

    def train(self):
        """Run the federated training loop for ``config.epochs`` rounds."""
        print("begin train...")
        for epoch in range(1, self.config.epochs + 1):
            # Sample this round's participating clients without replacement.
            choose_user = int(self.config.frac * self.config.num_users)
            if choose_user <= 0:
                # Raise instead of assert: asserts vanish under `python -O`.
                raise ValueError("config.frac * config.num_users must select at least one client")
            choose_user_ids = np.random.choice(self.config.num_users, choose_user, replace=False)

            # Local training on the selected clients only.
            self.client_loss_data = []
            self.local_train(choose_user_ids)

            # Aggregate into the global model.
            # NOTE(review): update_weights() is called WITHOUT choose_user_ids,
            # so it averages ALL client models, including clients not trained
            # this round — confirm this is intentional (classic FedAvg would
            # pass choose_user_ids here).
            global_w = copy.deepcopy(self.update_weights())
            self.global_model.load_state_dict(global_w)

            # Evaluate the new global model.
            acc, test_loss = test(self.global_model, self.test_loader, self.config)
            print(f"global epoch: {epoch}, acc: {acc}, test_loss: {test_loss}")
            self.loss_data.append(
                {"global_epoch": epoch, "acc": acc, "test_loss": test_loss, "clients": self.client_loss_data})

            # Checkpoint the global model every round.
            os.makedirs(self.config.save_dir, exist_ok=True)
            torch.save(self.global_model,
                       f"{self.config.save_dir}/global_model_{self.config.aggregation}_{epoch}.pth")

        # Persist the full metrics history once training finishes.
        os.makedirs('results', exist_ok=True)
        with open(f'results/{self.config.dataset}_{self.config.aggregation}_'
                  f'{self.config.partition}_{self.config.model}_'
                  f'{self.config.beta}.json', 'w') as f:
            json.dump(self.loss_data, f)

    def local_train(self, choose_user_ids):
        """Train each selected client's model for ``config.local_ep`` epochs.

        Appends one metrics entry per client to ``self.client_loss_data``.

        :param choose_user_ids: iterable of client indices to train.
        """
        for client_id in choose_user_ids:
            model = self.net[client_id]
            model.train()

            train_loader = DataLoader(
                DatasetSplit(self.train_dataset, self.user_groups[client_id]),
                batch_size=self.config.local_bs, shuffle=True, num_workers=4)
            optimizer = self.get_optimizer(model)
            pbar = tqdm(range(self.config.local_ep), desc='LocalTrain', unit='item')
            epoch = 0  # stays defined even if local_ep == 0 (original raised NameError)
            for epoch in pbar:
                for x, y in train_loader:
                    x, y = x.to(self.config.device), y.to(self.config.device)
                    model.zero_grad()
                    output = model(x)
                    loss = torch.nn.functional.cross_entropy(output, y)
                    loss.backward()
                    optimizer.step()

            # Evaluate the client model once after its local epochs complete.
            acc, test_loss = test(model, self.test_loader, self.config)
            pbar.set_postfix(
                {"client_id": client_id, "epoch": epoch, "acc": acc, "test_loss": test_loss},
                refresh=True)
            print(f"client_id: {client_id}, epoch: {epoch}, acc: {acc}, test_loss: {test_loss}")
            self.client_loss_data.append({"client_id": int(client_id), "acc": acc, "test_loss": test_loss})

    def get_optimizer(self, model):
        """Build the optimizer named in ``config.optimizer`` for *model*.

        :raises ValueError: for an unsupported optimizer name (the original
            silently returned ``None``, which only failed later with an
            opaque ``AttributeError`` at ``optimizer.step()``).
        """
        if self.config.optimizer == "SGD":
            return torch.optim.SGD(model.parameters(), lr=self.config.lr,
                                   momentum=self.config.momentum)
        raise ValueError(f"unsupported optimizer: {self.config.optimizer!r}")

    def distribute_model(self):
        """Copy the global model's weights into every client's local model."""
        global_weights = self.global_model.state_dict()
        for local_net in self.net:
            local_net.load_state_dict(copy.deepcopy(global_weights))
