import copy
import json
import os

import numpy as np
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm

from Aggregation.BaseAggregation import BaseAggregation
from utils.utils import DatasetSplit, test


class Scaffold(BaseAggregation):
    """SCAFFOLD aggregation (Stochastic Controlled Averaging).

    Maintains a global control variate ``c`` and one local control variate
    ``c_i`` per client to correct client drift on heterogeneous data.
    Line numbers referenced in comments ("line 10", "line 16", ...) refer to
    the SCAFFOLD algorithm listing (Karimireddy et al., 2020).
    """

    def __init__(self, config, train_dataset, test_dataset, user_groups, traindata_cls_counts):
        super(Scaffold, self).__init__(config, train_dataset, test_dataset, user_groups, traindata_cls_counts)
        # Global control variate c, stored as a model copy so its state_dict
        # matches the global model key-for-key.
        self.control_global = copy.deepcopy(self.global_model).to(self.config.device)
        self.control_weights = self.control_global.state_dict()
        # One local control variate c_i per client (same structure as the model).
        self.local_controls = [copy.deepcopy(self.global_model) for i in range(self.config.num_users)]
        # Number of clients sampled per round (at least one).
        self.m = max(int(self.config.frac * self.config.num_users), 1)

    def train(self):
        """Run the federated training loop for ``config.epochs`` rounds.

        Each round: sample clients, train them locally (accumulating their
        model/control deltas), aggregate into the global model, evaluate,
        and checkpoint. Finally dump per-round metrics to a JSON file.
        """
        print("begin train...")

        # Round accumulators (re-zeroed each round):
        #   delta_c: sum of per-client control deltas (Delta c_i in the paper)
        #   delta_x: sum of per-client model deltas   (Delta y_i)
        self.delta_c = copy.deepcopy(self.global_model.state_dict())
        self.delta_x = copy.deepcopy(self.global_model.state_dict())

        for epoch in range(1, self.config.epochs + 1):
            # Reset to the float scalar 0.0; the first `+=` with a tensor
            # promotes each entry to a float tensor of the right shape.
            for ci in self.delta_c:
                self.delta_c[ci] = 0.0
            for ci in self.delta_x:
                self.delta_x[ci] = 0.0

            # Sample the clients participating in this round.
            choose_user = int(self.config.frac * self.config.num_users)
            assert choose_user > 0, "choose_user > 0"
            choose_user_ids = np.random.choice(self.config.num_users, choose_user, replace=False)

            # Local training on every selected client.
            self.client_loss_data = []
            self.local_train(choose_user_ids, epoch - 1)

            # Aggregate the accumulated deltas into the global model/control.
            self.update_weights(epoch - 1)

            # Evaluate the new global model.
            acc, test_loss = test(self.global_model, self.test_loader, self.config)
            print(f"global epoch: {epoch}, acc: {acc}, test_loss: {test_loss}")
            self.loss_data.append(
                {"global_epoch": epoch, "acc": acc, "test_loss": test_loss, "clients": self.client_loss_data})

            # Checkpoint the global model for this round.
            if not os.path.exists(self.config.save_dir):
                os.makedirs(self.config.save_dir)
            torch.save(self.global_model,
                       f"{self.config.save_dir}/global_model_{self.config.aggregation}_{epoch}.pth")

        with open(f'{self.config.dataset}_{self.config.aggregation}_'
                  f'{self.config.partition}_{self.config.model}_'
                  f'{self.config.beta}.json', 'w') as f:
            json.dump(self.loss_data, f)

    def local_train(self, choose_user_ids, global_round):
        """Train each selected client locally and accumulate its deltas.

        :param choose_user_ids: ids of the clients sampled for this round
        :param global_round: zero-based index of the current global round
        :return: (control_delta, local_delta, new_control_local_w) of the
                 last trained client (kept for backward compatibility)
        """
        control_delta = local_delta = new_control_local_w = None
        for client_id in choose_user_ids:
            control_local = self.local_controls[client_id]
            control_global = self.control_global

            model = self.net[client_id]
            model.train()

            train_loader = DataLoader(DatasetSplit(self.train_dataset, self.user_groups[client_id]),
                                      batch_size=self.config.local_bs, shuffle=True, num_workers=4)
            pbar = tqdm(range(self.config.local_ep), desc='LocalTrain', unit='item')

            # Snapshot of the global weights x at the start of the round.
            global_model = copy.deepcopy(self.global_model).to(self.config.device)
            global_weights = global_model.state_dict()

            # Exponential learning-rate decay across global rounds
            # (decay == 0 means "no decay", not a zero learning rate).
            decay = self.config.decay
            if decay != 0:
                learn_rate = self.config.lr * pow(decay, global_round)
            else:
                learn_rate = self.config.lr
            # Set optimizer for the local updates.
            optimizer_name = self.config.optimizer.lower().strip()
            if optimizer_name == 'sgd':
                optimizer = torch.optim.SGD(model.parameters(), lr=learn_rate,
                                            momentum=0.9, weight_decay=0.00001)
            elif optimizer_name == 'adam':
                optimizer = torch.optim.Adam(model.parameters(), lr=learn_rate,
                                             weight_decay=1e-4)
            else:
                # BUG FIX: an unrecognized optimizer name previously left
                # `optimizer` unbound and crashed with NameError mid-training.
                raise ValueError(f"unsupported optimizer: {self.config.optimizer}")

            control_global_w = control_global.state_dict()
            control_local_w = control_local.state_dict()

            count = 0  # K: number of local optimization steps taken
            epoch = 0  # keeps the post-loop logging safe when local_ep == 0
            for epoch in pbar:

                for idx, (x, y) in enumerate(train_loader):
                    x, y = x.to(self.config.device), y.to(self.config.device)
                    model.zero_grad()
                    output = model(x)
                    loss = torch.nn.functional.cross_entropy(output, y)
                    loss.backward()
                    optimizer.step()

                    # Line 10 in algo: drift correction y <- y - lr * (c - c_i).
                    # NOTE(review): uses the undecayed base lr here while the
                    # optimizer may use the decayed one — confirm intended.
                    local_weights = model.state_dict()
                    for w in local_weights:
                        local_weights[w] = local_weights[w] - self.config.lr * (control_global_w[w] - control_local_w[w])

                    # Push corrected params back into the local model.
                    model.load_state_dict(local_weights)

                    count += 1

            acc, test_loss = test(model, self.test_loader, self.config)
            pbar.set_postfix(
                {"client_id": client_id, "epoch": epoch, "acc": acc, "test_loss": test_loss},
                 refresh=True)
            print(f"client_id: {client_id}, epoch: {epoch}, acc: {acc}, test_loss: {test_loss}")
            self.client_loss_data.append({"client_id": int(client_id), "acc": acc, "test_loss": test_loss})

            # state_dict() returns a fresh dict aliasing the same tensors, so
            # rebinding entries below leaves control_local_w untouched.
            new_control_local_w = control_local.state_dict()
            control_delta = copy.deepcopy(control_local_w)
            # model_weights -> y_i (the client's final local weights)
            model_weights = model.state_dict()
            local_delta = copy.deepcopy(model_weights)
            if count > 0:  # guard: K == 0 would divide by zero below
                for w in model_weights:
                    # Line 12 in algo: c_i^+ = c_i - c + (x - y_i) / (K * lr)
                    new_control_local_w[w] = new_control_local_w[w] - control_global_w[w] + (
                                global_weights[w] - model_weights[w]) / (count * self.config.lr)
                    # Line 13: Delta c_i = c_i^+ - c_i; Delta y_i = y_i - x
                    control_delta[w] = new_control_local_w[w] - control_local_w[w]
                    local_delta[w] -= global_weights[w]

            # BUG FIX: everything below used to sit OUTSIDE the client loop,
            # so only the LAST sampled client contributed to delta_c/delta_x
            # (despite update_weights dividing by m) and only its control
            # variate was ever refreshed.
            if global_round != 0:
                self.local_controls[client_id].load_state_dict(new_control_local_w)

            # Line 16: accumulate this client's deltas into the round sums.
            for w in self.delta_c:
                if global_round == 0:
                    # Round 0 averages raw client weights (plain FedAvg init).
                    self.delta_x[w] += model_weights[w]
                else:
                    self.delta_x[w] += local_delta[w]
                    self.delta_c[w] += control_delta[w]

        return control_delta, local_delta, new_control_local_w

    def update_weights(self, epoch):
        """Average the round's deltas and apply them to the global state.

        :param epoch: zero-based global round index
        """
        # Line 16 (server side): average the sums over the m sampled clients.
        for w in self.delta_c:
            self.delta_c[w] /= self.m
            self.delta_x[w] /= self.m

        # Line 17: update global control variate and global model,
        # taking the global step size Ng = 1.
        control_global_W = self.control_global.state_dict()
        global_weights = self.global_model.state_dict()
        for w in control_global_W:
            if epoch == 0:
                # Round 0: delta_x holds the averaged raw weights (FedAvg).
                global_weights[w] = self.delta_x[w]
            else:
                # Integer buffers (e.g. BatchNorm's num_batches_tracked)
                # reject in-place float addition; cast to float first.
                if global_weights[w].dtype != torch.float32:
                    global_weights[w] = global_weights[w].to(torch.float32)
                global_weights[w] += self.delta_x[w]

                if control_global_W[w].dtype != torch.float32:
                    control_global_W[w] = control_global_W[w].to(torch.float32)
                # c <- c + (m / N) * mean(Delta c_i)
                control_global_W[w] += (self.m / self.config.num_users) * self.delta_c[w]

        # Commit the new global control variate and global model.
        self.control_global.load_state_dict(control_global_W)
        self.global_model.load_state_dict(global_weights)