import copy

import numpy as np
import torch
from torch.utils.data import DataLoader, Dataset

from util.utils import init_model


class DatasetSplit(Dataset):
    """A view over ``dataset`` restricted to the samples at ``idxs``.

    Lets each federated client train on its own index subset without
    copying the underlying data.
    """

    def __init__(self, dataset, idxs):
        self.dataset = dataset
        # Materialize as a list so iterables/sets of indices are also accepted.
        self.idxs = list(idxs)

    def __len__(self):
        return len(self.idxs)

    def __getitem__(self, index):
        # Delegate to the wrapped dataset; it yields (image, label) pairs.
        return self.dataset[self.idxs[index]]


def init_optimizer(model, args):
    """Build the optimizer selected by ``args.optimizer``.

    Args:
        model: module whose parameters will be optimized.
        args: namespace providing ``optimizer`` ('sgd' or 'adam'), ``lr``,
            and (for SGD) ``momentum``.

    Returns:
        A configured ``torch.optim.SGD`` or ``torch.optim.Adam`` instance.

    Raises:
        ValueError: if ``args.optimizer`` is not recognized.  The original
            silently returned an empty list, which only failed much later
            at ``optimizer.step()`` with a confusing AttributeError.
    """
    if args.optimizer == 'sgd':
        return torch.optim.SGD(model.parameters(), lr=args.lr,
                               momentum=args.momentum, weight_decay=5e-4)
    if args.optimizer == 'adam':
        return torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=1e-4)
    raise ValueError(f"unsupported optimizer: {args.optimizer!r}")


def weights_zero(model):
    """Zero every parameter of ``model`` in place, without autograd tracking.

    Used to reset the global model before accumulating client weights in
    FedAvg.  Note this touches only parameters, not buffers.
    """
    # The original guarded on ``p.data is not None`` (always true for
    # nn.Parameter) and mutated ``p.data``; zeroing under no_grad() is the
    # idiomatic, equivalent in-place reset.
    with torch.no_grad():
        for param in model.parameters():
            param.zero_()


class Node(object):
    """A federated client: owns a data shard, a personalized local model,
    and a local replica ("meme") of the global model.
    """

    def __init__(self, i, args, dataset_train, dataset_test, dict_users_train, dict_users_test):
        self.args = args
        self.num = i + 1  # client ids start at 1; id 0 is the global node
        self.device = self.args.device

        # This client's shards of the train and test sets.
        self.local_data = DataLoader(
            DatasetSplit(dataset_train, dict_users_train),
            batch_size=self.args.batchsize,
            shuffle=True,
        )
        self.validate_set = DataLoader(
            DatasetSplit(dataset_test, dict_users_test),
            batch_size=self.args.batchsize,
            shuffle=True,
        )

        # Personalized local model and its optimizer.
        self.customized_model = init_model(self.args.local_model, self.args).to(self.device)
        self.optimizer = init_optimizer(self.customized_model, self.args)

        # Local copy of the global model, trained alongside the local model.
        self.copy_meme = init_model(self.args.global_model, self.args).to(self.device)
        self.meme_optimizer = init_optimizer(self.copy_meme, self.args)

    def fork(self, global_node):
        """Replace the local replica with a deep copy of the global model,
        rebuilding its optimizer (optimizer state is intentionally reset)."""
        self.copy_meme = copy.deepcopy(global_node.model).to(self.device)
        self.meme_optimizer = init_optimizer(self.copy_meme, self.args)


class GlobalNode(object):
    """The central server: holds the global model and aggregates clients."""

    def __init__(self, args):
        self.num = 0  # the global node is id 0; clients are numbered 1..N
        self.args = args
        self.device = self.args.device
        self.model = init_model(self.args.global_model, self.args).to(self.args.device)
        # Cached state dict of the current global model.
        self.Dict = self.model.state_dict()

    def average(self, node_list):
        """FedAvg: set the global model to the mean of the clients'
        ``copy_meme`` state dicts.

        Fixes vs. the original implementation:
        * accumulation happens in float, so integer buffer tensors (e.g.
          BatchNorm's ``num_batches_tracked``) no longer break the
          in-place ``/=`` division;
        * the original zeroed only *parameters* before accumulating, so
          buffer entries of ``self.Dict`` started from stale values;
        * an empty ``node_list`` raises instead of dividing by zero.
        """
        if not node_list:
            raise ValueError("node_list must be non-empty")
        states = [node.copy_meme.state_dict() for node in node_list]
        averaged = {}
        for key in states[0]:
            # Stack all client tensors and average in float, then cast back
            # to the entry's original dtype (truncates integer buffers).
            stacked = torch.stack([state[key].detach().float() for state in states])
            averaged[key] = stacked.mean(dim=0).to(states[0][key].dtype)
        self.model.load_state_dict(averaged)
        self.Dict = self.model.state_dict()

    def test(self, test_loader):
        """Evaluate the global model on the full test set.

        Returns:
            (mean per-batch cross-entropy loss, accuracy in percent).
        """
        self.model.to(self.device).eval()
        criterion = torch.nn.CrossEntropyLoss()  # hoisted out of the loop
        total_loss = 0.0
        correct = 0.0
        num_batches = 0
        with torch.no_grad():
            for data, target in test_loader:
                data, target = data.to(self.device), target.to(self.device)
                output = self.model(data)
                total_loss += criterion(output, target).item()
                num_batches += 1
                pred = output.argmax(dim=1)
                correct += pred.eq(target.view_as(pred)).sum().item()
        # max(..., 1) guards an empty loader (original raised NameError on idx).
        total_loss = total_loss / max(num_batches, 1)
        acc = correct / len(test_loader.dataset) * 100
        print("中心节点测试结果：")
        print('&&&&&&&&&&=========loss={:.2f}, acc={:.2f}'.format(total_loss, acc))
        return total_loss, acc
