import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import torch

import models
from models import LeNet5, CNNCifar10, ResNet18, CNNCifar100


def print_message(args):
    """Print a human-readable summary of the experiment configuration.

    Reads (but never mutates): algorithm, iid, dataset, batchsize,
    num_users, frac, R (global rounds), E (local epochs), global_model,
    local_model.
    """
    print("实验配置：\n")
    print(f"algorithm:{args.algorithm},\tiid:{args.iid == 1}")
    print(f"dataset:{args.dataset}\tbatchsize:{args.batchsize}\n")
    print(f"num_users:{args.num_users},\tfraction:{args.frac}\n")
    print(f"global epochs:{args.R},\tlocal epochs:{args.E},\n")
    # NOTE: the separator below is a full-width comma in the original output
    # format; it is reproduced verbatim.
    print(f"global_model:{args.global_model}，\tlocal customized_model:{args.local_model},\n")


def init_model(model_type, args):
    """Instantiate a fresh (untrained) model of the requested architecture.

    Args:
        model_type: one of 'LeNet5', 'CNN', 'ResNet18', 'mlp', 'CNN_fedmd'.
        args: experiment config; only ``args.dataset`` is read, and only for
            the 'CNN' type (to pick the CIFAR-10 vs CIFAR-100 variant).

    Returns:
        A newly constructed model instance from the ``models`` module.

    Raises:
        ValueError: if ``model_type`` is not a recognized architecture name.
    """
    if model_type == 'LeNet5':
        return models.LeNet5()
    if model_type == 'CNN':
        # CIFAR-10 and CIFAR-100 require different classifier head widths.
        if args.dataset == 'cifar10':
            return models.CNNCifar10()
        return models.CNNCifar100()
    if model_type == 'ResNet18':
        return models.ResNet18()
    if model_type == 'mlp':
        return models.MLP()
    if model_type == 'CNN_fedmd':
        return models.CNN_fedmd()
    # The original silently returned an empty list here, deferring the crash
    # to the first forward pass; fail fast with a clear message instead.
    raise ValueError('unknown model_type: {!r}'.format(model_type))


class Recorder(object):
    """Accumulates per-node loss/accuracy histories across training rounds.

    For every node id ``0 .. args.num_users`` (inclusive — the extra slot is
    presumably the server/global node; confirm against the caller) one list
    is kept per metric, keyed by ``str(node_id)``; each evaluation call
    appends one entry.
    """

    #: names of the metric dicts created in __init__ (one history list per node)
    _METRICS = (
        'train_loss', 'train_acc',
        'train_meme_loss', 'train_meme_acc',
        'val_meme_loss', 'val_meme_acc',
        'val_loss', 'val_acc',
        'test_meme_loss', 'test_meme_acc',
        'test_loss', 'test_acc',
    )

    def __init__(self, args):
        self.args = args
        self.counter = 0  # kept for interface compatibility; not updated here
        for name in self._METRICS:
            setattr(self, name, {str(i): [] for i in range(args.num_users + 1)})

    def _evaluate(self, model, loader, device):
        """Evaluate ``model`` on ``loader`` under cross-entropy.

        Returns ``(mean per-batch CE loss, accuracy in percent, number of
        correctly classified samples)``. An empty loader yields zeros
        (the original raised on an undefined loop index in that case).
        """
        model.to(device).eval()
        criterion = torch.nn.CrossEntropyLoss()
        total_loss = 0.0
        correct = 0.0
        num_batches = 0
        with torch.no_grad():
            for data, target in loader:
                data, target = data.to(device), target.to(device)
                output = model(data)
                total_loss += criterion(output, target).item()
                pred = output.argmax(dim=1)
                correct += pred.eq(target.view_as(pred)).sum().item()
                num_batches += 1
        if num_batches == 0:
            return 0.0, 0.0, 0.0
        return total_loss / num_batches, correct / len(loader.dataset) * 100, correct

    def test_meme(self, node, testset):
        """Evaluate the node's copy of the global (meme) model on ``testset``."""
        loss, acc, _ = self._evaluate(node.copy_meme, testset, node.device)
        self.test_meme_loss[str(node.num)].append(loss)
        self.test_meme_acc[str(node.num)].append(acc)
        print('test_meme,loss={:.2f}, acc={:.2f}'.format(loss, acc))

    def test_local(self, node):
        """Evaluate the node's personalized model and record under test_*.

        NOTE(review): this evaluates on ``node.validate_set`` (no separate
        test loader is used) yet records into the test_* dicts — preserved
        from the original; confirm this is intended.
        """
        loss, acc, _ = self._evaluate(node.customized_model, node.validate_set, node.device)
        self.test_loss[str(node.num)].append(loss)
        self.test_acc[str(node.num)].append(acc)
        print('test_local,loss={:.2f}, acc={:.2f}'.format(loss, acc))

    # Evaluate each personalized local model on its local validation set.
    def validate_local(self, node):
        """Evaluate the node's personalized model on its local validation set."""
        loss, acc, correct = self._evaluate(node.customized_model, node.validate_set, node.device)
        print("*******\t", correct, "\t", len(node.validate_set.dataset))
        self.val_loss[str(node.num)].append(loss)
        self.val_acc[str(node.num)].append(acc)
        print('validate_local,loss={:.2f}, acc={:.2f}'.format(loss, acc))

    # Evaluate each node's copy of the global model on its local validation set.
    def validate_meme(self, node):
        """Evaluate the node's copy of the global model on its validation set."""
        loss, acc, correct = self._evaluate(node.copy_meme, node.validate_set, node.device)
        print("*******\t", correct, "\t", len(node.validate_set.dataset))
        self.val_meme_loss[str(node.num)].append(loss)
        self.val_meme_acc[str(node.num)].append(acc)
        print('validate_meme,loss={:.2f}, acc={:.2f}'.format(loss, acc))

    def finish(self):
        """Print the best recorded local-test accuracy for every node.

        Fixes the original implementation, which referenced the nonexistent
        ``args.node_num`` and ``self.acc_best`` (commented out in __init__)
        and therefore raised AttributeError when called.
        """
        print("R epochs Finished!\n")
        for i in range(self.args.num_users + 1):
            history = self.test_acc[str(i)]
            best = max(history) if history else 0.0
            print("Node{}: Best Accuracy = {:.2f}%".format(i, best))


def lr_scheduler(rounds, node_list, args):
    """Step the mutual-learning weight ``args.alpha`` on a fixed round schedule.

    Every 33rd round, up to and including round 100 (round 0 excluded),
    ``alpha`` is increased by 0.2 and the new value is propagated to every
    node's local args copy.

    NOTE(review): despite the name and the final print, the learning rate
    itself is NOT modified here — the lr-decay branch was disabled in the
    original and has been removed as dead code.

    Args:
        rounds: current global round index.
        node_list: nodes whose ``args.alpha`` should mirror the global value.
        args: global config; ``alpha`` is mutated in place, ``lr`` is only read.
    """
    # Hard-coded schedule (33/100) retained from the original; the commented
    # R/3 trigger was never active.
    if rounds != 0 and rounds % 33 == 0 and rounds <= 100:
        args.alpha += 0.2
        for node in node_list:
            node.args.alpha = args.alpha

    print('Learning rate={:.4f}'.format(args.lr))
