#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Python version: 3.6
import time
import torch
from torch.backends import cudnn
from torch.utils.data import DataLoader

from dataset.datasets import get_data
from nodes import GlobalNode, Node
from util.options import args_parser
from util.utils import print_message, Recorder, lr_scheduler
import numpy as np
import random


# import tensorboard_logger as tb_logger

def setup_seed(seed):
    """Seed every RNG the project uses (torch CPU/GPU, numpy, random)
    and force deterministic cuDNN behavior so runs are reproducible.

    Args:
        seed (int): seed applied to all random number generators.
    """
    torch.manual_seed(seed)
    # Seeds every visible GPU; this already covers torch.cuda.manual_seed,
    # so the single-GPU call is dropped as redundant.
    torch.cuda.manual_seed_all(seed)
    np.random.seed(seed)
    random.seed(seed)
    # deterministic=True alone is not enough: benchmark mode may still
    # auto-select non-deterministic kernels, so disable it explicitly.
    cudnn.deterministic = True
    cudnn.benchmark = False


if __name__ == '__main__':
    # Fix all RNG seeds so runs are reproducible.
    setup_seed(2020)
    start = time.time()
    args = args_parser()
    print_message(args)  # print the run configuration
    device = torch.device(args.device)  # select device, e.g. 'cuda:0' / 'cpu'

    # Load the dataset and the per-user index splits.
    dataset_train, dataset_test, dict_users_train, dict_users_test = get_data(args)
    test_dataloader = DataLoader(dataset_test, batch_size=args.batchsize)

    # Select the training routine for the chosen federated algorithm.
    if args.algorithm == 'fed_mutual':
        from methods.fed_mutual import train
    elif args.algorithm == 'fed_avg':
        from methods.fed_avg import train
    else:
        # BUGFIX: previously an unknown algorithm fell through and `train`
        # was left undefined, crashing later with a confusing NameError.
        raise ValueError('Unknown algorithm: {}'.format(args.algorithm))

    # Init the global node and one local node per user.
    global_node = GlobalNode(args)
    node_list = [
        Node(i, args, dataset_train, dataset_test,
             dict_users_train[i], dict_users_test[i])
        for i in range(args.num_users)
    ]
    # Print each node's model class and the global model class.
    for i in range(args.num_users):
        print("Node:{},customized_model:{}".format(i, type(node_list[i].customized_model)))
    print("\nGlobal model:{}".format(type(global_node.model)))

    recorder = Recorder(args)
    loss = []  # per-round global test loss history
    acc = []   # per-round global test accuracy history
    best_acc = None
    best_epoch = None  # stays None when args.R == 0, so the final print is safe
    for rounds in range(args.R):
        print('===============The {:d}-th round==============='.format(rounds + 1))
        lr_scheduler(rounds, node_list, args)
        # Local training on every node.
        for node in node_list:
            node.fork(global_node)  # each node copies the current global model
            for epoch in range(args.E):  # local epochs per communication round
                train(node, recorder)
        global_node.average(node_list)  # aggregate the local models
        # Evaluate the aggregated global model on the test set.
        loss_global, acc_global = global_node.test(test_dataloader)
        loss.append(loss_global)
        acc.append(acc_global)
        # BUGFIX: the original compared the history list `acc` against
        # `best_acc` (so best_acc became a list after round 1) and stored
        # the builtin `iter` as best_epoch. Compare the scalar round
        # accuracy and record the 1-based round number instead.
        if best_acc is None or acc_global > best_acc:
            best_acc = acc_global
            best_epoch = rounds + 1

    # All R rounds finished: report the best round.
    print('Best model, iter: {}, acc: {}'.format(best_epoch, best_acc))

    print_message(args)

    for i in range(args.num_users):
        print("Node:{},customized_model:{}".format(i, type(node_list[i].customized_model)))
    print("Global model:{}".format(type(global_node.model)))
    end = time.time()
    print("运行时长{}".format(end - start))

    # # Save the trained models
    # for i in range(args.num_users):
    #     torch.save(node_list[i].customized_model.state_dict(), './save/{}/{}/Node{}.pkl'.format(args.num, args.algorithm, i))
    #
    # torch.save(global_node.customized_model.state_dict(), './save/{}/{}/copy_meme.pkl'.format(args.num, args.algorithm))
    # # Save each local node's acc/loss curves (train/val/test)
    # for i in range(args.num_users + 1):
    #     np.save('./save/{}/{}/train_loss_{}.npy'.format(args.num, args.algorithm, i), recorder.train_loss[str(i)])
    #     np.save('./save/{}/{}/train_meme_loss_{}.npy'.format(args.num, args.algorithm, i),
    #             recorder.train_meme_loss[str(i)])
    #     np.save('./save/{}/{}/val_loss_{}.npy'.format(args.num, args.algorithm, i), recorder.val_loss[str(i)])
    #     np.save('./save/{}/{}/val_meme_loss_{}.npy'.format(args.num, args.algorithm, i), recorder.val_meme_loss[str(i)])
    #     np.save('./save/{}/{}/test_loss_{}.npy'.format(args.num, args.algorithm, i), recorder.test_loss[str(i)])
    #     np.save('./save/{}/{}/test_meme_loss_{}.npy'.format(args.num, args.algorithm, i),
    #             recorder.test_meme_loss[str(i)])
    #
    #     np.save('./save/{}/{}/train_acc_{}.npy'.format(args.num, args.algorithm, i), recorder.train_acc[str(i)])
    #     np.save('./save/{}/{}/train_meme_acc_{}.npy'.format(args.num, args.algorithm, i),
    #             recorder.train_meme_acc[str(i)])
    #     np.save('./save/{}/{}/val_acc_{}.npy'.format(args.num, args.algorithm, i), recorder.val_acc[str(i)])
    #     np.save('./save/{}/{}/val_meme_acc_{}.npy'.format(args.num, args.algorithm, i), recorder.val_meme_acc[str(i)])
    #     np.save('./save/{}/{}/test_acc_{}.npy'.format(args.num, args.algorithm, i), recorder.test_acc[str(i)])
    #     np.save('./save/{}/{}/test_meme_acc_{}.npy'.format(args.num, args.algorithm, i), recorder.test_meme_acc[str(i)])
    #
    # # Save aggregated results for later experiment analysis
    # np.save('./save/{}/{}/acc_R{}_D{}_A{}_G{}_L{}_I{}.npy'.format(args.num, args.algorithm, args.R, args.dataset,
    #                                                               args.algorithm, args.global_model, args.local_model,
    #                                                               args.iid), np.array(acc))
    # np.save('./save/{}/{}/loss_R{}_D{}_A{}_G{}_L{}_I{}.npy'.format(args.num, args.algorithm, args.R, args.dataset,
    #                                                                args.algorithm, args.global_model, args.local_model,
    #                                                                args.iid), np.array(loss))
