#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Python version: 3.6

import os
import torch
from torch.backends import cudnn
from torch.utils.data import DataLoader

from dataset.datasets import get_data
from nodes import GlobalNode, Node, DatasetSplit
from util.options import args_parser
from util.utils import print_message, Recorder, lr_scheduler, get_dataloader
import numpy as np
import random

def setup_seed(seed):
    """Seed every RNG used by the experiment so runs are reproducible.

    Covers torch (CPU + all CUDA devices), numpy and the stdlib `random`
    module, and forces cuDNN into deterministic mode.

    Args:
        seed (int): the seed applied to all generators.
    """
    torch.manual_seed(seed)
    # Seeds the RNG of every visible GPU; the old extra call to
    # torch.cuda.manual_seed(seed) was redundant after this.
    torch.cuda.manual_seed_all(seed)
    np.random.seed(seed)
    random.seed(seed)
    # Deterministic cuDNN kernels; benchmark autotuning must also be off,
    # otherwise kernel selection can still vary between runs.
    cudnn.deterministic = True
    cudnn.benchmark = False

if __name__ == '__main__':
    # Fix all RNG seeds for reproducibility.
    setup_seed(2020)
    args = args_parser()
    print_message(args)  # print the run configuration
    device = torch.device(args.device)  # select the compute device (e.g. a GPU)
    # Load the dataset plus the per-user train/test index splits.
    dataset_train, dataset_test, dict_users_train, dict_users_test = get_data(args)
    test_dataloader = DataLoader(dataset_test, batch_size=args.batchsize, num_workers=4)
    # Pick the training routine for the requested federated algorithm.
    if args.algorithm == 'fml':
        from methods.fed_mutual import train
    elif args.algorithm == 'fedavg':
        from methods.fed_avg import train
    elif args.algorithm == 'fedprox':
        from methods.fed_prox import train
    else:
        # Previously an unrecognised algorithm fell through silently and
        # `train` stayed unbound, raising a NameError deep inside the
        # round loop. Fail fast with a clear message instead.
        raise ValueError('Unknown algorithm: {}'.format(args.algorithm))

    # Init the global node and one local node per user.
    global_node = GlobalNode(args)
    node_list = [
        Node(i, args, dataset_train, dataset_test, dict_users_train[i], dict_users_test[i])
        for i in range(args.num_users)
    ]
    # Show which model each node is running.
    for i, node in enumerate(node_list):
        print("Node:{},customized_model:{}".format(i, type(node.customized_model)))
    print("\nGlobal model:{}".format(type(global_node.model)))

    recorder = Recorder(args)
    loss = []
    acc = []

    for rounds in range(args.R):
        print('===============The {:d}-th round==============='.format(rounds + 1))
        # Local phase: every node trains on its own shard.
        for node in node_list:
            node.fork(global_node)  # distribute: the node copies the current global model
            recorder.validate_local(node)  # evaluate the node's customized (local) model
            recorder.validate_meme(node)   # evaluate the shared (meme) model on local data
            recorder.mu = args.mu
            for epoch in range(args.E):  # E local epochs per round
                if args.algorithm == 'fedprox':
                    # FedProx needs the global model for its proximal term.
                    train(node, recorder, global_node)
                else:
                    train(node, recorder)
        global_node.average(node_list)  # aggregate local models into the global one
        loss_global, acc_global = global_node.test(test_dataloader)
        loss.append(loss_global)  # track the post-aggregation global test metrics
        acc.append(acc_global)

    # All R rounds done: report the best global accuracy seen.
    print('\nBest model, acc: {}'.format(np.array(acc).max()))
    # Persist per-node and global curves for later analysis.
    result_save_dir = './save/result_save/{}/'.format(args.results_save)
    # exist_ok=True already tolerates an existing directory, so the old
    # os.path.exists() pre-check was redundant.
    os.makedirs(result_save_dir, exist_ok=True)
    for i in range(args.num_users + 1):
        np.save('{}/val_acc_{}.npy'.format(result_save_dir, i), recorder.val_acc[str(i)])
        np.save('{}/val_meme_acc_{}.npy'.format(result_save_dir, i), recorder.val_meme_acc[str(i)])
        np.save('{}/test_acc_{}.npy'.format(result_save_dir, i), recorder.test_acc[str(i)])
        np.save('{}/test_meme_acc_{}.npy'.format(result_save_dir, i), recorder.test_meme_acc[str(i)])

    np.save('{}/acc.npy'.format(result_save_dir), np.array(acc))
    np.save('{}/loss.npy'.format(result_save_dir), np.array(loss))