import argparse
import copy
import json
import os
import time

import numpy as np
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm

from data.datasets import partition_data
from models.Models import get_model
from utils.Seed import setup_seed
from utils.utils import DatasetSplit, test


def args_parser(argv=None):
    """Parse command-line arguments for the federated training run.

    Args:
        argv: Optional list of argument strings. Defaults to ``None``, in
            which case ``sys.argv[1:]`` is used — identical to the previous
            behavior, so existing callers are unaffected. Passing an explicit
            list makes the parser usable from tests and notebooks.

    Returns:
        argparse.Namespace with all training hyper-parameters.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--aggregation', type=str, default='fedavg',
                        help='aggregation scheme tag used in the output filename')
    parser.add_argument('--epochs', type=int, default=10,
                        help='number of global (communication) rounds')
    parser.add_argument('--num_users', type=int, default=5,
                        help='total number of federated clients')
    parser.add_argument('--frac', type=float, default=1.0,
                        help='fraction of clients sampled per round')
    parser.add_argument('--local_ep', type=int, default=1,
                        help='local epochs per client per round')
    parser.add_argument('--local_bs', type=int, default=128,
                        help='local training batch size')
    parser.add_argument('--lr', type=float, default=0.01)
    parser.add_argument('--optimizer', type=str, default='SGD')
    parser.add_argument('--device', type=str, default='cuda:1')
    parser.add_argument('--momentum', type=float, default=0.9)
    parser.add_argument('--dataset', type=str, default='cifar10')
    parser.add_argument('--iid', type=int, default=1)
    parser.add_argument('--save_dir', type=str, default='run/synthesis')
    parser.add_argument('--partition', type=str, default='dirichlet',
                        help="data partition strategy (e.g. 'dirichlet' or 'iid')")
    parser.add_argument('--beta', type=float, default=0.5,
                        help='Dirichlet concentration parameter for non-IID partitioning')
    parser.add_argument('--batch_size', type=int, default=256)
    parser.add_argument('--seed', type=int, default=1)
    parser.add_argument('--model', type=str, default='cnn_clip')
    # parse_args(None) reads sys.argv[1:], preserving the old call signature.
    args = parser.parse_args(argv)
    return args


def get_optimizer(config, model):
    """Build the optimizer named by ``config.optimizer`` for *model*.

    Args:
        config: Namespace providing ``optimizer`` (name), ``lr`` and
            ``momentum``.
        model: torch.nn.Module whose parameters will be optimized.

    Returns:
        A configured ``torch.optim.Optimizer``.

    Raises:
        ValueError: if ``config.optimizer`` names an unsupported optimizer.
            (Previously this returned ``None``, which deferred the failure to
            a confusing ``AttributeError`` at ``optimizer.step()``.)
    """
    if config.optimizer == "SGD":
        return torch.optim.SGD(model.parameters(), lr=config.lr, momentum=config.momentum)
    raise ValueError(f"Unsupported optimizer: {config.optimizer!r}")


def update_weights(w):
    """Return the element-wise average of the state dicts of the models in *w*.

    Args:
        w: Non-empty list of ``torch.nn.Module`` instances sharing the same
            architecture (identical state-dict keys and tensor shapes).

    Returns:
        An OrderedDict mapping each parameter/buffer name to the mean of that
        tensor across all models. Integer ``num_batches_tracked`` buffers use
        ``true_divide`` so the averaged count is not silently truncated.
    """
    count = len(w)
    # Start from a private copy of the first model's state so no caller
    # state dict is mutated while we accumulate.
    averaged = copy.deepcopy(w[0].state_dict())
    for name in averaged.keys():
        total = averaged[name]
        for other in w[1:]:
            total = total + other.state_dict()[name]
        if 'num_batches_tracked' in name:
            averaged[name] = total.true_divide(count)
        else:
            averaged[name] = torch.div(total, count)
    return averaged


if __name__ == '__main__':
    # Silence HuggingFace tokenizers fork warning when DataLoader workers spawn.
    os.environ["TOKENIZERS_PARALLELISM"] = "false"

    config = args_parser()

    # Fix random seeds for reproducibility.
    setup_seed(config.seed)

    # Load the dataset and partition it across clients
    # (dirichlet or iid, controlled by config.partition / config.beta).
    train_dataset, test_dataset, user_groups, traindata_cls_counts = partition_data(
        config.dataset, config.partition, beta=config.beta, num_users=config.num_users)

    # Initialize models: one shared test loader, one global model, and one
    # local replica per client.
    test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=256,
                                              shuffle=False, num_workers=4)
    global_model = get_model(config)
    # NOTE(review): the model is never moved to config.device here, although
    # inputs are sent to config.device below — presumably get_model() places
    # it on the device; confirm.
    net = []
    for i in range(config.num_users):
        net.append(copy.deepcopy(global_model))

    print('started training...')
    loss_data = []
    # Main federated training loop: one iteration = one communication round.
    for global_epoch in range(config.epochs):
        begin_time = time.time()
        # Sample the clients participating in this round.
        choose_user = int(config.frac * config.num_users)
        # NOTE(review): assert is stripped under `python -O`; an explicit
        # raise would be safer for input validation.
        assert choose_user > 0, "choose_user > 0"
        choose_user_ids = np.random.choice(config.num_users, choose_user, replace=False)

        client_loss_data = []
        # Broadcast the global weights to each selected client and train locally.
        for user_id in choose_user_ids:
            global_state_dict = copy.deepcopy(global_model.state_dict())
            net[user_id].load_state_dict(global_state_dict)
            # Local training on this client's data shard.
            model = net[user_id]
            train_loader = DataLoader(DatasetSplit(train_dataset, user_groups[user_id]),
                                      batch_size=config.local_bs, shuffle=True, num_workers=4)
            optimizer = get_optimizer(config, model)

            # pbar = tqdm(range(config.local_ep), desc='LocalTrain', unit='item')
            # for i, epoch in enumerate(pbar):
            for epoch in range(config.local_ep):
                for idx, (x, y) in enumerate(train_loader):
                    x, y = x.to(config.device), y.to(config.device)
                    model.zero_grad()
                    # ---------------------------------------
                    output = model(x)
                    loss = torch.nn.functional.cross_entropy(output, y)
                    # ---------------------------------------
                    loss.backward()
                    optimizer.step()

                # acc, test_loss = test(model, test_loader, config)
                # pbar.set_postfix(
                #     {"client_id": user_id, "epoch": epoch, "acc": acc, "test_loss": test_loss},
                #     refresh=True)
            # Evaluate this client's local model after its local epochs.
            acc, test_loss = test(model, test_loader, config)
            print(f'client_id: {user_id}, acc: {acc}, test_loss: {test_loss}')
            client_loss_data.append({"client_id": int(user_id), "acc": acc, "test_loss": test_loss})

        # Aggregate client parameters (FedAvg).
        # NOTE(review): this averages over ALL clients in `net`, not just the
        # sampled `choose_user_ids` — when frac < 1, stale untrained replicas
        # are mixed into the average; confirm this is intended.
        global_model_w = update_weights(net)
        global_model.load_state_dict(global_model_w)

        # Evaluate the aggregated global model.
        acc, test_loss = test(global_model, test_loader, config)
        print(f"global epoch: {global_epoch}, acc: {acc}, test_loss: {test_loss}")
        loss_data.append({"global_epoch": global_epoch, "acc": acc, "test_loss": test_loss, "clients": client_loss_data})

        # Create the save directory if it does not exist yet.
        if not os.path.exists(config.save_dir):
            os.makedirs(config.save_dir)
        # Save the global model (currently disabled).
        # torch.save(global_model, f"{config.save_dir}/global_model_clip_{config.aggregation}_{epoch}.pth")

        end_time = time.time()
        print(f'epoch {global_epoch} cost {end_time-begin_time}s')

        # Dump the metrics collected so far after every round, so partial
        # results survive an interrupted run.
        with open(f'{config.dataset}_{config.aggregation}_{config.partition}_{config.model}_{config.beta}.json', 'w') as f:
            json.dump(loss_data, f)