import argparse
from utils import *
from fast_network_utils import get_network
import torch.multiprocessing as mp
from client import *
import warnings
from fast_data_utils import get_test_dataloader
warnings.filterwarnings(action='ignore')
import pandas as pd
from tqdm import tqdm
import time



# ---------------- command-line configuration ----------------
parser = argparse.ArgumentParser()

parser.add_argument('--NAME', default='ADV', type=str)
parser.add_argument('--dataset', default='cifar10', type=str)
parser.add_argument('--network', default='resnet', type=str)
parser.add_argument('--depth', default=18, type=int)
parser.add_argument('--gpu', default='0,1', type=str)          # comma-separated CUDA device ids
parser.add_argument('--port', default="12355", type=str)       # distributed rendezvous port
parser.add_argument('--load', default='False', type=str2bool)  # resume from saved best-acc checkpoints
parser.add_argument('--causal_log', default='False', type=str2bool)


# learning parameters
parser.add_argument('--learning_rate', default=0.1, type=float)
parser.add_argument('--weight_decay', default=0.0002, type=float)
# BUG FIX: batch sizes are sample counts; they were declared type=float, which
# produces e.g. 128.0 and breaks consumers that require an integer batch size
# (torch DataLoader rejects non-int batch_size).
parser.add_argument('--batch_size', default=128, type=int)
parser.add_argument('--test_batch_size', default=128, type=int)
parser.add_argument('--lamb', default=1, type=int)
parser.add_argument('--gamma', default=0, type=float)
parser.add_argument('--pri', default=1.4, type=float)
parser.add_argument('--topk', default=2, type=int)
parser.add_argument('--frac', type=float, default=1.0, help='the fraction of clients: C')


# per-round local-training schedule (epochs for each training phase)
parser.add_argument('--adv_epoch', default=10, type=int)
parser.add_argument('--causal_epoch', default=5, type=int)
parser.add_argument('--cafe_epoch', default=4, type=int)

parser.add_argument('--total_epoch', default=150, type=int)

# attack parameter only for CIFAR-10 and SVHN
parser.add_argument('--attack', default='pgd', type=str)
parser.add_argument('--eps', default=0.03, type=float)
parser.add_argument('--steps', default=10, type=int)
parser.add_argument('--num_users', default=5, type=int)  # number of federated clients
current_directory = os.getcwd()
parser.add_argument('--root', default=current_directory, type=str)
parser.add_argument('--causal_learning_rate', default=0.0001, type=float)
parser.add_argument('--causal_weight_decay', default=0.00001, type=float)
parser.add_argument('--cafe_learning_rate', default=0.001, type=float)
parser.add_argument('--cafe_weight_decay', default=0.0002, type=float)
parser.add_argument('--cafe_batch_size', default=64, type=int)
parser.add_argument('--cafe_test_batch_size', default=64, type=int)
args = parser.parse_args()

# Restrict CUDA to the requested devices and configure the rendezvous endpoint
# used by the spawned client processes.
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = args.port

    

if __name__ == "__main__":
    # Federated adversarial-training server loop: each round spawns one local
    # training process per client, aggregates the returned weights, then
    # evaluates the aggregated model on clean and adversarial test data.
    global_rank = 0
    setup_seed(3407)
    checkmade_dir(f"{args.root}/modelsave", delete=False)
    current_directory = os.path.abspath(os.getcwd())
    csv_path = f'{current_directory}/{args.network}-{args.depth}-{args.dataset}.csv'
    delete_file(csv_path)
    # Per-round metrics CSV: one clean/robust accuracy column per client plus
    # the aggregated global-model metrics.
    header = ([f'client_{i}_acc' for i in range(args.num_users)]
              + [f'client_{i}_rob' for i in range(args.num_users)]
              + ['cln_acc', 'rob_acc', 'bst_cln_acc', 'bst_rob_acc', 'epoch'])
    pd.DataFrame(columns=header).to_csv(csv_path, index=False)

    # 'wide' backbones need the wider-channel variants of the causal and
    # instrument networks (ch flag forwarded to get_network).
    ch = args.network == 'wide'

    torch.cuda.set_device(global_rank)
    net = get_network(network=args.network, depth=args.depth, dataset=args.dataset)
    net = torch.nn.SyncBatchNorm.convert_sync_batchnorm(net)
    net = net.to(memory_format=torch.channels_last)

    c_net = get_network(network='causal', depth=None, dataset=args.dataset, ch=ch)
    c_net = torch.nn.SyncBatchNorm.convert_sync_batchnorm(c_net)
    c_net = c_net.to(memory_format=torch.channels_last)

    z_net = get_network(network='instrument', depth=args.depth, dataset=args.dataset, ch=ch)
    z_net = torch.nn.SyncBatchNorm.convert_sync_batchnorm(z_net)
    z_net = z_net.to(memory_format=torch.channels_last)

    # Either resume from the best-clean-accuracy checkpoints or start from the
    # freshly initialized weights.
    if args.load:
        net_state_dict = torch.load(f"{args.root}/modelsave/n_acc.pth")['state_dict']
        c_net_state_dict = torch.load(f"{args.root}/modelsave/c_acc.pth")['state_dict']
        z_net_state_dict = torch.load(f"{args.root}/modelsave/z_acc.pth")['state_dict']
    else:
        net_state_dict = net.state_dict()
        c_net_state_dict = c_net.state_dict()
        z_net_state_dict = z_net.state_dict()

    # Publish the initial global weights where the client processes load them.
    # NOTE(review): assumes the working directory equals args.root — confirm.
    torch.save(net_state_dict, "modelsave/n.pth")
    torch.save(c_net_state_dict, "modelsave/c.pth")
    torch.save(z_net_state_dict, "modelsave/z.pth")

    bst_cln_acc = -1
    bst_rob_acc = -1
    nbest_acc_ckpt = f'modelsave/n_acc-{args.network}-{args.depth}-{args.dataset}.pth'
    nbest_asr_ckpt = f'modelsave/n_asr-{args.network}-{args.depth}-{args.dataset}.pth'

    cbest_acc_ckpt = f'modelsave/c_acc-{args.network}-{args.depth}-{args.dataset}.pth'
    cbest_asr_ckpt = f'modelsave/c_asr-{args.network}-{args.depth}-{args.dataset}.pth'

    zbest_acc_ckpt = f'modelsave/z_acc-{args.network}-{args.depth}-{args.dataset}.pth'
    zbest_asr_ckpt = f'modelsave/z_asr-{args.network}-{args.depth}-{args.dataset}.pth'

    num_users = args.num_users
    total_epoch = args.total_epoch
    gpu_list = list(map(int, args.gpu.split(',')))
    gpu_counts = len(gpu_list)
    for epoch in tqdm(range(total_epoch)):
        # One federated round: one worker process per client.
        # BUG FIX: nprocs was hard-coded to 5; it must track --num_users or
        # changing the client count silently spawns the wrong number of
        # workers (and the result-collection loop below reads num_users files).
        mp.spawn(
            client_train,                        # function run in each worker process
            args=(args, epoch, gpu_counts, 0),   # forwarded to client_train after the rank
            nprocs=num_users,                    # one process per client
            join=True,                           # block until all workers finish
        )
        print(f"{epoch} Training is finished!")

        # Collect every client's round results (weights + metrics) from disk.
        global_info = [torch.load(f"{args.root}/temp/{user_id}.t7") for user_id in range(num_users)]
        local_net_weights = [item['net'] for item in global_info]
        local_c_net_weights = [item['c_net'] for item in global_info]
        local_z_net_weights = [item['z_net'] for item in global_info]

        # idt holds the per-client 'ide' scores; the score at the top-k cut of
        # the participating clients becomes the aggregation threshold.
        idt = [item['ide'] for item in global_info]
        idt_sorted = np.sort(idt)
        idtx = args.topk
        m = max(int(args.frac * args.num_users), 1)
        idtxnum = idt_sorted[m - idtx]

        net_state_dict = average_weights_alpha(local_net_weights, idt, idtxnum, args.pri, global_rank=global_rank)
        c_net_state_dict = average_weights_alpha(local_c_net_weights, idt, idtxnum, args.pri, global_rank=global_rank)
        z_net_state_dict = average_weights_alpha(local_z_net_weights, idt, idtxnum, args.pri, global_rank=global_rank)

        # Publish the aggregated weights for the next round's clients.
        torch.save(net_state_dict, "modelsave/n.pth")
        torch.save(c_net_state_dict, "modelsave/c.pth")
        torch.save(z_net_state_dict, "modelsave/z.pth")

        net.load_state_dict(net_state_dict)
        c_net.load_state_dict(c_net_state_dict)
        z_net.load_state_dict(z_net_state_dict)

        # ---- clean-accuracy evaluation of the aggregated model ----
        net = net.cuda()
        net.eval()
        test_loader = get_test_dataloader(test_batch_size=args.test_batch_size, args=args)

        test_loss = 0
        correct = 0
        total = 0
        for inputs, targets in test_loader:
            inputs, targets = inputs.cuda(), targets.cuda()
            # Mixed precision to accelerate the forward pass.
            with autocast():
                outputs = net(inputs)
                loss = F.cross_entropy(outputs, targets)
            test_loss += loss.item()
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
        cln_acc = 100. * correct / total

        del inputs, targets

        # ---- robust-accuracy evaluation under attack ----
        # Tiny-ImageNet uses the conventional 4/255 budget; other datasets
        # use the configured epsilon. Evaluation always uses 20 PGD steps.
        if args.dataset != 'tiny':
            attack = attack_loader(net=net, attack=args.attack, eps=args.eps, steps=20)
        else:
            attack = attack_loader(net=net, attack=args.attack, eps=4 / 255, steps=20)

        test_loss = 0
        correct = 0
        total = 0
        for inputs, targets in test_loader:
            inputs = attack(inputs, targets)
            inputs, targets = inputs.cuda(), targets.cuda()
            with autocast():
                outputs = net(inputs)
                loss = F.cross_entropy(outputs, targets)
            test_loss += loss.item()
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
        rob_acc = 100. * correct / total

        del inputs, targets

        print("Epoch:{},\t cln_acc:{}, \t rob_acc:{}".format(epoch, cln_acc, rob_acc))

        # Checkpoint all three networks whenever this round sets a new best
        # clean ('acc' files) or robust ('asr' files) accuracy.
        for model, acc_ckpt, asr_ckpt in ((net, nbest_acc_ckpt, nbest_asr_ckpt),
                                          (c_net, cbest_acc_ckpt, cbest_asr_ckpt),
                                          (z_net, zbest_acc_ckpt, zbest_asr_ckpt)):
            save_checkpoint({
                'state_dict': model.state_dict(),
                'epoch': epoch,
            }, cln_acc > bst_cln_acc, acc_ckpt)
            save_checkpoint({
                'state_dict': model.state_dict(),
                'epoch': epoch,
            }, rob_acc > bst_rob_acc, asr_ckpt)
        bst_cln_acc = max(bst_cln_acc, cln_acc)
        bst_rob_acc = max(bst_rob_acc, rob_acc)

        # Per-client metrics reported by the workers; entries whose 'user_id'
        # matches their load index contribute one accuracy/robustness pair.
        client_accs = []
        client_robs = []
        for i, item in enumerate(global_info):
            if item['user_id'] == i:
                client_accs.append(item['clean_acc'])
                client_robs.append(item['adv_acc'])

        row_data = client_accs + client_robs + [cln_acc, rob_acc, bst_cln_acc, bst_rob_acc, epoch]
        pd.DataFrame([row_data], columns=header).to_csv(csv_path, mode='a', header=False, index=False)
        net = net.cpu()

        # Drop the large per-round objects before releasing cached GPU memory.
        del global_info, local_net_weights, local_c_net_weights, local_z_net_weights, net_state_dict
        del c_net_state_dict, z_net_state_dict, test_loader

        # BUG FIX: empty_cache was hard-coded to devices 0 and 1, which raises
        # when --gpu lists a single device; iterate the configured devices.
        for device_id in range(gpu_counts):
            with torch.cuda.device(device_id):
                torch.cuda.empty_cache()
    