import argparse
from utils import *
from fast_network_utils import get_network
import torch.multiprocessing as mp
from client import *
import warnings
import torch.multiprocessing as mp
from fast_data_utils import get_test_dataloader
warnings.filterwarnings(action='ignore')
import pandas as pd
from tqdm import tqdm


# ---------------------------------------------------------------------------
# Command-line configuration for the federated adversarial-training script.
# All names/defaults are kept; only the batch-size types are corrected.
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser()

# experiment / model selection
parser.add_argument('--NAME', default='ADV', type=str)
parser.add_argument('--dataset', default='tiny', type=str)
parser.add_argument('--network', default='vgg', type=str)
parser.add_argument('--depth', default=16, type=int)
parser.add_argument('--gpu', default='0,1', type=str)
parser.add_argument('--port', default="12355", type=str)
parser.add_argument('--load', default='False', type=str2bool)

# learning parameters
parser.add_argument('--learning_rate', default=0.1, type=float)
parser.add_argument('--weight_decay', default=0.0002, type=float)
# Batch sizes are sample counts: parse as int. (They were type=float, so any
# value given on the command line became a float and would break a DataLoader
# that requires an integer batch_size.)
parser.add_argument('--batch_size', default=128, type=int)
parser.add_argument('--test_batch_size', default=256, type=int)
parser.add_argument('--lamb', default=1, type=int)
parser.add_argument('--gamma', default=0, type=float)
parser.add_argument('--pri', default=1.0, type=float)
parser.add_argument('--topk', default=1, type=int)

# training schedule (epoch budgets for the different phases)
parser.add_argument('--adv_epoch', default=30, type=int)
parser.add_argument('--causal_epoch', default=10, type=int)
parser.add_argument('--cafe_epoch', default=4, type=int)
parser.add_argument('--total_epoch', default=150, type=int)

# attack parameters — only used for CIFAR-10 and SVHN
parser.add_argument('--attack', default='pgd', type=str)
parser.add_argument('--eps', default=0.03, type=float)
parser.add_argument('--steps', default=10, type=int)
parser.add_argument('--num_users', default=5, type=int)

# default data root is the directory the script was launched from
current_directory = os.getcwd()
parser.add_argument('--root', default=current_directory, type=str)

args = parser.parse_args()

# Pin this (single-process) script to GPU 0. The name `global_rank` mirrors
# the rank-based device assignment used by the distributed training code
# elsewhere in the project — TODO confirm against the launcher.
global_rank = 0
torch.cuda.set_device(global_rank)

# net = get_network(network=args.network,depth=args.depth,dataset=args.dataset)
# net = torch.nn.SyncBatchNorm.convert_sync_batchnorm(net)
# net = net.to(memory_format=torch.channels_last).cuda()
# net_state_dict = torch.load(f"/media/yunhao/DATA/Project/FCAT/FederatedCausal/modelsave/n_asr-vgg-16-tiny.pth")['state_dict']
# net.load_state_dict(net_state_dict)
# print("load success")
# net.eval()

def _collect_labels(loader):
    """Gather every label served by `loader` into one flat Python list."""
    labels = []
    for _inputs, batch_labels in loader:
        labels.extend(batch_labels.cpu().tolist())
    return labels


# Build the per-client train loaders. Each call also returns the shared test
# loader; the last copy is kept in `test_loader` (same as the original code,
# which overwrote it five times). The count is fixed at 5 because the named
# aliases below expect exactly five clients.
client_loaders = []
for client_id in range(5):
    loader, test_loader = get_client_dataloader(
        client_id, args.batch_size, args.test_batch_size, args)
    client_loaders.append(loader)

# Preserve the original per-client loader names for any downstream use.
client_loadera, client_loaderb, client_loaderc, client_loaderd, client_loadere = client_loaders

# Sanity check: how many distinct classes appear across all client train sets?
client_targets = []
for loader in client_loaders:
    client_targets.extend(_collect_labels(loader))

unique_values = set(client_targets)
print("trainset:", len(unique_values))  # was misspelled "traiset:"
print(unique_values)

# Repeat the class-coverage sanity check for the test split.
client_targets = []
for _inputs, targets in test_loader:
    # tensor labels -> plain ints so they are hashable for the set() below
    client_targets.extend(targets.cpu().tolist())

unique_values = set(client_targets)
print("testset:", len(unique_values))
print(unique_values)

# test_loader = get_test_dataloader(test_batch_size=64,args=args)
# test_loss = 0
# correct = 0
# total = 0

# for _,  (inputs, targets) in enumerate(test_loader):
#     inputs, targets = inputs.cuda(), targets.cuda()

#     # Accerlating forward propagation
#     with autocast():
#         outputs = net(inputs)
#         loss = F.cross_entropy(outputs, targets)

#     test_loss += loss.item()
#     _, predicted = outputs.max(1)
#     total += targets.size(0)
#     correct += predicted.eq(targets).sum().item()
#     print("targets",targets)
#     print("prediction",predicted)
# cln_acc = 100. * correct / total

# if args.dataset != 'tiny':
#     attack = attack_loader(net=net, attack=args.attack, eps=args.eps, steps=20)
# else:
#     attack = attack_loader(net=net, attack=args.attack, eps=4/255, steps=20)


# for _,  (inputs, targets) in enumerate(test_loader):
#     inputs = attack(inputs, targets)
#     inputs, targets = inputs.cuda(), targets.cuda()

#     # Accerlating forward propagation
#     with autocast():
#         outputs = net(inputs)
#         loss = F.cross_entropy(outputs, targets)

#     test_loss += loss.item()
#     _, predicted = outputs.max(1)
#     total += targets.size(0)
#     correct += predicted.eq(targets).sum().item()
# rob_acc = 100. * correct / total
# print(rob_acc)
# print(cln_acc)