import torch
from fast_data_utils import get_client_dataloader
import torch.optim as optim
from attack.fastattack import attack_loader
from torch.cuda.amp import GradScaler, autocast
import torch.nn.functional as F
from  utils import do_freeze, get_onehot, inv_eps, not_freeze,causal_loss_func,MixAttack,find_batchnorm_layers
from copy import deepcopy
from fast_network_utils import get_network
import torch.distributed as dist
import pickle
from attack.libfastattack.lossfunc import *




def get_cls_num_list(traindata_cls_counts, dataset):
    """Expand per-client sparse class counts into dense per-client lists.

    Args:
        traindata_cls_counts: mapping of client -> {class_id: sample_count}.
        dataset: dataset name; fixes the number of classes (cifar100 -> 100,
            tiny -> 200, anything else -> 10).

    Returns:
        A list (one entry per client, in dict iteration order) of
        ``num_class``-long count lists, zero for absent classes.
    """
    if dataset == 'tiny':
        num_class = 200
    elif dataset == 'cifar100':
        num_class = 100
    else:
        num_class = 10

    cls_num_list = []
    for counts in traindata_cls_counts.values():
        row = [0] * num_class
        for cls_id, cnt in counts.items():
            row[cls_id] = cnt
        cls_num_list.append(row)
    return cls_num_list


def distributed_train(rank, user_dict, args, epoch, world_size):
    """Per-GPU worker: train every client assigned to this rank.

    Args:
        rank: GPU / process rank this worker owns.
        user_dict: mapping of rank -> list of client ids to train here.
        args: parsed CLI namespace forwarded to ``client_train``.
        epoch: current global communication round.
        world_size: total number of distributed workers.
    """
    torch.cuda.set_device(rank)

    # Join the NCCL process group so SyncBatchNorm layers can all-reduce.
    dist.init_process_group(backend='nccl', world_size=world_size, rank=rank)
    for user in user_dict[rank]:
        client_train(user, args, epoch, rank)
    print(f"GPU {rank} is finished!")


def bn_average_weights_alpha(w_avg, w_client, client_rank):
    """Prepare a client state dict for reuse alongside the global one.

    Moves every tensor of both state dicts onto ``cuda:<client_rank>``
    (both dicts are mutated in place), then overwrites the client's
    BatchNorm ``num_batches_tracked`` buffers with the global model's
    values, so the client resumes from the aggregated BN step count.

    NOTE(review): despite the former docstring, no weighted averaging
    happens here — only device placement plus the counter copy. Keys are
    taken from ``w_avg``; any extra keys in ``w_client`` are left untouched.

    Returns the (mutated) client state dict ``w_client``.
    """
    device = f"cuda:{client_rank}"
    for key in w_avg.keys():
        w_avg[key] = w_avg[key].to(device)
        w_client[key] = w_client[key].to(device)
        if 'num_batches_tracked' in key:
            # BN step counters come from the aggregated model, not the client.
            w_client[key] = w_avg[key]
    return w_client

def client_train(user_id,args, epoch,rank,wide=0):
    """Run one local federated round for a single client, in four phases.

    Phase 1: adversarial training of the main classifier ``net``.
    Phase 2: min-max training of the instrumental net ``z_net`` and the
             causal net ``c_net`` against the frozen ``net``.
    Phase 3: "cafe" fine-tuning of ``net`` guided by the frozen ``c_net``.
    Phase 4: clean + robust evaluation, then checkpointing results to
             ``{args.root}/temp/{user_id}.t7`` for the server to collect.

    Parameters
    ----------
    user_id : int
        Client index; selects its data shard, class counts and result file.
    args : argparse.Namespace
        Hyper-parameters, dataset/network names and filesystem roots.
    epoch : int
        Global communication round; rounds > 0 merge BN counters from the
        client's previous checkpoint (see bn_average_weights_alpha).
    rank : int
        Incoming GPU rank — immediately overwritten below (see NOTE).
    wide : int
        Unused; the id offset it controlled is commented out.
    """
    # user_id = user_id + wide * 2
    # NOTE(review): the caller-supplied rank is discarded and the GPU is
    # derived from user_id, hard-coding a 2-GPU layout — confirm this
    # matches the launcher's world_size.
    rank = (user_id+1) % 2
    torch.cuda.set_device(rank)
    
    # Per-client class-count dict written earlier by the data partitioner.
    with open(f'{args.root}/temp/temp.pkl', 'rb') as pickle_file:
        loaded_dict = pickle.load(pickle_file)
    
    cls_num_list = get_cls_num_list(loaded_dict,args.dataset)[user_id]
    # Global (server-aggregated) weights for the three networks.
    net_state_dict = torch.load(f"{args.root}/modelsave/n.pth") 
    c_net_state_dict = torch.load(f"{args.root}/modelsave/c.pth") 
    z_net_state_dict = torch.load(f"{args.root}/modelsave/z.pth")
    
    if epoch > 0:
        # From round 1 on: keep this client's own c/z weights, but adopt the
        # global BatchNorm num_batches_tracked counters.
        client_info = torch.load(f"{args.root}/temp/{user_id}.t7")
        c_net_weights = client_info['c_net']
        z_net_weights = client_info['z_net']
        c_net_state_dict = bn_average_weights_alpha(c_net_state_dict,c_net_weights,rank)
        z_net_state_dict = bn_average_weights_alpha(z_net_state_dict,z_net_weights,rank)
        
        
    
    # Main classifier; channels_last memory format speeds up AMP convolutions.
    net = get_network(network=args.network,depth=args.depth,dataset=args.dataset)
    net = torch.nn.SyncBatchNorm.convert_sync_batchnorm(net)
    net = net.to(memory_format=torch.channels_last).cuda()
    net.load_state_dict(net_state_dict)
    
    # Wide networks need the wider variant of the causal/instrument nets.
    if args.network == 'wide':
        ch = True
    else:
        ch = False
    
    c_net = get_network(network='causal', depth=None, dataset=args.dataset, ch=ch)
    c_net = torch.nn.SyncBatchNorm.convert_sync_batchnorm(c_net)
    c_net = c_net.to(memory_format=torch.channels_last).cuda()
    c_net.load_state_dict(c_net_state_dict)
    
    
    z_net = get_network(network='instrument', depth=args.depth, dataset=args.dataset, ch=ch)
    z_net = torch.nn.SyncBatchNorm.convert_sync_batchnorm(z_net)
    z_net = z_net.to(memory_format=torch.channels_last).cuda()
    z_net.load_state_dict(z_net_state_dict)
    

    # ---- Phase 1: adversarial training of the main network ----
    client_loader, test_loader = get_client_dataloader(user_id,args.batch_size, args.test_batch_size,args)
    optimizer = optim.SGD(net.parameters(), lr=args.learning_rate, momentum=0.9, weight_decay=args.weight_decay)
    if args.dataset != "tiny":
        adv_attack = attack_loader(net=net, attack=args.attack, mode="Normal", cls_num_list=cls_num_list,eps=args.eps, steps=args.steps,weights=True)
    else:
        # tiny-imagenet: alternate slow PGD and fast FGSM batches via MixAttack.
        pgd_attack = attack_loader(net=net, attack="pgd", mode="Normal", cls_num_list=cls_num_list,eps=4/255, steps=args.steps,weights=True)
        fgsm_attack = attack_loader(net=net, attack="fgsm_train", mode="Normal", cls_num_list=cls_num_list,eps=4/255, steps=args.steps,weights=True)
        adv_attack = MixAttack(net=net, slowattack=pgd_attack, fastattack=fgsm_attack, train_iters=len(client_loader))
        
    
    adv_epoch = args.adv_epoch # if epoch <= 10 else 1
    # Triangular cyclic LR across the whole local phase: ramp up for the
    # first half of the iterations, back down for the second half.
    lr_scheduler = torch.optim.lr_scheduler.CyclicLR(optimizer, base_lr=0, max_lr=args.learning_rate,step_size_up=max(adv_epoch-2,1)*0.5 * len(client_loader),step_size_down=max(adv_epoch-2,1)*0.5* len(client_loader))

    # One GradScaler is shared by all three training phases below.
    scaler = GradScaler()
    # bst_acc = 0
    net.train()
    criterion = maxMarginLoss(cls_num_list=cls_num_list, max_m=0.8, s=10, weight=None).cuda()
    local_rounds = []
    image_nums = 0
    epoch_loss = []
    for _ in range(adv_epoch):
        batch_loss = []
        # NOTE: these reset every epoch, so only the LAST epoch's sums
        # survive the loop and get reported in send_message below.
        index_pgd = 0.0
        index = 0.0
        for _,  (inputs, targets) in enumerate(client_loader):
            origin_inputs, targets = inputs.cuda(), targets.cuda()
            
            # Attack returns the perturbed batch plus a per-sample statistic.
            inputs ,ka = adv_attack(origin_inputs, targets)
            optimizer.zero_grad()
            with autocast():
                outputs = net(inputs)
                # loss = F.cross_entropy(outputs, targets)
                
                
                loss = criterion(outputs,targets)
            
            index_pgd += sum(ka)
            # NOTE(review): 'ka' (the attack statistic just accumulated) is
            # immediately overwritten with a loss-based score here — confirm
            # this reuse of the name is intentional.
            ka = -(loss.sum().item())*len(inputs)
            index += ka
            batch_loss.append(loss.item())

            
            scaler.scale(loss).backward()
            scaler.step(optimizer)
            scaler.update()
            lr_scheduler.step()
            image_nums = image_nums + inputs.shape[0]
        epoch_loss.append(sum(batch_loss)/len(batch_loss))
        
    # NOTE(review): local_rounds is never appended to, so mean_diff is
    # always 0.0 — verify whether a per-batch statistic was meant here.
    mean_diff = sum(local_rounds)/image_nums

    # ---- Phase 2: min-max training of z_net / c_net with net frozen ----
    do_freeze(net)
    if args.dataset != "tiny":
        causal_attack = attack_loader(net=net, attack=args.attack, mode="Normal", cls_num_list=cls_num_list,eps=args.eps, steps=args.steps,weights=True)
        c_optimizer = optim.AdamW([{'params': c_net.parameters()}], lr=args.causal_learning_rate,  weight_decay=args.causal_weight_decay, amsgrad=True)
        inst_optimizer = optim.AdamW([{'params': z_net.parameters()}], lr=args.causal_learning_rate,  weight_decay=args.causal_weight_decay, amsgrad=True)
    elif args.network == 'wide' and args.dataset == 'tiny':
        # Wide net on tiny: mixed PGD/FGSM attack and GAN-style betas.
        pgd_attack = attack_loader(net=net, attack="pgd", mode="Normal", cls_num_list=cls_num_list,eps=4/255, steps=args.steps,weights=True)
        fgsm_attack = attack_loader(net=net, attack="fgsm_train", mode="Normal", cls_num_list=cls_num_list,eps=4/255, steps=args.steps,weights=True)
        causal_attack = MixAttack(net=net, slowattack=pgd_attack, fastattack=fgsm_attack, train_iters=len(client_loader))
        c_optimizer = optim.AdamW([{'params': c_net.parameters()}], lr=args.causal_learning_rate, betas=(0.5, 0.999), weight_decay=args.causal_weight_decay)
        inst_optimizer = optim.AdamW([{'params': z_net.parameters()}], lr=args.causal_learning_rate, betas=(0.5, 0.999), weight_decay=args.causal_weight_decay)
    else:
        # Non-wide net on tiny: fixed 4/255 epsilon.
        causal_attack = attack_loader(net=net, attack=args.attack, mode="Normal", cls_num_list=cls_num_list,eps=4/255, steps=args.steps,weights=True)
        c_optimizer = optim.AdamW([{'params': c_net.parameters()}], lr=args.causal_learning_rate, betas=(0.5, 0.999), weight_decay=args.causal_weight_decay)
        inst_optimizer = optim.AdamW([{'params': z_net.parameters()}], lr=args.causal_learning_rate, betas=(0.5, 0.999), weight_decay=args.causal_weight_decay)
    
    
    # causal_epoch = args.causal_epoch if epoch <= 10 else 1
    # causal_epoch = args.causal_epoch if epoch >= 100 else 1
    causal_epoch = args.causal_epoch
    c_net.train()
    z_net.train()
    net.eval()
    # Captured BEFORE client_loader is rebuilt in phase 3; used as the
    # normalizer for the 'ide'/'pp_index' metrics at the end.
    client_length = len(client_loader)
    for _ in range(causal_epoch):
        for _,  (inputs, targets) in enumerate(client_loader):
            inputs, targets = inputs.cuda(), targets.cuda()
            adv_inputs,_ = causal_attack(inputs, targets)

            c_optimizer.zero_grad(), inst_optimizer.zero_grad()
            
            with autocast():
                # pop=True: take features before the final classifier head;
                # residual is the adversarial perturbation in feature space.
                adv_feature = net(adv_inputs, pop=True)
                cln_feature = net(inputs, pop=True)
                residual = adv_feature - cln_feature

                # int=True: feed features straight into the classifier head.
                adv_output = net(adv_feature.clone(), int=True)
                onehot_target = get_onehot(adv_output, targets)
                # Per-class margin from the client's class counts.
                m_list = torch.FloatTensor(cls_num_list).cuda()
                m = 0.1*torch.log(m_list + 1e-7)
                onehot_target = onehot_target + m

                inst_feature = z_net(residual.clone())
                inst_output = net(cln_feature.clone() + inst_feature.clone(), int=True)

                causal_feature = c_net(inst_feature)
                causal_output = net((cln_feature.clone() + causal_feature.clone()), int=True)

                # z_net must reconstruct the feature-space residual.
                reg_loss = args.lamb * ((inst_feature - residual) ** 2).mean()
                # inst_criterion = nn.BCEWithLogitsLoss()
                # NOTE(review): F.log_softmax without an explicit dim relies on
                # deprecated implicit-dim behavior — dim=1 is presumably meant.
                inst_loss = -(onehot_target * F.log_softmax(causal_output) * F.log_softmax(inst_output)).sum(dim=1).mean() 
                
                max_total_loss = inst_loss + reg_loss
            
            # Maximization step: only z_net's optimizer takes a step.
            scaler.scale(max_total_loss).backward()
            scaler.step(inst_optimizer)
            c_optimizer.zero_grad(), inst_optimizer.zero_grad()
            
            with autocast():
                # Recompute the graph with the freshly-updated z_net.
                inst_feature = z_net(residual)
                inst_output = net(cln_feature.clone() + inst_feature.clone(), int=True)

                causal_feature = c_net(inst_feature)
                causal_output = net(cln_feature.clone() + causal_feature.clone(), int=True)
                # causal_criterion = nn.BCEWithLogitsLoss()
                # Same product term as inst_loss, with the sign flipped:
                # c_net MINIMIZES what z_net maximized.
                causal_loss = (onehot_target * F.log_softmax(causal_output) * F.log_softmax(inst_output)).sum(dim=1).mean()
                min_total_loss = causal_loss

            # Minimization step: only c_net's optimizer takes a step.
            scaler.scale(min_total_loss).backward(retain_graph=True)
            scaler.step(c_optimizer)
            scaler.update()
    
    # ---- Phase 3: cafe fine-tuning of net with c_net frozen ----
    do_freeze(c_net)
    not_freeze(net)
    # Rebuild loaders with the cafe-phase batch sizes.
    client_loader, test_loader = get_client_dataloader(user_id,args.cafe_batch_size, args.cafe_test_batch_size,args)
    
    if args.dataset == 'tiny':
        cafe_attack = attack_loader(net=net, attack="fgsm_train", mode="Normal", cls_num_list=cls_num_list,eps=4/255, steps=args.steps,weights=True)
        inv_causal = attack_loader(net=net, attack='causalpgd', eps=inv_eps(args.dataset, args.network), steps=3)
    else:
        cafe_attack = attack_loader(net=net, attack=args.attack, mode="Normal", cls_num_list=cls_num_list,eps=args.eps, steps=args.steps,weights=True)
        inv_causal = attack_loader(net=net, attack='causalpgd', eps=inv_eps(args.dataset, args.network), steps=args.steps)
    
    optimizer = optim.SGD(net.parameters(), lr=args.cafe_learning_rate, momentum=0.9, weight_decay=args.cafe_weight_decay)
    
    net.train()
    c_net.eval()
    # cafe_epoch = args.cafe_epoch if epoch <= 10 else 1
    # cafe_epoch = args.cafe_epoch if epoch >= 100  else 1
    cafe_epoch = args.cafe_epoch
    lr_scheduler = torch.optim.lr_scheduler.CyclicLR(optimizer, base_lr=0, max_lr=args.cafe_learning_rate,step_size_up= cafe_epoch*len(client_loader)/2, step_size_down=cafe_epoch*len(client_loader)/2)
    criterion = maxMarginLoss(cls_num_list=cls_num_list, max_m=0.8, s=10, weight=None).cuda()
    for _ in range(cafe_epoch):
        for _,  (inputs, targets) in enumerate(client_loader):
            inputs, targets = inputs.cuda(), targets.cuda()
            if inputs.shape[0] == 1:
                # NOTE(review): targets is already a tensor here, so
                # torch.tensor([targets]) re-wraps it (and warns on newer
                # torch) — verify what shape the single-sample path needs.
                targets = torch.tensor([targets]).cuda()
            adv_inputs, _= cafe_attack(inputs, targets)
            optimizer.zero_grad()
            with autocast():
                clean_feature = net(inputs, pop=True)
                # adv feature
                adv_feature = net(adv_inputs, pop=True)
                adv_outputs = net(adv_feature.clone(), int=True)

                # causal feature and output
                del_causal = c_net(adv_feature - clean_feature)
                causal_feature = clean_feature + del_causal
                causal_outputs = net(causal_feature.clone(), int=True)
            
            # Inverse-causal adversarial examples guided by the (detached)
            # causal logits; computed outside autocast.
            inv_inputs = inv_causal(inputs, targets, causal_outputs.detach())
            with autocast():
                inv_outputs = net(inv_inputs)
                # loss = F.cross_entropy(adv_outputs, targets) + causal_loss_func(adv_outputs, inv_outputs)
                loss = criterion(adv_outputs,targets) + causal_loss_func(adv_outputs, inv_outputs)
                # loss = criterion(adv_outputs,targets) + causal_loss_func(adv_outputs, causal_outputs)
                # loss = riterion(adv_outputs,targets) + causal_loss_func(adv_outputs, causal_outputs) + causal_loss_func(adv_outputs, inv_outputs)
            scaler.scale(loss).backward()
            scaler.step(optimizer)
            scaler.update()
            

            # scheduling for Cyclic LR
            lr_scheduler.step()
    
    not_freeze(c_net)
    # # testrob
    
    
    # ---- Phase 4: evaluation — clean accuracy first ----
    net.eval()
    test_loss = 0
    correct = 0
    total = 0
    for _,  (inputs, targets) in enumerate(test_loader):
        inputs, targets = inputs.cuda(), targets.cuda()

        # Accerlating forward propagation
        with autocast():
            outputs = net(inputs)
            loss = F.cross_entropy(outputs, targets)

        test_loss += loss.item()
        _, predicted = outputs.max(1)
        total += targets.size(0)
        correct += predicted.eq(targets).sum().item()
    clean_acc = 100. * correct / total
    # Robust accuracy under a fixed 20-step test-time attack.
    if args.dataset != 'tiny':
        test_attack = attack_loader(net=net, attack=args.attack, eps=args.eps, steps=20)
    else:
        test_attack = attack_loader(net=net, attack=args.attack, eps=4/255, steps=20)
    
    
    
    test_loss = 0
    correct = 0
    total = 0
    
    for _,  (inputs, targets) in enumerate(test_loader):
        inputs = test_attack(inputs, targets)
        inputs, targets = inputs.cuda(), targets.cuda()

        # Accerlating forward propagation
        with autocast():
            outputs = net(inputs)
            loss = F.cross_entropy(outputs, targets)

        test_loss += loss.item()
        _, predicted = outputs.max(1)
        total += targets.size(0)
        correct += predicted.eq(targets).sum().item()
        # print(predicted)
        # print(targets)
    adv_acc = 100. * correct / total
    acc = (clean_acc + adv_acc)/2

    # Package weights and metrics for the server; overwrites this client's
    # previous checkpoint (the same file read at the top when epoch > 0).
    send_message = {}
    send_message['net'] = net.state_dict()
    send_message['c_net'] = c_net.state_dict()
    send_message['z_net'] = z_net.state_dict()
    send_message['user_id'] = user_id
    send_message['adv_acc'] = adv_acc
    send_message['clean_acc'] = clean_acc
    send_message['total_acc'] = acc
    send_message['mean_diff'] = mean_diff
    send_message['loss'] = sum(epoch_loss) / len(epoch_loss)
    # index / index_pgd come from the LAST phase-1 epoch only (reset per
    # epoch); client_length is the phase-1/2 loader length, not phase 3's.
    send_message['ide'] = index / client_length
    send_message['pp_index'] = index_pgd/ client_length
    
    torch.save(send_message,f"{args.root}/temp/{user_id}.t7")
    
    if args.causal_log:
        torch.save(c_net.state_dict(),f"{args.root}/causal_log/u{user_id}e{epoch}.t7")
    
    torch.cuda.empty_cache()
    print(f'{user_id} is finished! Its rob is {adv_acc} and acc is {clean_acc}')
    

    
