import numpy as np
import json
import torch
import torch.optim as optim
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
from torch.autograd import Variable
import torch.utils.data as data
import argparse
import logging
import os
import copy
from math import *
import random

import datetime
#from torch.utils.tensorboard import SummaryWriter

from model import *
from utils import *
from vggmodel import *
from resnetcifar import *

import multiprocessing
import statistics
import pdb

from draw import *

def get_args():
    """Parse and return the command-line arguments for federated training."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', type=str, default='simple-cnn', help='neural network used in training')
    parser.add_argument('--dataset', type=str, default='mnist', help='dataset used for training')
    parser.add_argument('--net_config', type=lambda x: list(map(int, x.split(', '))))
    parser.add_argument('--partition', type=str, default='wzr', help='the data partitioning strategy')
    parser.add_argument('--batch-size', type=int, default=64, help='input batch size for training (default: 64)')
    parser.add_argument('--lr', type=float, default=0.01, help='learning rate (default: 0.01)')
    parser.add_argument('--epochs', type=int, default=10, help='number of local epochs')
    parser.add_argument('--n_parties', type=int, default=100,  help='number of workers in a distributed cluster')
    parser.add_argument('--alg', type=str, default='fedavg', help='fl algorithms: fedavg/bipole/foolsgold/multi-krum')
    # BUG FIX: ``type=bool`` treats every non-empty string (including the
    # literal "False") as True; parse truthy strings explicitly instead.
    parser.add_argument('--use_projection_head', type=lambda s: str(s).lower() in ('true', '1', 'yes'), default=False, help='whether add an additional header to model or not (see MOON)')
    parser.add_argument('--out_dim', type=int, default=256, help='the output dimension for the projection layer')
    parser.add_argument('--loss', type=str, default='contrastive', help='for moon')
    parser.add_argument('--temperature', type=float, default=0.5, help='the temperature parameter for contrastive loss')
    parser.add_argument('--comm_round', type=int, default=30, help='number of maximum communication roun')
    parser.add_argument('--is_same_initial', type=int, default=1, help='Whether initial all the models with the same parameters in fedavg')
    parser.add_argument('--init_seed', type=int, default=0, help="Random seed")
    parser.add_argument('--dropout_p', type=float, required=False, default=0.0, help="Dropout probability. Default=0.0")
    parser.add_argument('--datadir', type=str, required=False, default="./data/", help="Data directory")
    parser.add_argument('--reg', type=float, default=1e-5, help="L2 regularization strength")
    parser.add_argument('--logdir', type=str, required=False, default="./logs/", help='Log directory path')
    parser.add_argument('--modeldir', type=str, required=False, default="./models/", help='Model directory path')
    parser.add_argument('--beta', type=float, default=0.5, help='The parameter for the dirichlet distribution for data partitioning')
    parser.add_argument('--device', type=str, default='cuda:0', help='The device to run the program')
    parser.add_argument('--log_file_name', type=str, default=None, help='The log file name')
    parser.add_argument('--optimizer', type=str, default='sgd', help='the optimizer')
    parser.add_argument('--mu', type=float, default=0.001, help='the mu parameter for fedprox')
    parser.add_argument('--noise', type=float, default=0, help='how much noise we add to some party')
    parser.add_argument('--noise_type', type=str, default='level', help='Different level of noise or different space of noise')
    parser.add_argument('--rho', type=float, default=0, help='Parameter controlling the momentum SGD')
    parser.add_argument('--sample', type=float, default=1, help='Sample ratio for each communication round')
    parser.add_argument('--p_solo', type=float, default=0.2, help='Poison ratio of solo adversaries for each communication round')
    parser.add_argument('--p_team', type=float, default=0.4, help='Poison ratio of team adversaries for each communication round')
    parser.add_argument('--tau', type=float, default=0.05, help='Step size for Bipole')
    # BUG FIX: downstream code compares ``args.replication == 1`` (an int),
    # but this flag was parsed as str, so CLI-supplied values never matched.
    parser.add_argument('--replication', type=int, default=0, help='Parameter controlling training or loading model')
    args = parser.parse_args()
    return args

def init_nets(net_configs, dropout_p, n_parties, args):
    """Instantiate one fresh model per party.

    Args:
        net_configs: network configuration passed through to the model
            constructors (parsed from ``--net_config``).
        dropout_p: dropout probability, used only by the MLP (``FcNet``) path.
        n_parties: number of client models to create.
        args: parsed CLI arguments (uses dataset, model, alg,
            use_projection_head, out_dim).

    Returns:
        (nets, model_meta_data, layer_type) where ``nets`` maps party index
        to its model, ``model_meta_data`` lists the shapes of the state-dict
        tensors, and ``layer_type`` lists the state-dict key names.
    """

    nets = {net_i: None for net_i in range(n_parties)}

    # Number of output classes per dataset.
    # NOTE(review): there is no default branch — for an unlisted dataset
    # (e.g. 'generated') n_classes stays unbound; that only matters on the
    # projection-head / moon paths below, which reference it.
    if args.dataset in {'mnist', 'cifar10', 'svhn', 'fmnist'}:
        n_classes = 10
    elif args.dataset == 'celeba':
        n_classes = 2
    elif args.dataset == 'cifar100':
        n_classes = 100
    elif args.dataset == 'tinyimagenet':
        n_classes = 200
    elif args.dataset == 'femnist':
        n_classes = 62
    elif args.dataset == 'emnist':
        n_classes = 47
    elif args.dataset in {'a9a', 'covtype', 'rcv1', 'SUSY'}:
        n_classes = 2
    if args.use_projection_head:
        # MOON-style model with an extra projection header; the "-mnist"
        # suffix selects the 1-channel variant of the simple CNN backbone.
        add = ""
        if "mnist" in args.dataset and args.model == "simple-cnn":
            add = "-mnist"
        for net_i in range(n_parties):
            net = ModelFedCon(args.model+add, args.out_dim, n_classes, net_configs)
            nets[net_i] = net
    else:
        if args.alg == 'moon':
            # MOON without the projection header.
            add = ""
            if "mnist" in args.dataset and args.model == "simple-cnn":
                add = "-mnist"
            for net_i in range(n_parties):
                net = ModelFedCon_noheader(args.model+add, args.out_dim, n_classes, net_configs)
                nets[net_i] = net
        else:
            # Plain per-model dispatch used by fedavg/bipole/etc.
            for net_i in range(n_parties):
                if args.dataset == "generated":
                    net = PerceptronModel()
                elif args.model == "mlp":
                    # Input sizes are the feature counts of the LIBSVM-style
                    # tabular datasets.
                    if args.dataset == 'covtype':
                        input_size = 54
                        output_size = 2
                        hidden_sizes = [32,16,8]
                    elif args.dataset == 'a9a':
                        input_size = 123
                        output_size = 2
                        hidden_sizes = [32,16,8]
                    elif args.dataset == 'rcv1':
                        input_size = 47236
                        output_size = 2
                        hidden_sizes = [32,16,8]
                    elif args.dataset == 'SUSY':
                        input_size = 18
                        output_size = 2
                        hidden_sizes = [16,8]
                    net = FcNet(input_size, hidden_sizes, output_size, dropout_p)
                elif args.model == "vgg":
                    net = vgg11()
                elif args.model == "simple-cnn":
                    # NOTE(review): an unlisted dataset here leaves ``net``
                    # unbound from this iteration (NameError / stale model).
                    if args.dataset in ("cifar10", "cinic10", "svhn"):
                        net = SimpleCNN(input_dim=(16 * 5 * 5), hidden_dims=[120, 84], output_dim=10)
                    elif args.dataset in ("mnist", 'femnist', 'fmnist'):
                        net = SimpleCNNMNIST(input_dim=(16 * 4 * 4), hidden_dims=[120, 84], output_dim=10)
                    elif args.dataset == 'celeba':
                        net = SimpleCNN(input_dim=(16 * 5 * 5), hidden_dims=[120, 84], output_dim=2)
                elif args.model == "vgg-9":
                    if args.dataset in ("mnist", 'femnist'):
                        net = ModerateCNNMNIST()
                    elif args.dataset in ("cifar10", "cinic10", "svhn"):
                        # print("in moderate cnn")
                        net = ModerateCNN()
                    elif args.dataset == 'celeba':
                        net = ModerateCNN(output_dim=2)
                elif args.model == "resnet":
                    net = ResNet50_cifar10()
                elif args.model == "vgg16":
                    net = vgg16()
                else:
                    print("not supported yet")
                    exit(1)
                nets[net_i] = net

    # Record layer shapes/names from party 0 (assumes n_parties >= 1 and all
    # parties share the same architecture).
    model_meta_data = []
    layer_type = []
    for (k, v) in nets[0].state_dict().items():
        model_meta_data.append(v.shape)
        layer_type.append(k)
    return nets, model_meta_data, layer_type


def train_net(net_id, net, train_dataloader, test_dataloader, epochs, lr, args_optimizer, device="cpu", reg=None, rho=None):
    """Run standard (honest) local training on one client's model.

    Args:
        net_id: client identifier (currently used only for logging).
        net: model to train; mutated in place.
        train_dataloader: a DataLoader of (x, target) batches, or a list of
            such loaders.
        test_dataloader: unused here; kept for call-site symmetry.
        epochs: number of local passes over the data.
        lr: learning rate.
        args_optimizer: one of 'adam', 'amsgrad', 'sgd'.
        device: torch device to train on.
        reg: L2 weight decay; defaults to the module-level ``args.reg``.
        rho: SGD momentum; defaults to the module-level ``args.rho``.

    Side effects: updates ``net``'s parameters and moves it to CPU when done.
    """
    # Fall back to the global CLI args (set under __main__) when not supplied,
    # so existing call sites keep working unchanged.
    if reg is None:
        reg = args.reg
    if rho is None:
        rho = args.rho

    trainable = filter(lambda p: p.requires_grad, net.parameters())
    if args_optimizer == 'adam':
        optimizer = optim.Adam(trainable, lr=lr, weight_decay=reg)
    elif args_optimizer == 'amsgrad':
        optimizer = optim.Adam(trainable, lr=lr, weight_decay=reg, amsgrad=True)
    elif args_optimizer == 'sgd':
        optimizer = optim.SGD(trainable, lr=lr, momentum=rho, weight_decay=reg)
    criterion = nn.CrossEntropyLoss().to(device)

    # Normalize to a list of loaders so the loop below is uniform.
    if not isinstance(train_dataloader, list):
        train_dataloader = [train_dataloader]

    for epoch in range(epochs):
        for loader in train_dataloader:
            for batch_idx, (x, target) in enumerate(loader):
                x, target = x.to(device), target.to(device)

                optimizer.zero_grad()
                x.requires_grad = True
                target.requires_grad = False
                target = target.long()

                out = net(x)
                loss = criterion(out, target)

                loss.backward()
                optimizer.step()

    net.to('cpu')


# def train_net_poison_solo(net_id, net, train_dataloader, test_dataloader, epochs, lr, args_optimizer, device="cpu"):
#     logger.info('Training network %s' % str(net_id))

#     # train_acc = compute_accuracy(net, train_dataloader, device=device)
#     # test_acc = compute_accuracy(net, test_dataloader, device=device)

#     # logger.info('>> Pre-Training Training accuracy: {}'.format(train_acc))
#     # logger.info('>> Pre-Training Test accuracy: {}'.format(test_acc))

#     if args_optimizer == 'adam':
#         optimizer = optim.Adam(filter(lambda p: p.requires_grad, net.parameters()), lr=lr, weight_decay=args.reg)
#     elif args_optimizer == 'amsgrad':
#         optimizer = optim.Adam(filter(lambda p: p.requires_grad, net.parameters()), lr=lr, weight_decay=args.reg,
#                                amsgrad=True)
#     elif args_optimizer == 'sgd':
#         optimizer = optim.SGD(filter(lambda p: p.requires_grad, net.parameters()), lr=lr, momentum=args.rho, weight_decay=args.reg)
#     criterion = nn.CrossEntropyLoss().to(device)

#     cnt = 0
#     if type(train_dataloader) == type([1]):
#         pass
#     else:
#         train_dataloader = [train_dataloader]

#     #writer = SummaryWriter()
    
#     for epoch in range(epochs):
#         av_list = []
#         epoch_loss_collector = []
#         for tmp in train_dataloader:
#             for batch_idx, (x, target) in enumerate(tmp):
#                 x, target = x.to(device), target.to(device)

#                 optimizer.zero_grad()
#                 x.requires_grad = True
#                 target.requires_grad = False
#                 target = target.long()
                
#                 target = torch.randint(10, target.shape).to(device)
                
#                 # target = torch.tensor(list(map(lambda x: random.randint(0, 9), target))).to(device)
                
#                 out = net(x)
                
#                 loss = criterion(out, target)

#                 loss.backward()

#                 # if (epoch == 0 or epoch == epochs - 1) and net_id == 8:
#                 #     fc3_weight = list(net.parameters())[-2].grad
#                 #     av = torch.norm(fc3_weight.float(), 1).item()
#                 #     av_list.append(av)
                
#                 optimizer.step()

#         # if (epoch == 0 or epoch == epochs - 1) and net_id == 8:
#         #     with open('av_list_solo.txt', 'a') as f:
#         #         f.write('epoch: {}\n'.format(str(epoch)))
#         #         for av in av_list:
#         #             f.write(str(av) + '\n')
#         #         f.write('---------------------------------------\n')

#                 # fc3_weight = list(net.parameters())[-2].grad
#                 # av = torch.norm(fc3_weight.float(), 1).item()
#                 # av_list.append(av)

#                 # for param in net.parameters():
#                 #     print(param)
#                 #     print(param.grad)

#         #         cnt += 1
#         #         epoch_loss_collector.append(loss.item())

#         # epoch_loss = sum(epoch_loss_collector) / len(epoch_loss_collector)
#         # if epoch % 4 == 0:
#         #     logger.info('Epoch: %d Loss: %f' % (epoch, epoch_loss))
    

#     train_acc = compute_accuracy(net, train_dataloader, device=device)
#     test_acc = compute_accuracy(net, test_dataloader, device=device)

#     logger.info('>> Training accuracy: %f' % train_acc)
#     logger.info('>> Test accuracy: %f' % test_acc)

#     net.to('cpu')
#     # logger.info(' ** Training complete **')

#     # return train_acc, test_acc


def train_net_poison_solo(target_net, net_id, net, train_dataloader, test_dataloader, epochs, lr, args_optimizer, device="cpu", reg=None, rho=None):
    """Local training for a solo poisoner: the loader's true labels are
    discarded and replaced with consecutive slices of ``target_net``.

    Args:
        target_net: 1-D long tensor of attacker-chosen labels, at least as
            long as the client's dataset; consumed in dataset order.
        net_id: client identifier (logging only).
        net: model to train; mutated in place.
        train_dataloader: DataLoader (or list of DataLoaders) of (x, target).
        test_dataloader: unused here; kept for call-site symmetry.
        epochs / lr / args_optimizer / device: as in ``train_net``.
        reg / rho: weight decay / momentum; default to module-level ``args``.

    Side effects: updates ``net`` and moves it to CPU when done.
    """
    if reg is None:
        reg = args.reg
    if rho is None:
        rho = args.rho

    trainable = filter(lambda p: p.requires_grad, net.parameters())
    if args_optimizer == 'adam':
        optimizer = optim.Adam(trainable, lr=lr, weight_decay=reg)
    elif args_optimizer == 'amsgrad':
        optimizer = optim.Adam(trainable, lr=lr, weight_decay=reg, amsgrad=True)
    elif args_optimizer == 'sgd':
        optimizer = optim.SGD(trainable, lr=lr, momentum=rho, weight_decay=reg)
    criterion = nn.CrossEntropyLoss().to(device)

    if not isinstance(train_dataloader, list):
        train_dataloader = [train_dataloader]

    for epoch in range(epochs):
        for loader in train_dataloader:
            # BUG FIX: the original indexed target_net with
            # batch_idx * current_batch_size, which misaligns once the final
            # (smaller) batch is reached; a running offset is always correct.
            offset = 0
            for batch_idx, (x, target) in enumerate(loader):
                x, target = x.to(device), target.to(device)

                optimizer.zero_grad()
                x.requires_grad = True
                batch_len = target.shape[0]

                # Substitute the attacker's labels for this batch.
                # BUG FIX: move the slice to ``device`` (the original left it
                # wherever target_net lived, breaking CUDA training).
                target = target_net[offset : offset + batch_len].to(device).long()
                offset += batch_len

                out = net(x)
                loss = criterion(out, target)

                loss.backward()
                optimizer.step()

    net.to('cpu')


def train_net_poison_team(net_id, net, train_dataloader, test_dataloader, epochs, lr, args_optimizer, device="cpu", reg=None, rho=None):
    """Local training for a team (label-flipping) poisoner: every label 1 is
    rewritten to 7 before the forward pass.

    Args:
        net_id: client identifier (logging only).
        net: model to train; mutated in place.
        train_dataloader: DataLoader (or list of DataLoaders) of (x, target).
        test_dataloader: unused here; kept for call-site symmetry.
        epochs / lr / args_optimizer / device: as in ``train_net``.
        reg / rho: weight decay / momentum; default to module-level ``args``.

    Side effects: updates ``net`` and moves it to CPU when done.
    """
    if reg is None:
        reg = args.reg
    if rho is None:
        rho = args.rho

    trainable = filter(lambda p: p.requires_grad, net.parameters())
    if args_optimizer == 'adam':
        optimizer = optim.Adam(trainable, lr=lr, weight_decay=reg)
    elif args_optimizer == 'amsgrad':
        optimizer = optim.Adam(trainable, lr=lr, weight_decay=reg, amsgrad=True)
    elif args_optimizer == 'sgd':
        optimizer = optim.SGD(trainable, lr=lr, momentum=rho, weight_decay=reg)
    criterion = nn.CrossEntropyLoss().to(device)

    if not isinstance(train_dataloader, list):
        train_dataloader = [train_dataloader]

    for epoch in range(epochs):
        for loader in train_dataloader:
            for batch_idx, (x, target) in enumerate(loader):
                x, target = x.to(device), target.to(device)

                optimizer.zero_grad()
                x.requires_grad = True
                target.requires_grad = False
                target = target.long()

                # Label-flip attack 1 -> 7.  masked_fill is the vectorized
                # equivalent of the original per-element Python map() and
                # keeps the tensor on ``device`` without a host round-trip.
                target = target.masked_fill(target == 1, 7)

                out = net(x)
                loss = criterion(out, target)

                loss.backward()
                optimizer.step()

    net.to('cpu')


def view_image(train_dataloader):
    """Dump the first batch of images to ``img.npy``, print its shape, and
    terminate the process (debug helper)."""
    for batch, _ in train_dataloader:
        np.save("img.npy", batch)
        print(batch.shape)
        exit(0)


def local_train_net(nets, selected, args, net_dataidx_map, mode, test_dl = None, device="cpu"):
    """Run one communication round of local training for the selected clients.

    The first ``p_team`` fraction of the selection order become team
    (label-flipping) poisoners and the last ``p_solo`` fraction become solo
    poisoners; the rest train honestly.  In mode 'normal' nobody poisons;
    in mode 'poison' only the poisoners train.

    Returns:
        (nets_list, solo_clients, team_clients): all nets as a list plus the
        index arrays of solo-toxic and team-toxic clients.
    """
    non_shuffle = np.arange(len(selected))
    # BUG FIX: this module does ``from math import *`` but never imports the
    # ``math`` module itself, so ``math.floor`` raised NameError; use the
    # star-imported ``floor`` instead.
    team_clients = non_shuffle[:floor(args.p_team * len(selected))]
    if floor(args.p_solo * len(selected)) > 0:
        solo_clients = non_shuffle[-floor(args.p_solo * len(selected)):]
    else:
        solo_clients = []

    if mode == 'normal':
        team_clients, solo_clients = [], []

    for net_id, net in nets.items():
        if net_id not in selected:
            continue

        # Replication mode: restore a previously saved model instead of training.
        if args.replication == 1:
            load_model(net, net_id, args, device=device)
            continue

        dataidxs = net_dataidx_map[net_id]

        # Move the model to the training device.
        net.to(device)

        train_dl_local, test_dl_local, _, _ = get_dataloader(args.dataset, args.datadir, args.batch_size, 32, dataidxs)
        train_dl_global, test_dl_global, _, _ = get_dataloader(args.dataset, args.datadir, args.batch_size, 32)
        n_epoch = args.epochs

        if net_id in solo_clients:
            # Each solo poisoner trains against its own chunk of the
            # externally loaded target-label stream.
            target_60000 = load_target()
            target_nets = torch.chunk(target_60000, int(args.p_solo * args.n_parties), dim=0)
            train_net_poison_solo(target_nets[net_id - int((1 - args.p_solo) * args.n_parties)], net_id, net, train_dl_local, test_dl_local, n_epoch, args.lr, args.optimizer, device=device)
        elif net_id in team_clients:
            train_net_poison_team(net_id, net, train_dl_local, test_dl_local, n_epoch, args.lr, args.optimizer, device=device)
        else:
            # Honest clients are skipped entirely in pure-poison mode.
            if mode == 'poison':
                continue
            train_net(net_id, net, train_dl_local, test_dl_local, n_epoch, args.lr, args.optimizer, device=device)

    logger.info("solo-toxic clients: [%s]" % ', '.join(list(map(lambda x: str(x), solo_clients))))
    logger.info("team-toxic clients: [%s]" % ', '.join(list(map(lambda x: str(x), team_clients))))

    nets_list = list(nets.values())
    return nets_list, solo_clients, team_clients


def get_partition_dict(dataset, partition, n_parties, init_seed=0, datadir='./data', logdir='./logs', beta=0.5, p_team=0.4):
    """Seed all RNGs and return the party -> data-index mapping.

    Args:
        dataset / datadir / logdir: dataset name and directories.
        partition: partitioning strategy passed to ``partition_data``.
        n_parties: number of clients to partition across.
        init_seed: seed applied to numpy, torch and random.
        beta: Dirichlet concentration for non-IID partitioning.
        p_team: team-poisoner ratio forwarded to ``partition_data``.
            BUG FIX: ``p_team`` was referenced here but never defined in this
            scope (guaranteed NameError); it is now a parameter whose default
            matches the CLI default for ``--p_team``.
    """
    seed = init_seed
    np.random.seed(seed)
    torch.manual_seed(seed)
    random.seed(seed)
    X_train, y_train, X_test, y_test, net_dataidx_map, traindata_cls_counts = partition_data(
        dataset, datadir, logdir, partition, n_parties, p_team, beta=beta)

    return net_dataidx_map

def bipole(cs_scores: list, fc3_weight_list: list, client_ids: list, local_para_list: list, global_para: dict, p_solo: float, R: list, cnt: int, fcw_avg: list):
    """One round of the Bipole defensive aggregation.

    Filters suspected team poisoners (``team_filter``), then searches over
    two candidate solo-poisoner ratios (current p_solo and p_solo stepped by
    R[round] * args.tau), picking the one whose selected clients have the
    lower average pairwise cosine similarity of final-layer weights.  The
    surviving clients' parameters are uniformly averaged into ``global_para``.

    Args:
        cs_scores: cosine-similarity scores per client (fed to team_filter).
        fc3_weight_list: final-layer weight tensors, one per client.
        client_ids: client ids for this round, aligned with the two lists above.
        local_para_list: each client's state_dict, aligned with client_ids.
        global_para: global state_dict to overwrite with the new average.
        p_solo: current estimate of the solo-poisoner ratio.
        R: step-direction history; R[-1] drives this round, and one entry is
            appended here (mutated in place).
        cnt: counter of consecutive "stuck at p_solo == 0" rounds; triggers
            the early-stop condition at 3.
        fcw_avg: flattened average final-layer weights from the previous
            round's selection (used only when round != 0).

    Returns:
        (global_para, p_solo, p_team, R, selected_ids, css_id, cnt, fcw_avg).

    NOTE(review): relies on module-level ``args`` and on helpers
    (team_filter, solo_filter, compute_cosine_similarity, mean) whose exact
    semantics are defined elsewhere — verify against utils.
    """
    
    # Round index is implicit in the length of the R history.
    # NOTE(review): ``round`` shadows the builtin.
    round = len(R) - 1
    print("round = {}\n".format(round))

    id_fcw, css_id, cs_sums, client_with_solo, p_team = team_filter(cs_scores, fc3_weight_list, client_ids)

    # if round == 0:
    #     p_solo = compute_solo_ratio(cs_sums, len(client_ids))
    #     print("solo", p_solo)
    
    # Candidate ratios: keep p_solo, or step it by R[round] * tau.
    p_solo_list = [p_solo, p_solo + R[round] * args.tau]

    n_parties = len(client_ids)
    cs_avg_list, id_selected_list = [], []

    
    for i in range(2):
        # Select survivors under candidate ratio i.
        id_selected = solo_filter(css_id, cs_sums, n_parties, p_solo_list[i])
        id_selected_list.append(id_selected)

        # Stack the survivors' final-layer weights for similarity scoring.
        fcw_selected = [id_fcw[id] for id in id_selected]
        fcw_matrix = torch.tensor(np.array([item.cpu().detach().numpy() for item in fcw_selected]))
        
        # NOTE(review): on failure cs_avg is only logged, not recovered — a
        # first-iteration failure leaves cs_avg unbound and the append below
        # raises; a second-iteration failure silently reuses the stale value.
        try:
            cs_avg = mean(compute_cosine_similarity(fcw_matrix))
        except Exception as e:
            print(p_solo_list)
            print(p_team)
        
        cs_avg_list.append(cs_avg)
        
        team = set(client_ids) - set(client_with_solo)
        solo = set(client_with_solo) - set(id_selected)

        print("p_solo = {:.2f}, cs_avg = {:.7f}\nteam: {}\nsolo: {}\n".format(p_solo_list[i], cs_avg, str(team), str(solo)))

        if i == 0:
            # Early stop: no step direction, oscillation detected late in
            # training, or the stuck-counter reached its limit.
            if R[round] == 0 or (round >= 9 and (sum(R[-2:]) == 0 and sum(R[-4:]) == 0) or cnt == 3):
                choice = 0
                R.append(0)
                break
        elif cs_avg_list[0] > cs_avg_list[1]:
            # Stepped ratio gave lower similarity: accept the step
            # (truncate to 2 decimals, clamp at 0, keep direction).
            choice = 1
            p_solo = p_solo + R[round] * args.tau
            p_solo = int(p_solo * 100) / 100
            if p_solo <= 0:
                p_solo = 0
                R.append(1)
            else:
                R.append(R[round])
        else:
            # Step rejected: keep current ratio and reverse direction
            # (or count a stuck round at p_solo == 0).
            choice = 0
            if p_solo == 0:
                R.append(1)
                cnt += 1
                print('cnt = {}\n'.format(cnt))
            else:
                R.append(-R[round])
    
    print('final p_solo = {}, p_team = {}, R = {}\n'.format(str(p_solo), str(p_team), str(R)))
    print('------------------------------------------------------------------------------------------------------\n')

    # From round 1 on, compare each survivor against last round's average
    # final-layer weights.  NOTE(review): this only prints 'bingo' — the
    # removal is commented out, so it has no effect on aggregation.
    if round != 0:
        fcw_selected = [id_fcw[id] for id in id_selected_list[choice]]
        
        for item in fcw_selected:
            arr = np.array(item.cpu().detach().reshape(1, -1).numpy())
            arr = np.append(arr, fcw_avg, axis = 0)
            fcw_matrix = torch.tensor(arr)
            cs_scores = compute_cosine_similarity(fcw_matrix)[0]
            # print(cs_scores)
            if cs_scores > 0.9:
                # fcw_selected.remove(item)
                print('bingo')
    
    # Uniformly average the selected clients' parameters into global_para.
    id_para = {id: local_para_list[i] for i, id in enumerate(client_ids)}
    para_selected = [id_para[id] for id in id_selected_list[choice]]

    num_selected = len(para_selected)    
    for idx in range(num_selected):
        noise_para = para_selected[idx]
        if idx == 0:
            for key in noise_para:
                global_para[key] = noise_para[key] / num_selected
        else:
            for key in noise_para:
                global_para[key] += noise_para[key] / num_selected
    
    # Cache this round's average final-layer weights for the next round.
    fcw_selected = [id_fcw[id] for id in id_selected_list[choice]]
    fcw_avg = np.mean(np.array([item.cpu().detach().reshape(1, -1).numpy() for item in fcw_selected]), axis = 0)

    return global_para, p_solo, p_team, R, id_selected_list[choice], css_id, cnt, fcw_avg

# def bipole(cs_scores: list, fc3_weight_list: list, client_ids: list, local_para_list: list, global_para: dict, p_solo: float, R: list, cnt: int):
    
#     round = len(R) - 1
#     print("round = {}\n".format(round))

#     id_fcw, css_id, cs_sums, client_with_solo, p_team = team_filter(cs_scores, fc3_weight_list, client_ids)

#     # if round == 0:
#     #     p_solo = compute_solo_ratio(cs_sums, len(client_ids))
#     #     print("solo", p_solo)
    
#     p_solo_list = [p_solo, p_solo + R[round] * args.tau]

#     cs_avg_list, global_para_list, id_selected_list = [], [], []

#     for i in range(2):
#         global_para, id_selected, team, solo = select_and_aggregate(css_id, cs_sums, client_ids, local_para_list, global_para, p_solo_list[i], client_with_solo)

#         fcw_selected = [id_fcw[id] for id in id_selected]
#         fcw_matrix = torch.tensor(np.array([item.cpu().detach().numpy() for item in fcw_selected]))
        
#         try:
#             cs_avg = mean(compute_cosine_similarity(fcw_matrix))
#         except Exception as e:
#             print(p_solo_list)
#             print(p_team)
        
#         cs_avg_list.append(cs_avg)
#         global_para_list.append(global_para)
#         id_selected_list.append(id_selected)

#         print("p_solo = {:.2f}, cs_avg = {:.7f}\nteam: {}\nsolo: {}\n".format(p_solo_list[i], cs_avg, str(team), str(solo)))

#         if i == 0:
#             if R[round] == 0 or (round >= 9 and (sum(R[-2:]) == 0 and sum(R[-4:]) == 0) or cnt == 3):
#                 choice = 0
#                 R.append(0)
#                 break
#         elif cs_avg_list[0] > cs_avg_list[1]:
#             choice = 1
#             p_solo = p_solo + R[round] * args.tau
#             p_solo = int(p_solo * 100) / 100
#             if p_solo <= 0:
#                 p_solo = 0
#                 R.append(1)
#             else:
#                 R.append(R[round])
#         else:
#             choice = 0
#             if p_solo == 0:
#                 R.append(1)
#                 cnt += 1
#                 print('cnt = {}\n'.format(cnt))
#             else:
#                 R.append(-R[round])
    
#     print('final p_solo = {}, p_team = {}, R = {}\n'.format(str(p_solo), str(p_team), str(R)))
#     print('------------------------------------------------------------------------------------------------------\n')
    
#     return global_para_list[choice], p_solo, p_team, R, id_selected_list[choice], css_id, cnt


# def bipole(global_model, cs_scores: list, absolute_values: list, client_addresses: list, local_para: list, global_para: dict, p_solo: float, p_team: float, R: list):
    
#     round = len(R) - 1
#     if round == 0:
#         R.append(1)
#         global_para, client_selected, _, _ = select_and_aggregate(cs_scores, absolute_values, client_addresses, local_para, global_para, 0.3, 0.5)
#         return global_para, p_solo, p_team, R, client_selected

#     acc, error_rate = [], []

#     p_solo_list = [p_solo, p_solo, p_solo + R[round] * args.tau]
#     p_team_list = [p_team, p_team + args.tau, p_team]

#     _, test_dl_global, _, _ = get_dataloader(args.dataset, args.datadir, args.batch_size, 32)


#     for i in range(3):
#         global_para, client_selected, team, solo = select_and_aggregate(cs_scores, absolute_values, client_addresses, local_para, global_para, p_solo_list[i], p_team_list[i])
#         print("round = {}, i = {}, client_selected: {}, team: {}, solo: {}\n".format(str(round), str(i), str(client_selected), str(team), str(solo)))
#         global_model.load_state_dict(global_para)
#         global_model.to(device)
        
#         if i != 1:
#             test_acc = compute_accuracy(global_model, test_dl_global, device=device)
#             acc.append(test_acc)
#             print("round = {}, i = {}, acc: {}\n".format(str(round), str(i), str(test_acc)))

#         if i != 2:
#             error_rate_1_to_7 = compute_17_error_rate(global_model, test_dl_global, device=device)
#             error_rate.append(error_rate_1_to_7)
#             print("round = {}, i = {}, error rate: {}\n".format(str(round), str(i), str(error_rate_1_to_7)))
        
#         if i == 1:
#             if error_rate[0] - error_rate[1] > 0.2:
#                 p_team = p_team + args.tau

#             if R[round] == 0 or (round > 3 and abs(sum(R[round - 4:])) < 2):
#                 global_para, client_selected, team, solo = select_and_aggregate(cs_scores, absolute_values, client_addresses, local_para, global_para, p_solo, p_team)
#                 print("round = {}, final i = 1, client_selected: {}, team: {}, solo: {}\n".format(str(round), str(client_selected), str(team), str(solo)))
#                 R.append(0)
#                 break
        
#         if i == 2:
#             if acc[0] < acc[1]:
#                 p_solo = p_solo + R[round] * args.tau
#                 if p_solo < 0:
#                     p_solo = 0
#                     R.append(1)
#                 else:
#                     R.append(R[round])
#             else:
#                 R.append(-R[round])
            
#             global_para, client_selected, team, solo = select_and_aggregate(cs_scores, absolute_values, client_addresses, local_para, global_para, p_solo, p_team)
#             print("round = {}, final i = 2, client_selected: {}, team: {}, solo: {}\n".format(str(round), str(client_selected), str(team), str(solo)))
    
#     print('p_solo = {}, p_team = {}, R = {}\n'.format(str(p_solo), str(p_team), str(R)))
#     print('-----------------------------------------------------\n')
    
#     return global_para, p_solo, p_team, R, client_selected



if __name__ == '__main__':
    # Entry point: parse CLI args, set up per-experiment file logging, and
    # seed all RNGs before partitioning data and running the chosen FL
    # algorithm.
    # torch.set_printoptions(profile="full")
    args = get_args()
    mkdirs(args.logdir)
    mkdirs(args.modeldir)
    # if args.log_file_name is None:
    #     argument_path='experiment_arguments-%s.json' % datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
    # else:
    #     argument_path=args.log_file_name+'.json'
    # with open(os.path.join(args.logdir, argument_path), 'w') as f:
    #     json.dump(str(args), f)
    device = torch.device(args.device)
    # NOTE(review): secondary device is hard-coded to 'cuda:1' and is not
    # used anywhere in the visible code -- confirm before removing.
    device_backup = torch.device('cuda:1')
    # logging.basicConfig(filename='test.log', level=logger.info, filemode='w')
    # logging.info("test")
    # Drop any handlers installed by imported modules so that basicConfig
    # below actually takes effect (basicConfig is a no-op if handlers exist).
    for handler in logging.root.handlers[:]:
        logging.root.removeHandler(handler)

    if args.log_file_name is None:
        # Default log-file name encodes the main hyper-parameters, e.g.
        # "fedavg-simple-cnn-mnist-epoch10-round30-party100-...-wzr".
        # args.log_file_name = 'experiment_log-%s' % (datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S"))
        tunable_para = [args.model, args.dataset, args.epochs, args.comm_round, args.n_parties, args.beta, args.p_solo, args.p_team, args.tau, args.alg, args.partition]
        args.log_file_name = "{0[9]}-{0[0]}-{0[1]}-epoch{0[2]}-round{0[3]}-party{0[4]}-beta{0[5]}-solo{0[6]}-team{0[7]}-tau{0[8]}-{0[10]}".format(tunable_para)
        # args.log_file_name += '-%s' % datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
    log_path=args.log_file_name+'.log'
    logging.basicConfig(
        filename=os.path.join(args.logdir, log_path),
        # filename='/home/qinbin/test.log',
        format='%(asctime)s %(levelname)-8s %(message)s',
        datefmt='%m-%d %H-%M', level=logging.DEBUG, filemode='w')

    logger = logging.getLogger()
    logger.setLevel(logging.DEBUG)
    logger.info(device)

    # Seed numpy, torch and the stdlib RNG for reproducibility.
    seed = args.init_seed
    logger.info("#" * 100)
    np.random.seed(seed)
    torch.manual_seed(seed)
    random.seed(seed)
    logger.info("Partitioning data")

    # Two partitions of the same dataset: a plain non-IID split used by the
    # "normal" client copies (net_dataidx_map_n) and the args.partition split
    # (default 'wzr') used by the poisoned copies (net_dataidx_map_p).
    _, _, _, _, net_dataidx_map_n, _ = partition_data(
        args.dataset, args.datadir, args.logdir, 'normal-noniid', args.n_parties, 0, beta=args.beta)

    X_train, y_train, X_test, y_test, net_dataidx_map_p, traindata_cls_counts = partition_data(
        args.dataset, args.datadir, args.logdir, args.partition, args.n_parties, args.p_team, beta=args.beta)

    n_classes = len(np.unique(y_train))

    # Global (un-partitioned) loaders used for evaluating the global model;
    # the test batch size is fixed at 32.
    train_dl_global, test_dl_global, train_ds_global, test_ds_global = get_dataloader(args.dataset,
                                                                                        args.datadir,
                                                                                        args.batch_size,
                                                                                        32)

    # NOTE(review): the label says "train_dl_global" but this prints the
    # dataset length, not the number of batches.
    print("len train_dl_global:", len(train_ds_global))

    data_size = len(test_ds_global)

    # test_dl = data.DataLoader(dataset=test_ds_global, batch_size=32, shuffle=False)

    train_all_in_list = []
    test_all_in_list = []


    if args.alg == 'bipole':
        # Bipole: each party is trained twice per round -- once on its normal
        # non-IID shard (nets_n) and once on its poisoned shard (nets_p) --
        # then cosine similarity between last-layer gradients drives the
        # adaptive client selection performed by bipole().
        logger.info("Initializing nets")
        nets_n, local_model_meta_data, layer_type = init_nets(args.net_config, args.dropout_p, args.n_parties, args)
        nets_p, _, _ = init_nets(args.net_config, args.dropout_p, args.n_parties, args)

        global_models, global_model_meta_data, global_layer_type = init_nets(args.net_config, 0, 1, args)
        global_model = global_models[0]

        # if args.replication == 1:
        #     global_model = load_model(global_model, 'global_model', args, device=device)

        global_para = global_model.state_dict()

        # error_rate_1_to_7 = compute_17_error_rate(global_model, test_dl_global, device=device)

        if args.is_same_initial:
            # Start every local model (normal and poisoned copies) from the
            # same initial global weights.
            for net_id, net in nets_n.items():
                net.load_state_dict(global_para)
            for net_id, net in nets_p.items():
                net.load_state_dict(global_para)

        # Per-round bookkeeping: fraction of chosen clients that were
        # solo/team attackers, global test accuracy, and 1->7 error rate.
        solo_selected_list = []
        team_selected_list = []
        test_acc_list = []
        error_rate_1_to_7_list = []

        fcw_avg = 0
        for round in range(args.comm_round):
            logger.info("-------------------------------------------\n")
            logger.info("in comm round:" + str(round))

            arr = np.arange(args.n_parties)
            # np.random.shuffle(arr)
            selected = arr[:int(args.n_parties * args.sample)]  # selected clients

            global_para = global_model.state_dict()
            if round == 0:
                if args.is_same_initial:
                    for idx in selected:
                        nets_n[idx].load_state_dict(global_para)
                        nets_p[idx].load_state_dict(global_para)
            else:
                # After round 0, every selected client always resumes from the
                # current global weights.
                for idx in selected:
                    nets_n[idx].load_state_dict(global_para)
                    nets_p[idx].load_state_dict(global_para)

            # sub_nets = split_dict_by_count(nets, 2)
            # p1 = multiprocessing.Process(target=local_train_net(sub_nets[0], selected, args, net_dataidx_map, test_dl = test_dl_global, device=device))
            # p2 = multiprocessing.Process(target=local_train_net(sub_nets[1], selected, args, net_dataidx_map, test_dl = test_dl_global, device='cuda:1'))
            # p1.start() # p2.start() # p1.join() # p2.join()

            if round == 0:
                # Initial selection thresholds and direction history R for the
                # adaptive schedule maintained by bipole().
                p_solo, p_team, R = 0, 0, [1]

                cnt = 0

                logger.info('initial p_solo: %f' % p_solo)
                logger.info('initial p_team: %f' % p_team)

            local_train_net(nets_n, selected, args, net_dataidx_map_n, 'normal', test_dl = test_dl_global, device=device)
            _, solo_clients, team_clients = local_train_net(nets_p, selected, args, net_dataidx_map_p, 'poison', test_dl = test_dl_global, device=device)

            # update global model
            normal_para_list = [nets_n[idx].cpu().state_dict() for idx in selected]
            team_para_list = [nets_p[idx].cpu().state_dict() for idx in team_clients]
            solo_para_list = [nets_p[idx].cpu().state_dict() for idx in solo_clients]

            local_para_list = [*normal_para_list, *team_para_list, *solo_para_list]
            # random.shuffle(local_para_list)

            # Re-index: from here on "selected" means positions in
            # local_para_list (normal, then team, then solo), not party ids.
            selected = np.arange(len(local_para_list))

            # local_para_matrix = to_matrix(local_para_list)
            # scores = compute_cosine_similarity(local_para_matrix)

            # Gradient of each local update w.r.t. the current global weights;
            # only the final FC layer is used for the similarity computation.
            # NOTE(review): assumes the model exposes an 'fc3' layer -- confirm
            # for model choices other than simple-cnn.
            gradient_list = [compute_gradient(global_model.cpu().state_dict(), local_para) for local_para in local_para_list]
            fc3_weight_list = [torch.squeeze(gradient['fc3.weight'].reshape(1, -1)) for gradient in gradient_list]

            # if round == 0:
            #     proj, y = tsne(fc3_weight_list, args.p_solo, args.p_team)
            #     scatter(proj, y)
            #     plt.savefig(r'C:\\Users\\23638\\Desktop\\Gradients.jpg',
            #                 dpi=200, bbox_inches = 'tight')
            #     plt.show()

            # absolute_and_max(gradient_list, tunable_para[-1], round)

            # save_gradient(gradient_list, round)

            gradient_matrix = torch.tensor(np.array([item.cpu().detach().numpy() for item in fc3_weight_list]))

            cs_scores = compute_cosine_similarity(gradient_matrix)

            # cs_sums = cosine_similarity_sum(gradient_matrix)
            id_score_global, keys_global = cosine_similarity_sort(gradient_matrix)

            # bipole() performs the selection/aggregation and updates the
            # adaptive state (p_solo, p_team, R, cnt, fcw_avg) in one call.
            global_para, p_solo, p_team, R, client_selected, css_id, cnt, fcw_avg = bipole(cs_scores, fc3_weight_list, selected, local_para_list, global_para, p_solo, R, cnt, fcw_avg)

            # with open ('gradient_similarity_' + args.partition + '.txt', 'a') as f:
            #     current_round = "Round: " + str(round) + '\n'
            #     f.write(current_round)

            #     css_id = dict(sorted(css_id.items(), key=lambda d: d[1], reverse=False))

            #     # for css, id in css_id.items():
            #     #     f.write('Client ' + str(id) + ': '+ str(css) + '\n')
            #     # f.write('---------------------------------\n')

            #     # for cs_sum, id_score in zip(cs_sums, id_score_global.items()):
            #     #     f.write('Client ' + str(id_score[0]) + ': '+ str(cs_sum) + '\n')
            #     #     f.write(str(id_score[1]) + '\n')
            #     #     f.write('---------------------------------\n')

            #     # for keys, id_score in zip(keys_global, id_score_global.items()):
            #     #     f.write('Client ' + str(id_score[0]) + ': '+ str(keys) + '\n')
            #     #     f.write(str(id_score[1]) + '\n')
            #     # f.write('---------------------------------\n')

            #     # for id, score in id_score_global.items():
            #     #     f.write('Client ' + str(id) + ': '+ str(next(iter(score.items()))) + '\n')
            #     # f.write('---------------------------------\n')

            #     for css, id in css_id.items():
            #         f.write('Client ' + str(id) + ', cs: ' + str(next(iter(id_score_global[id].items()))) + ', cs_sum: ' + str(css) + '\n')

            logger.info('p_solo: %f' % p_solo)
            logger.info('p_team: %f' % p_team)
            logger.info('R: %s' % str(R))

            # global_para, client_selected, _, _ = select_and_aggregate(cs_scores, cs_sums, selected, local_para_list, global_para, p_solo, p_team)

            # Fraction of the chosen clients that are solo/team attackers
            # (True counts as 1 in .count(1)).
            solo_selected = ([i in client_selected for i in solo_clients].count(1)) / len(client_selected)
            team_selected = ([i in client_selected for i in team_clients].count(1)) / len(client_selected)

            solo_selected_list.append(np.around(solo_selected, 3))
            team_selected_list.append(np.around(team_selected, 3))

            logger.info('client_selected: %s' % str(client_selected))
            logger.info('solo_selected: %s' % str(solo_selected))
            logger.info('team_selected: %s' % str(team_selected))

            global_model.load_state_dict(copy.deepcopy(global_para))

            # save_model(global_model, 'global_model', args)

            # logger.info('global n_training: %d' % len(train_dl_global))
            # logger.info('global n_test: %d' % len(test_dl_global))

            global_model.to(device)
            train_acc = compute_accuracy(global_model, train_dl_global, device=device)
            test_acc = compute_accuracy(global_model, test_dl_global, device=device)

            error_rate_1_to_7 = compute_17_error_rate(global_model, test_dl_global, device=device)

            test_acc_list.append(np.around(test_acc, 3))
            error_rate_1_to_7_list.append(np.around(error_rate_1_to_7, 3))

            logger.info('>> Global Model Train accuracy: %f' % train_acc)
            logger.info('>> Global Model Test accuracy: %f' % test_acc)
            logger.info('>> Error rate 1 to 7: %f' % error_rate_1_to_7)

        # Append the across-round mean as the final element of each list.
        solo_selected_list.append(statistics.mean(solo_selected_list))
        team_selected_list.append(statistics.mean(team_selected_list))
        test_acc_list.append(statistics.mean(test_acc_list))
        error_rate_1_to_7_list.append(statistics.mean(error_rate_1_to_7_list))

        logger.info('solo_selected_list: %s' % str(solo_selected_list))
        logger.info('team_selected_list: %s' % str(team_selected_list))
        logger.info('test_acc_list: %s' % str(test_acc_list))
        logger.info('error_rate_1_to_7_list: %s' % str(error_rate_1_to_7_list))


    elif args.alg == 'fedavg':
        logger.info("Initializing nets")
        nets_n, local_model_meta_data, layer_type = init_nets(args.net_config, args.dropout_p, args.n_parties, args)
        nets_p, _, _ = init_nets(args.net_config, args.dropout_p, args.n_parties, args)

        global_models, global_model_meta_data, global_layer_type = init_nets(args.net_config, 0, 1, args)
        global_model = global_models[0]

        global_para = global_model.state_dict()
        if args.is_same_initial:
            for net_id, net in nets_n.items():
                net.load_state_dict(global_para)
            for net_id, net in nets_p.items():
                net.load_state_dict(global_para)

        for round in range(args.comm_round):
            logger.info("in comm round:" + str(round))

            arr = np.arange(args.n_parties)
            selected = arr[:int(args.n_parties * args.sample)]  #选中的客户端
            
            team_clients = selected[:math.floor(args.p_team * args.n_parties)]
            if math.floor(args.p_solo * args.n_parties) > 0:
                solo_clients = selected[-math.floor(args.p_solo * args.n_parties):]
            else:
                solo_clients = []
            
            selected_p = [*team_clients, *solo_clients]

            global_para = global_model.state_dict()
            if round == 0:
                if args.is_same_initial:
                    for idx in selected:
                        nets_n[idx].load_state_dict(global_para)
                        nets_p[idx].load_state_dict(global_para)
            else:
                for idx in selected:
                    nets_n[idx].load_state_dict(global_para)
                    nets_p[idx].load_state_dict(global_para)

            local_train_net(nets_n, selected, args, net_dataidx_map_n, 'normal', test_dl = test_dl_global, device=device)
            _, solo_clients, team_clients = local_train_net(nets_p, selected, args, net_dataidx_map_p, 'poison', test_dl = test_dl_global, device=device)

            # update global model            
            total_data_points_n = sum([len(net_dataidx_map_n[r]) for r in selected])
            total_data_points_p = sum([len(net_dataidx_map_p[r]) for r in selected_p])
            total_data_points = total_data_points_n + total_data_points_p
            fed_avg_freqs_n = [len(net_dataidx_map_n[r]) / total_data_points for r in selected]
            fed_avg_freqs_p = [len(net_dataidx_map_p[r]) / total_data_points for r in selected_p]

            for idx in range(len(selected)):
                net_para = nets_n[selected[idx]].cpu().state_dict()
                if idx == 0:
                    for key in net_para:
                        global_para[key] = net_para[key] * fed_avg_freqs_n[idx]
                else:
                    for key in net_para:
                        global_para[key] += net_para[key] * fed_avg_freqs_n[idx]
            
            for idx in range(len(selected_p)):
                net_para = nets_p[selected[idx]].cpu().state_dict()
                for key in net_para:
                    global_para[key] += net_para[key] * fed_avg_freqs_p[idx] * len(selected) / len(selected_p)

            global_model.load_state_dict(global_para)

            # save_model(global_model, 'global_model', args)

            logger.info('global n_training: %d' % len(train_dl_global))
            logger.info('global n_test: %d' % len(test_dl_global))

            global_model.to(device)
            train_acc = compute_accuracy(global_model, train_dl_global, device=device)
            test_acc, conf_matrix = compute_accuracy(global_model, test_dl_global, get_confusion_matrix=True, device=device)
            error_rate_1_to_7 = compute_17_error_rate(global_model, test_dl_global, device=device)

            logger.info('>> Global Model Train accuracy: %f' % train_acc)
            logger.info('>> Global Model Test accuracy: %f' % test_acc)
            logger.info('>> Error rate 1 to 7: %f' % error_rate_1_to_7)
    
    elif args.alg == 'foolsgold':
        logger.info("Initializing nets")
        nets_n, local_model_meta_data, layer_type = init_nets(args.net_config, args.dropout_p, args.n_parties, args)
        nets_p, _, _ = init_nets(args.net_config, args.dropout_p, args.n_parties, args)
        global_models, global_model_meta_data, global_layer_type = init_nets(args.net_config, 0, 1, args)
        global_model = global_models[0]

        global_para = global_model.state_dict()
        if args.is_same_initial:
            for net_id, net in nets_n.items():
                net.load_state_dict(global_para)
            for net_id, net in nets_p.items():
                net.load_state_dict(global_para)

        for round in range(args.comm_round):
            logger.info("in comm round:" + str(round))

            arr = np.arange(args.n_parties)
            selected = arr[:int(args.n_parties * args.sample)]  #选中的客户端

            global_para = global_model.state_dict()
            if round == 0:
                if args.is_same_initial:
                    for idx in selected:
                        nets_n[idx].load_state_dict(global_para)
                        nets_p[idx].load_state_dict(global_para)
            else:
                for idx in selected:
                    nets_n[idx].load_state_dict(global_para)
                    nets_p[idx].load_state_dict(global_para)

            local_train_net(nets_n, selected, args, net_dataidx_map_n, 'normal', test_dl = test_dl_global, device=device)
            _, solo_clients, team_clients = local_train_net(nets_p, selected, args, net_dataidx_map_p, 'poison', test_dl = test_dl_global, device=device)

            normal_para_list = [nets_n[idx].cpu().state_dict() for idx in selected]
            team_para_list = [nets_p[idx].cpu().state_dict() for idx in team_clients]
            solo_para_list = [nets_p[idx].cpu().state_dict() for idx in solo_clients]

            local_para_list = [*normal_para_list, *team_para_list, *solo_para_list]

            gradient_list = [compute_gradient(global_model.cpu().state_dict(), local_para) for local_para in local_para_list]
            fc3_weight_list = [torch.squeeze(gradient['fc3.weight'].reshape(1, -1)) for gradient in gradient_list]
            gradient_matrix = torch.tensor(np.array([item.cpu().detach().numpy() for item in fc3_weight_list]))
            w = foolsgold(gradient_matrix)
            w = w / w.sum()

            selected_p = [*selected[:math.floor(args.p_team * args.n_parties)], *selected[-math.floor(args.p_solo * args.n_parties):]]
            selected_p = [idx + args.n_parties for idx in selected_p]
            selected = [*selected, *selected_p]

            nets_pp = {k + args.n_parties: v for k, v in nets_p.items()}
            nets = {**nets_n, **nets_pp}

            for idx in range(len(selected)):
                net_para = nets[selected[idx]].cpu().state_dict()
                if idx == 0:
                    for key in net_para:
                        global_para[key] = net_para[key] * w[idx]
                else:
                    for key in net_para:
                        global_para[key] += net_para[key] * w[idx]
            global_model.load_state_dict(global_para)

            global_model.to(device)
            train_acc = compute_accuracy(global_model, train_dl_global, device=device)
            test_acc, conf_matrix = compute_accuracy(global_model, test_dl_global, get_confusion_matrix=True, device=device)

            logger.info('>> Global Model Train accuracy: %f' % train_acc)
            logger.info('>> Global Model Test accuracy: %f' % test_acc)

    elif args.alg == 'multi-krum':
        logger.info("Initializing nets")
        nets_n, local_model_meta_data, layer_type = init_nets(args.net_config, args.dropout_p, args.n_parties, args)
        nets_p, _, _ = init_nets(args.net_config, args.dropout_p, args.n_parties, args)
        global_models, global_model_meta_data, global_layer_type = init_nets(args.net_config, 0, 1, args)
        global_model = global_models[0]

        global_para = global_model.state_dict()
        if args.is_same_initial:
            for net_id, net in nets_n.items():
                net.load_state_dict(global_para)
            for net_id, net in nets_p.items():
                net.load_state_dict(global_para)

        for round in range(args.comm_round):
            logger.info("in comm round:" + str(round))

            arr = np.arange(args.n_parties)
            # np.random.shuffle(arr)
            selected = arr[:int(args.n_parties * args.sample)]  #选中的客户端

            global_para = global_model.state_dict()
            if round == 0:
                if args.is_same_initial:
                    for idx in selected:
                        nets_n[idx].load_state_dict(global_para)
                        nets_p[idx].load_state_dict(global_para)
            else:
                for idx in selected:
                    nets_n[idx].load_state_dict(global_para)
                    nets_p[idx].load_state_dict(global_para)

            local_train_net(nets_n, selected, args, net_dataidx_map_n, 'normal', test_dl = test_dl_global, device=device)
            _, solo_clients, team_clients = local_train_net(nets_p, selected, args, net_dataidx_map_p, 'poison', test_dl = test_dl_global, device=device)

            normal_para_list = [nets_n[idx].cpu().state_dict() for idx in selected]
            team_para_list = [nets_p[idx].cpu().state_dict() for idx in team_clients]
            solo_para_list = [nets_p[idx].cpu().state_dict() for idx in solo_clients]

            local_para_list = [*normal_para_list, *team_para_list, *solo_para_list]

            # update global model
            gradient_list = [compute_gradient(global_model.cpu().state_dict(), local_para) for local_para in local_para_list]
            fc3_weight_list = [torch.squeeze(gradient['fc3.weight'].reshape(1, -1)) for gradient in gradient_list]
            gradient_matrix = torch.tensor(np.array([item.cpu().detach().numpy() for item in fc3_weight_list]))
            
            sel = getKrum(gradient_matrix).numpy()

            nets_pp = {k + args.n_parties: v for k, v in nets_p.items()}
            nets = {**nets_n, **nets_pp}

            # update global model
            for idx in range(len(sel)):
                net_para = nets[idx].cpu().state_dict()
                if idx == 0:
                    for key in net_para:
                        global_para[key] = net_para[key] * 1 / len(sel)
                else:
                    for key in net_para:
                        global_para[key] += net_para[key] * 1 / len(sel)
            global_model.load_state_dict(global_para)

            # save_model(global_model, 'global_model', args)

            logger.info('global n_training: %d' % len(train_dl_global))
            logger.info('global n_test: %d' % len(test_dl_global))

            global_model.to(device)
            train_acc = compute_accuracy(global_model, train_dl_global, device=device)
            test_acc, conf_matrix = compute_accuracy(global_model, test_dl_global, get_confusion_matrix=True, device=device)


            logger.info('>> Global Model Train accuracy: %f' % train_acc)
            logger.info('>> Global Model Test accuracy: %f' % test_acc)