import copy
import logging
import math
import os
import pdb
import random
import time
from math import sqrt
from statistics import mean

import matplotlib.pyplot as plt
import numpy as np
import sklearn.datasets as sk
import sklearn.metrics.pairwise as smp
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data as data
import torchvision.transforms as transforms
import torchvision.utils as vutils
from PIL import Image
from sklearn.datasets import load_svmlight_file
from sklearn.metrics import confusion_matrix
from torch.autograd import Variable
from torch.utils.data import DataLoader

# from models.mnist_model import Generator, Discriminator, DHead, QHead
from config import params
from datasets import MNIST_truncated, CIFAR10_truncated, CIFAR100_truncated, ImageFolder_custom, SVHN_custom, FashionMNIST_truncated, CustomTensorDataset, CelebA_custom, FEMNIST, Generated, genData
from model import *


# Configure the root logger once at import time; every module that calls
# logging.getLogger() with no name shares this INFO-level configuration.
logging.basicConfig()
logger = logging.getLogger()
logger.setLevel(logging.INFO)

def mkdirs(dirpath):
    """Create *dirpath* (including parents), best-effort.

    Pre-existing directories are not an error; other filesystem failures
    (e.g. permission denied) are swallowed as in the original best-effort
    behavior, but only OSError — programming errors still propagate.
    """
    try:
        os.makedirs(dirpath, exist_ok=True)
    except OSError:
        pass

def load_mnist_data(datadir):
    """Load the full MNIST train/test split from *datadir* as numpy arrays."""
    transform = transforms.Compose([transforms.ToTensor()])

    train_ds = MNIST_truncated(datadir, train=True, download=True, transform=transform)
    test_ds = MNIST_truncated(datadir, train=False, download=True, transform=transform)

    X_train = train_ds.data.data.numpy()
    y_train = train_ds.target.data.numpy()
    X_test = test_ds.data.data.numpy()
    y_test = test_ds.target.data.numpy()

    return (X_train, y_train, X_test, y_test)

def load_fmnist_data(datadir):
    """Load the full FashionMNIST train/test split from *datadir* as numpy arrays."""
    transform = transforms.Compose([transforms.ToTensor()])

    train_ds = FashionMNIST_truncated(datadir, train=True, download=True, transform=transform)
    test_ds = FashionMNIST_truncated(datadir, train=False, download=True, transform=transform)

    X_train = train_ds.data.data.numpy()
    y_train = train_ds.target.data.numpy()
    X_test = test_ds.data.data.numpy()
    y_test = test_ds.target.data.numpy()

    return (X_train, y_train, X_test, y_test)

def load_svhn_data(datadir):
    """Load the full SVHN train/test split from *datadir*.

    Unlike the MNIST-style loaders, data/targets are returned as stored by
    SVHN_custom (no numpy conversion is applied here).
    """
    transform = transforms.Compose([transforms.ToTensor()])

    train_ds = SVHN_custom(datadir, train=True, download=True, transform=transform)
    test_ds = SVHN_custom(datadir, train=False, download=True, transform=transform)

    X_train, y_train = train_ds.data, train_ds.target
    X_test, y_test = test_ds.data, test_ds.target

    return (X_train, y_train, X_test, y_test)


def load_cifar10_data(datadir):
    """Load the full CIFAR-10 train/test split from *datadir*."""
    transform = transforms.Compose([transforms.ToTensor()])

    train_ds = CIFAR10_truncated(datadir, train=True, download=True, transform=transform)
    test_ds = CIFAR10_truncated(datadir, train=False, download=True, transform=transform)

    X_train, y_train = train_ds.data, train_ds.target
    X_test, y_test = test_ds.data, test_ds.target

    return (X_train, y_train, X_test, y_test)

def load_celeba_data(datadir):
    """Load CelebA gender labels (the 'Male' attribute) for train and test.

    Image data is not materialized here — the X slots of the returned tuple
    are None; only the binary gender labels are extracted.
    """
    transform = transforms.Compose([transforms.ToTensor()])

    train_ds = CelebA_custom(datadir, split='train', target_type="attr", download=True, transform=transform)
    test_ds = CelebA_custom(datadir, split='test', target_type="attr", download=True, transform=transform)

    gender_index = train_ds.attr_names.index('Male')
    y_train = train_ds.attr[:, gender_index:gender_index + 1].reshape(-1)
    y_test = test_ds.attr[:, gender_index:gender_index + 1].reshape(-1)

    return (None, y_train, None, y_test)

def load_femnist_data(datadir):
    """Load FEMNIST train/test data, labels, and per-user index arrays."""
    transform = transforms.Compose([transforms.ToTensor()])

    train_ds = FEMNIST(datadir, train=True, transform=transform, download=True)
    test_ds = FEMNIST(datadir, train=False, transform=transform, download=True)

    X_train = train_ds.data.data.numpy()
    y_train = train_ds.targets.data.numpy()
    u_train = np.array(train_ds.users_index)
    X_test = test_ds.data.data.numpy()
    y_test = test_ds.targets.data.numpy()
    u_test = np.array(test_ds.users_index)

    return (X_train, y_train, u_train, X_test, y_test, u_test)

def load_cifar100_data(datadir):
    """Load the full CIFAR-100 train/test split from *datadir*."""
    transform = transforms.Compose([transforms.ToTensor()])

    train_ds = CIFAR100_truncated(datadir, train=True, download=True, transform=transform)
    test_ds = CIFAR100_truncated(datadir, train=False, download=True, transform=transform)

    X_train, y_train = train_ds.data, train_ds.target
    X_test, y_test = test_ds.data, test_ds.target

    return (X_train, y_train, X_test, y_test)


def load_tinyimagenet_data(datadir):
    """Load Tiny-ImageNet sample paths and integer labels from an ImageFolder layout.

    X arrays hold file paths (not pixel data); y arrays hold class indices.
    NOTE(review): paths are built as datadir + './train/' — this assumes
    *datadir* ends with a separator; confirm against callers.
    """
    transform = transforms.Compose([transforms.ToTensor()])
    train_ds = ImageFolder_custom(datadir+'./train/', transform=transform)
    test_ds = ImageFolder_custom(datadir+'./val/', transform=transform)

    X_train = np.array([path for path, _ in train_ds.samples])
    y_train = np.array([int(label) for _, label in train_ds.samples])
    X_test = np.array([path for path, _ in test_ds.samples])
    y_test = np.array([int(label) for _, label in test_ds.samples])

    return (X_train, y_train, X_test, y_test)

def record_net_data_stats(y_train, net_dataidx_map, logdir):
    """Log and return the per-class sample counts of each client's split.

    Args:
        y_train: full training-label array.
        net_dataidx_map: dict client_id -> array of training indices.
        logdir: unused here; kept for interface compatibility.

    Returns:
        dict client_id -> {class_label: count}.
    """
    net_cls_counts = {}
    for client_id, indices in net_dataidx_map.items():
        labels, counts = np.unique(y_train[indices], return_counts=True)
        net_cls_counts[client_id] = dict(zip(labels, counts))

    logger.info('Data statistics: %s' % str(net_cls_counts))

    return net_cls_counts

def partition_data(dataset, datadir, logdir, partition, n_parties, p_team, beta=0.5):
    """Load *dataset* and split its training indices across *n_parties* clients.

    Args:
        dataset: dataset name ('mnist', 'fmnist', 'cifar10', 'svhn', 'celeba',
            'femnist', 'cifar100', 'tinyimagenet', 'generated', svmlight sets, ...).
        datadir: root directory holding the raw data files.
        logdir: forwarded to record_net_data_stats.
        partition: partitioning strategy ('homo', 'normal-noniid', 'homo-jza',
            'noniid-labeldir', 'wzr', 'noniid-#labelN', 'iid-diff-quantity',
            'mixed', 'real', 'transfer-from-femnist', 'transfer-from-criteo').
        n_parties: number of clients to split the training data across.
        p_team: fraction of clients forming the "team" whose label-1/label-7
            distributions are deliberately skewed (used by 'homo-jza',
            'noniid-labeldir', and 'wzr').
        beta: Dirichlet concentration parameter for the non-IID strategies.

    Returns:
        (X_train, y_train, X_test, y_test, net_dataidx_map,
        traindata_cls_counts), where net_dataidx_map maps client id to the
        training-set indices assigned to that client.

    NOTE(review): several branches call math.floor, so the module must have
    `import math` at the top level. Also, net_dataidx_map is only assigned
    when *partition* matches a handled strategy — an unknown strategy name
    would raise NameError at the final record_net_data_stats call.
    """
    #np.random.seed(2020)
    #torch.manual_seed(2020)

    # --- Step 1: load the raw (X, y) arrays for the requested dataset. ---
    if dataset == 'mnist':
        X_train, y_train, X_test, y_test = load_mnist_data(datadir)
    elif dataset == 'fmnist':
        X_train, y_train, X_test, y_test = load_fmnist_data(datadir)
    elif dataset == 'cifar10':
        X_train, y_train, X_test, y_test = load_cifar10_data(datadir)
    elif dataset == 'svhn':
        X_train, y_train, X_test, y_test = load_svhn_data(datadir)
    elif dataset == 'celeba':
        X_train, y_train, X_test, y_test = load_celeba_data(datadir)
    elif dataset == 'femnist':
        X_train, y_train, u_train, X_test, y_test, u_test = load_femnist_data(datadir)
    elif dataset == 'cifar100':
        X_train, y_train, X_test, y_test = load_cifar100_data(datadir)
    elif dataset == 'tinyimagenet':
        X_train, y_train, X_test, y_test = load_tinyimagenet_data(datadir)
    elif dataset == 'generated':
        # Synthetic 3-d dataset: 4000 train points drawn in 4 sign-pattern
        # "quadrants"; training labels alternate with the sample index (the
        # point is negated for label 1), test labels depend on the sign of p1.
        X_train, y_train = [], []
        for loc in range(4):
            for i in range(1000):
                p1 = random.random()
                p2 = random.random()
                p3 = random.random()
                if loc > 1:
                    p2 = -p2
                if loc % 2 ==1:
                    p3 = -p3
                if i % 2 == 0:
                    X_train.append([p1, p2, p3])
                    y_train.append(0)
                else:
                    X_train.append([-p1, -p2, -p3])
                    y_train.append(1)
        X_test, y_test = [], []
        for i in range(1000):
            p1 = random.random() * 2 - 1
            p2 = random.random() * 2 - 1
            p3 = random.random() * 2 - 1
            X_test.append([p1, p2, p3])
            if p1>0:
                y_test.append(0)
            else:
                y_test.append(1)
        X_train = np.array(X_train, dtype=np.float32)
        X_test = np.array(X_test, dtype=np.float32)
        y_train = np.array(y_train, dtype=np.int32)
        y_test = np.array(y_test, dtype=np.int64)
        # Even, contiguous index split across clients (may be overwritten
        # below if *partition* also matches a strategy branch).
        idxs = np.linspace(0,3999,4000,dtype=np.int64)
        batch_idxs = np.array_split(idxs, n_parties)
        net_dataidx_map = {i: batch_idxs[i] for i in range(n_parties)}
        mkdirs("data/generated/")
        np.save("data/generated/X_train.npy",X_train)
        np.save("data/generated/X_test.npy",X_test)
        np.save("data/generated/y_train.npy",y_train)
        np.save("data/generated/y_test.npy",y_test)

    #elif dataset == 'covtype':
    #    cov_type = sk.fetch_covtype('./data')
    #    num_train = int(581012 * 0.75)
    #    idxs = np.random.permutation(581012)
    #    X_train = np.array(cov_type['data'][idxs[:num_train]], dtype=np.float32)
    #    y_train = np.array(cov_type['target'][idxs[:num_train]], dtype=np.int32) - 1
    #    X_test = np.array(cov_type['data'][idxs[num_train:]], dtype=np.float32)
    #    y_test = np.array(cov_type['target'][idxs[num_train:]], dtype=np.int32) - 1
    #    mkdirs("data/generated/")
    #    np.save("data/generated/X_train.npy",X_train)
    #    np.save("data/generated/X_test.npy",X_test)
    #    np.save("data/generated/y_train.npy",y_train)
    #    np.save("data/generated/y_test.npy",y_test)

    elif dataset in ('rcv1', 'SUSY', 'covtype'):
        # svmlight-format datasets: random 75/25 train/test split.
        X_train, y_train = load_svmlight_file(datadir+dataset)
        X_train = X_train.todense()
        num_train = int(X_train.shape[0] * 0.75)
        if dataset == 'covtype':
            # covtype labels are 1..7 -> shift to 0..6
            y_train = y_train-1
        else:
            # labels arrive as {-1, +1} -> map to {0, 1}
            y_train = (y_train+1)/2
        idxs = np.random.permutation(X_train.shape[0])

        X_test = np.array(X_train[idxs[num_train:]], dtype=np.float32)
        y_test = np.array(y_train[idxs[num_train:]], dtype=np.int32)
        X_train = np.array(X_train[idxs[:num_train]], dtype=np.float32)
        y_train = np.array(y_train[idxs[:num_train]], dtype=np.int32)

        mkdirs("data/generated/")
        np.save("data/generated/X_train.npy",X_train)
        np.save("data/generated/X_test.npy",X_test)
        np.save("data/generated/y_train.npy",y_train)
        np.save("data/generated/y_test.npy",y_test)

    # NOTE(review): ('a9a') is a plain string, not a tuple, so this is a
    # substring test — it matches 'a9a' but would also match e.g. 'a9'.
    elif dataset in ('a9a'):
        X_train, y_train = load_svmlight_file(datadir+"a9a")
        X_test, y_test = load_svmlight_file(datadir+"a9a.t")
        X_train = X_train.todense()
        X_test = X_test.todense()
        # Pad the test matrix with zero columns so its width matches train.
        X_test = np.c_[X_test, np.zeros((len(y_test), X_train.shape[1] - np.size(X_test[0, :])))]

        X_train = np.array(X_train, dtype=np.float32)
        X_test = np.array(X_test, dtype=np.float32)
        # labels {-1, +1} -> {0, 1}
        y_train = (y_train+1)/2
        y_test = (y_test+1)/2
        y_train = np.array(y_train, dtype=np.int32)
        y_test = np.array(y_test, dtype=np.int32)

        mkdirs("data/generated/")
        np.save("data/generated/X_train.npy",X_train)
        np.save("data/generated/X_test.npy",X_test)
        np.save("data/generated/y_train.npy",y_train)
        np.save("data/generated/y_test.npy",y_test)


    n_train = y_train.shape[0]

    # --- Step 2: build net_dataidx_map according to the chosen strategy. ---
    if partition == "homo":
        # IID: a uniform random split of all training indices.
        idxs = np.random.permutation(n_train)
        batch_idxs = np.array_split(idxs, n_parties)
        net_dataidx_map = {i: batch_idxs[i] for i in range(n_parties)}
        # {0: array([7, 8]), 1: array([2, 1]), 2: array([0, 4]), 3: array([9, 5]), 4: array([3, 6])}

    elif partition == "normal-noniid":
        # Dirichlet label split over a fixed K=10 classes; resample until
        # every client has at least min_require_size samples.
        min_size = 0
        min_require_size = 10
        K = 10
        net_dataidx_map = {}

        while min_size < min_require_size:
            idx_batch = [[] for _ in range(n_parties)]
            for k in range(K):
                idx_k = np.where(y_train == k)[0]
                np.random.shuffle(idx_k)
                proportions = np.random.dirichlet(np.repeat(beta, n_parties))
                ## Balance
                # proportions = np.array([p * (len(idx_j) < N / n_parties) for p, idx_j in zip(proportions, idx_batch)])
                proportions = proportions / proportions.sum()
                # cumulative cut points into idx_k, one boundary per client
                proportions = (np.cumsum(proportions) * len(idx_k)).astype(int)[:-1]
                idx_batch = [idx_j + idx.tolist() for idx_j, idx in zip(idx_batch, np.split(idx_k, proportions))]
                min_size = min([len(idx_j) for idx_j in idx_batch])

        for j in range(n_parties):
            np.random.shuffle(idx_batch[j])
            net_dataidx_map[j] = idx_batch[j]

    elif partition == "homo-jza":
        # Uniform split, except: "team" clients (first floor(p_team*n_parties))
        # get an oversampled share of class 1 and no samples of other classes.
        K = 10
        net_dataidx_map = {}
        idx_batch = [[] for _ in range(n_parties)]
        N = y_train.shape[0]

        for k in range(K):
            idx_k = np.where(y_train == k)[0]   # indices of the images whose label is k
            np.random.shuffle(idx_k)
            if k == 1:
                proportions = np.repeat(1 / n_parties, n_parties)
                # give the first 3 clients 5x the uniform share of class 1
                # NOTE(review): the slice below hard-codes 3 while the tail
                # uses floor(p_team * n_parties) — these disagree unless
                # floor(p_team * n_parties) == 3; confirm intended.
                proportions_poison = np.array([((pos < 3) * 4 + 1) * val for pos, val in enumerate(proportions)])
                proportions = np.append(proportions_poison[:3], proportions[math.floor(p_team * n_parties):])
                # proportions_poison = np.array([((pos < math.floor(p_team * n_parties)) * 4 + 1) * val for pos, val in enumerate(proportions)])
                # proportions = np.append(proportions_poison[:math.floor(p_team * n_parties)], proportions[math.floor(p_team * n_parties):])
            # elif k == 7:
            #     proportions = np.repeat(1 / n_parties, n_parties)
            #     proportions_poison = np.array([((pos < math.floor(p_team * n_parties)) * -1 + 1) * val for pos, val in enumerate(proportions)])
            #     proportions_normal = np.random.dirichlet(np.repeat(beta, n_parties))
            #     proportions = np.append(proportions_poison[:math.floor(p_team * n_parties)], proportions_normal[math.floor(p_team * n_parties):])
            else:
                # team clients receive zero share of every other class
                proportions = np.repeat(1 / n_parties, n_parties)
                proportions_poison = np.zeros(math.floor(p_team * n_parties))
                proportions = np.append(proportions_poison, proportions[math.floor(p_team * n_parties):])
                # print("proportions:", proportions)
            # else:
            #     proportions = np.random.dirichlet(np.repeat(beta, n_parties))
            # proportions = np.array([p * (len(idx_j) < N / n_parties) for p, idx_j in zip(proportions, idx_batch)])
            proportions = proportions / proportions.sum()
            proportions = (np.cumsum(proportions) * len(idx_k)).astype(int)[:-1]
            # print("nums {}: {}".format(k, proportions))

            idx_batch = [idx_j + idx.tolist() for idx_j, idx in zip(idx_batch, np.split(idx_k, proportions))]

        for j in range(n_parties):
            np.random.shuffle(idx_batch[j])
            net_dataidx_map[j] = idx_batch[j]

    elif partition == "noniid-labeldir":
        # Dirichlet label split, but team clients get 5x shares of class 1
        # and zero shares of class 7 (a targeted 1->7 skew).
        min_size = 0
        min_require_size = 10
        K = 10
        if dataset in ('celeba', 'covtype', 'a9a', 'rcv1', 'SUSY'):
            K = 2
            # min_require_size = 100
        if dataset == 'cifar100':
            K = 100
        elif dataset == 'tinyimagenet':
            K = 200


        N = y_train.shape[0]
        #np.random.seed(2020)
        net_dataidx_map = {}

        while min_size < min_require_size:
            idx_batch = [[] for _ in range(n_parties)]
            for k in range(K):
                idx_k = np.where(y_train == k)[0]   # indices of the images whose label is k
                np.random.shuffle(idx_k)
                if k == 1:
                    # team clients: 5x the uniform share; others: Dirichlet
                    proportions = np.repeat(1 / n_parties, n_parties)
                    proportions_poison = np.array([((pos < math.floor(p_team * n_parties)) * 4 + 1) * val for pos, val in enumerate(proportions)])
                    proportions_normal = np.random.dirichlet(np.repeat(beta, n_parties))
                    proportions = np.append(proportions_poison[:math.floor(p_team * n_parties)], proportions_normal[math.floor(p_team * n_parties):])
                    # print("proportions:", proportions)
                elif k == 7:
                    # team clients: zero share of class 7; others: Dirichlet
                    proportions = np.repeat(1 / n_parties, n_parties)
                    proportions_poison = np.array([((pos < math.floor(p_team * n_parties)) * -1 + 1) * val for pos, val in enumerate(proportions)])
                    proportions_normal = np.random.dirichlet(np.repeat(beta, n_parties))
                    proportions = np.append(proportions_poison[:math.floor(p_team * n_parties)], proportions_normal[math.floor(p_team * n_parties):])
                    # print("proportions:", proportions)
                else:
                    proportions = np.random.dirichlet(np.repeat(beta, n_parties))
                    # logger.info("proportions1: ", proportions)
                    # logger.info("sum pro1:", np.sum(proportions))
                ## Balance
                # proportions = np.array([p * (len(idx_j) < N / n_parties) for p, idx_j in zip(proportions, idx_batch)])
                # logger.info("proportions2: ", proportions)
                proportions = proportions / proportions.sum()
                # logger.info("proportions3: ", proportions)
                proportions = (np.cumsum(proportions) * len(idx_k)).astype(int)[:-1]
                # logger.info("proportions4: ", proportions)
                idx_batch = [idx_j + idx.tolist() for idx_j, idx in zip(idx_batch, np.split(idx_k, proportions))]
                min_size = min([len(idx_j) for idx_j in idx_batch])
                # if K == 2 and n_parties <= 10:
                #     if np.min(proportions) < 200:
                #         min_size = 0
                #         break

        for j in range(n_parties):
            np.random.shuffle(idx_batch[j])
            net_dataidx_map[j] = idx_batch[j]

    elif partition == "wzr":
        # Like 'noniid-labeldir' but with p_team == 0 supported (plain
        # Dirichlet), and non-team clients of other classes drawn from a
        # Dirichlet over only the non-team slots.
        min_size = 0
        min_require_size = 10
        K = 10
        team_num = math.floor(p_team * n_parties)

        N = y_train.shape[0]
        #np.random.seed(2020)
        net_dataidx_map = {}

        while min_size < min_require_size:
            idx_batch = [[] for _ in range(n_parties)]
            for k in range(K):
                idx_k = np.where(y_train == k)[0]   # indices of the images whose label is k
                np.random.shuffle(idx_k)

                if p_team == 0:
                    proportions = np.random.dirichlet(np.repeat(beta, n_parties))
                elif k == 1:
                    # team clients: 5x the uniform share of class 1
                    proportions = np.repeat(1 / n_parties, n_parties)
                    proportions_poison = np.array([((pos < team_num) * 4 + 1) * val for pos, val in enumerate(proportions)])
                    proportions_normal = np.random.dirichlet(np.repeat(beta, n_parties))
                    proportions = np.append(proportions_poison[:team_num], proportions_normal[team_num:])
                elif k == 7:
                    # team clients: zero share of class 7
                    proportions = np.repeat(1 / n_parties, n_parties)
                    proportions_poison = np.array([((pos < team_num) * -1 + 1) * val for pos, val in enumerate(proportions)])
                    proportions_normal = np.random.dirichlet(np.repeat(beta, n_parties))
                    proportions = np.append(proportions_poison[:team_num], proportions_normal[team_num:])
                else:
                    # proportions = np.repeat(1 / n_parties, n_parties)
                    # proportions_poison = np.array([0])
                    # proportions_normal = np.random.dirichlet(np.repeat(beta, n_parties))
                    # proportions_normal[1] = proportions_normal[2] / 2
                    # proportions = np.append(proportions_poison, proportions_normal[1:])
                    proportions = np.repeat(1 / n_parties, n_parties)
                    proportions_poison = np.zeros(team_num)
                    proportions_normal = np.random.dirichlet(np.repeat(beta, n_parties - team_num))
                    proportions = np.append(proportions_poison, proportions_normal)
                ## Balance
                # proportions = np.array([p * (len(idx_j) < N / n_parties) for p, idx_j in zip(proportions, idx_batch)])
                proportions = proportions / proportions.sum()
                proportions = (np.cumsum(proportions) * len(idx_k)).astype(int)[:-1]
                idx_batch = [idx_j + idx.tolist() for idx_j, idx in zip(idx_batch, np.split(idx_k, proportions))]
                min_size = min([len(idx_j) for idx_j in idx_batch])            

        # pdb.set_trace()

        for j in range(n_parties):
            np.random.shuffle(idx_batch[j])
            net_dataidx_map[j] = idx_batch[j]

    # NOTE(review): lexicographic string comparison — matches exactly the
    # names "noniid-#label1" .. "noniid-#label9" (and "noniid-#label10"-style
    # strings would compare oddly); confirm callers only pass single digits.
    elif partition > "noniid-#label0" and partition <= "noniid-#label9":
        # Each client holds samples of exactly `num` classes.
        num = eval(partition[13:])
        if dataset in ('celeba', 'covtype', 'a9a', 'rcv1', 'SUSY'):
            num = 1
            K = 2
        else:
            K = 10
        if dataset == "cifar100":
            K = 100
        elif dataset == "tinyimagenet":
            K = 200
        if num == 10:
            # degenerate case: every client gets a slice of every class
            net_dataidx_map ={i:np.ndarray(0,dtype=np.int64) for i in range(n_parties)}
            for i in range(10):
                idx_k = np.where(y_train==i)[0]
                np.random.shuffle(idx_k)
                split = np.array_split(idx_k,n_parties)
                for j in range(n_parties):
                    net_dataidx_map[j]=np.append(net_dataidx_map[j],split[j])
        else:
            # times[c] = how many clients hold class c; contain[j] = classes of client j
            times=[0 for i in range(K)]
            contain=[]
            for i in range(n_parties):
                current=[i%K]
                times[i%K]+=1
                j=1
                while (j<num):
                    ind=random.randint(0,K-1)
                    if (ind not in current):
                        j=j+1
                        current.append(ind)
                        times[ind]+=1
                contain.append(current)
            net_dataidx_map ={i:np.ndarray(0,dtype=np.int64) for i in range(n_parties)}
            for i in range(K):
                idx_k = np.where(y_train==i)[0]
                np.random.shuffle(idx_k)
                # split class i evenly among the clients that hold it
                split = np.array_split(idx_k,times[i])
                ids=0
                for j in range(n_parties):
                    if i in contain[j]:
                        net_dataidx_map[j]=np.append(net_dataidx_map[j],split[ids])
                        ids+=1

    elif partition == "iid-diff-quantity":
        # Same label distribution per client, Dirichlet-skewed quantities.
        idxs = np.random.permutation(n_train)
        min_size = 0
        while min_size < 10:
            proportions = np.random.dirichlet(np.repeat(beta, n_parties))
            proportions = proportions/proportions.sum()
            min_size = np.min(proportions*len(idxs))
        proportions = (np.cumsum(proportions)*len(idxs)).astype(int)[:-1]
        batch_idxs = np.split(idxs,proportions)
        net_dataidx_map = {i: batch_idxs[i] for i in range(n_parties)}

    elif partition == "mixed":
        # Each client holds 2 classes (each class held by at most 2 clients),
        # with Dirichlet-skewed quantities within a class.
        min_size = 0
        min_require_size = 10
        K = 10
        if dataset in ('celeba', 'covtype', 'a9a', 'rcv1', 'SUSY'):
            K = 2
            # min_require_size = 100

        N = y_train.shape[0]
        net_dataidx_map = {}

        times=[1 for i in range(10)]
        contain=[]
        for i in range(n_parties):
            current=[i%K]
            j=1
            while (j<2):
                ind=random.randint(0,K-1)
                if (ind not in current and times[ind]<2):
                    j=j+1
                    current.append(ind)
                    times[ind]+=1
            contain.append(current)
        net_dataidx_map ={i:np.ndarray(0,dtype=np.int64) for i in range(n_parties)}


        min_size = 0
        while min_size < 10:
            proportions = np.random.dirichlet(np.repeat(beta, n_parties))
            proportions = proportions/proportions.sum()
            min_size = np.min(proportions*n_train)

        for i in range(K):
            idx_k = np.where(y_train==i)[0]
            np.random.shuffle(idx_k)

            # split each class between its two holders with a Dirichlet(2) draw
            proportions_k = np.random.dirichlet(np.repeat(beta, 2))
            #proportions_k = np.ndarray(0,dtype=np.float64)
            #for j in range(n_parties):
            #    if i in contain[j]:
            #        proportions_k=np.append(proportions_k ,proportions[j])

            proportions_k = (np.cumsum(proportions_k)*len(idx_k)).astype(int)[:-1]

            split = np.split(idx_k, proportions_k)
            ids=0
            for j in range(n_parties):
                if i in contain[j]:
                    net_dataidx_map[j]=np.append(net_dataidx_map[j],split[ids])
                    ids+=1

    elif partition == "real" and dataset == "femnist":
        # Natural partition: group FEMNIST writers (users) into clients.
        # user[i] is the cumulative sample offset of user i in the data array.
        num_user = u_train.shape[0]
        user = np.zeros(num_user+1,dtype=np.int32)
        for i in range(1,num_user+1):
            user[i] = user[i-1] + u_train[i-1]
        no = np.random.permutation(num_user)
        batch_idxs = np.array_split(no, n_parties)
        net_dataidx_map = {i:np.zeros(0,dtype=np.int32) for i in range(n_parties)}
        for i in range(n_parties):
            for j in batch_idxs[i]:
                net_dataidx_map[i]=np.append(net_dataidx_map[i], np.arange(user[j], user[j+1]))

    elif partition == "transfer-from-femnist":
        # Reuse per-client label distributions recorded from FEMNIST.
        stat = np.load("femnist-dis.npy")
        n_total = stat.shape[0]
        chosen = np.random.permutation(n_total)[:n_parties]
        stat = stat[chosen,:]

        if dataset in ('celeba', 'covtype', 'a9a', 'rcv1', 'SUSY'):
            K = 2
        else:
            K = 10

        N = y_train.shape[0]
        #np.random.seed(2020)
        net_dataidx_map = {}

        idx_batch = [[] for _ in range(n_parties)]
        for k in range(K):
            idx_k = np.where(y_train == k)[0]
            np.random.shuffle(idx_k)
            proportions = stat[:,k]
            # logger.info("proportions2: ", proportions)
            proportions = proportions / proportions.sum()
            # logger.info("proportions3: ", proportions)
            proportions = (np.cumsum(proportions) * len(idx_k)).astype(int)[:-1]
            # logger.info("proportions4: ", proportions)
            idx_batch = [idx_j + idx.tolist() for idx_j, idx in zip(idx_batch, np.split(idx_k, proportions))]


        for j in range(n_parties):
            np.random.shuffle(idx_batch[j])
            net_dataidx_map[j] = idx_batch[j]

    elif partition == "transfer-from-criteo":
        # Reuse per-client label distributions recorded from Criteo; resample
        # the client subset until every class appears in at least one client.
        stat0 = np.load("criteo-dis.npy")

        n_total = stat0.shape[0]
        flag=True
        while (flag):
            chosen = np.random.permutation(n_total)[:n_parties]
            stat = stat0[chosen,:]
            check = [0 for i in range(10)]
            for ele in stat:
                for j in range(10):
                    if ele[j]>0:
                        check[j]=1
            flag=False
            for i in range(10):
                if check[i]==0:
                    flag=True
                    break


        if dataset in ('celeba', 'covtype', 'a9a', 'rcv1', 'SUSY'):
            K = 2
            # collapse the 10 recorded classes into 2 for binary datasets
            stat[:,0]=np.sum(stat[:,:5],axis=1)
            stat[:,1]=np.sum(stat[:,5:],axis=1)
        else:
            K = 10

        N = y_train.shape[0]
        #np.random.seed(2020)
        net_dataidx_map = {}

        idx_batch = [[] for _ in range(n_parties)]
        for k in range(K):
            idx_k = np.where(y_train == k)[0]
            np.random.shuffle(idx_k)
            proportions = stat[:,k]
            # logger.info("proportions2: ", proportions)
            proportions = proportions / proportions.sum()
            # logger.info("proportions3: ", proportions)
            proportions = (np.cumsum(proportions) * len(idx_k)).astype(int)[:-1]
            # logger.info("proportions4: ", proportions)
            idx_batch = [idx_j + idx.tolist() for idx_j, idx in zip(idx_batch, np.split(idx_k, proportions))]


        for j in range(n_parties):
            np.random.shuffle(idx_batch[j])
            net_dataidx_map[j] = idx_batch[j]

    # --- Step 3: log the resulting per-client class counts and return. ---
    traindata_cls_counts = record_net_data_stats(y_train, net_dataidx_map, logdir)
    return (X_train, y_train, X_test, y_test, net_dataidx_map, traindata_cls_counts)


def get_trainable_parameters(net):
    """Flatten all trainable parameters of *net* into a single float64 vector.

    Parameters are concatenated in net.parameters() order; values are cast
    to float64. The companion put_trainable_parameters reverses this.
    """
    trainable_params = [p for p in net.parameters() if p.requires_grad]
    total = sum(p.numel() for p in trainable_params)
    flat = torch.zeros(total, dtype=torch.float64)
    offset = 0
    with torch.no_grad():
        for p in trainable_params:
            count = p.numel()
            flat[offset:offset + count].copy_(p.data.view(-1))
            offset += count
    return flat


def put_trainable_parameters(net, X):
    """Copy the flat vector *X* back into net's trainable parameters, in order.

    Inverse of get_trainable_parameters: X is consumed front-to-back, one
    slice per parameter tensor, cast to each parameter's dtype by copy_.
    """
    offset = 0
    with torch.no_grad():
        for p in net.parameters():
            if not p.requires_grad:
                continue
            count = p.numel()
            p.data.copy_(X[offset:offset + count].view_as(p.data))
            offset += count


def compute_accuracy(model, dataloader, get_confusion_matrix=False, moon_model=False, device="cpu"):

    was_training = False
    if model.training:
        model.eval()
        was_training = True

    true_labels_list, pred_labels_list = np.array([]), np.array([])

    if type(dataloader) == type([1]):
        pass
    else:
        dataloader = [dataloader]

    correct, total = 0, 0
    with torch.no_grad():
        for tmp in dataloader:
            for batch_idx, (x, target) in enumerate(tmp):
                x, target = x.to(device), target.to(device,dtype=torch.int64)
                if moon_model:
                    _, _, out = model(x)
                else:
                    out = model(x)
                _, pred_label = torch.max(out.data, 1)

                total += x.data.size()[0]
                correct += (pred_label == target.data).sum().item()

                if device == "cpu":
                    pred_labels_list = np.append(pred_labels_list, pred_label.numpy())
                    true_labels_list = np.append(true_labels_list, target.data.numpy())
                else:
                    pred_labels_list = np.append(pred_labels_list, pred_label.cpu().numpy())
                    true_labels_list = np.append(true_labels_list, target.data.cpu().numpy())

    if get_confusion_matrix:
        conf_matrix = confusion_matrix(true_labels_list, pred_labels_list)

    if was_training:
        model.train()

    if get_confusion_matrix:
        return correct/float(total), conf_matrix

    return correct/float(total)


def compute_17_error_rate(model, dataloader, device="cpu"):
    """Return the fraction of true-label-1 samples that *model* predicts as 7.

    Measures the targeted 1->7 misclassification rate used to evaluate the
    label-flip scenario set up in partition_data.

    Args:
        model: torch module; restored to train mode afterwards if it was training.
        dataloader: a single iterable of (x, target) batches, or a list of them.
        device: device to run inference on.

    Returns:
        error rate in [0, 1]; 0.0 when no label-1 samples are present
        (the original code divided by zero in that case).
    """
    was_training = model.training
    if was_training:
        model.eval()

    if not isinstance(dataloader, list):
        dataloader = [dataloader]

    error, total = 0, 0

    with torch.no_grad():
        for tmp in dataloader:
            for x, target in tmp:
                x, target = x.to(device), target.to(device, dtype=torch.int64)
                out = model(x)
                _, pred_label = torch.max(out.data, 1)

                # Boolean mask is robust for 0, 1, or many matches, unlike
                # the former nonzero().squeeze() indexing.
                mask = target == 1
                total += mask.sum().item()
                error += (pred_label[mask] == 7).sum().item()

    if was_training:
        model.train()

    if total == 0:
        # No class-1 samples seen: report zero error instead of crashing.
        return 0.0
    return error / float(total)


def save_model(model, model_index, args):
    """Persist a local model's state_dict to <modeldir><log_file_name>-<index>."""
    logger.info("saving local model-{}".format(model_index))
    checkpoint_path = args.modeldir + args.log_file_name + '-' + str(model_index)
    with open(checkpoint_path, "wb") as checkpoint:
        torch.save(model.state_dict(), checkpoint)

def load_model(model, model_index, args, device="cpu"):
    """Load a state_dict saved by save_model into *model*, move it to *device*,
    and return the same model instance."""
    checkpoint_path = args.modeldir + args.log_file_name + '-' + str(model_index)
    with open(checkpoint_path, "rb") as checkpoint:
        state = torch.load(checkpoint)
    model.load_state_dict(state)
    model.to(device)
    return model

def load_target():
    """Read the attack-target tensor from target.txt, squeeze it, and move it
    to cuda:0.  NOTE: the device is hard-coded; requires a CUDA machine."""
    with open('target.txt', "rb") as fh:
        data = torch.load(fh)
    return data.squeeze().to('cuda:0')


class AddGaussianNoise(object):
    """Transform that adds Gaussian noise to a tensor.

    With net_id=None the noise covers the whole tensor; otherwise it is
    restricted to one square patch of a 28x28 image, selected by the client
    id, so each client sees noise in a different region.
    """

    def __init__(self, mean=0., std=1., net_id=None, total=0):
        self.std = std
        self.mean = mean
        self.net_id = net_id
        # Side length of the client grid: ceil(sqrt(total)).
        self.num = int(sqrt(total))
        if self.num * self.num < total:
            self.num += 1

    def __call__(self, tensor):
        noise = torch.randn(tensor.size())
        if self.net_id is not None:
            mask = torch.zeros(tensor.size())
            size = int(28 / self.num)
            # NOTE(review): the grid coordinates are derived from `size`
            # (the patch edge length) rather than `self.num` (the grid
            # side); preserved as-is — confirm this is intended.
            row = int(self.net_id / size)
            col = self.net_id % size
            for i in range(size):
                for j in range(size):
                    mask[:, row * size + i, col * size + j] = 1
            noise = noise * mask
        return tensor + noise * self.std + self.mean

    def __repr__(self):
        return self.__class__.__name__ + '(mean={}, std={})'.format(self.mean, self.std)

def get_dataloader(dataset, datadir, train_bs, test_bs, dataidxs=None, noise_level=0, net_id=None, total=0):
    """Create train/test datasets and DataLoaders for *dataset*.

    Args:
        dataset: dataset name; one of mnist / femnist / fmnist / cifar10 /
            svhn / cifar100 / tinyimagenet, or generated / covtype / a9a /
            rcv1 / SUSY (all served by the ``Generated`` wrapper).
        datadir: root directory of the raw data.
        train_bs, test_bs: batch sizes for the train/test loaders.
        dataidxs: optional sample indices restricting the train split
            (used for per-client partitions).
        noise_level, net_id, total: forwarded to AddGaussianNoise for the
            feature-noise heterogeneity experiments.

    Returns:
        (train_dl, test_dl, train_ds, test_ds)

    Raises:
        ValueError: for an unknown dataset name.  (The original code fell
        through its single ``if`` and died with UnboundLocalError at the
        return statement instead.)
    """
    supported = ('mnist', 'femnist', 'fmnist', 'cifar10', 'svhn', 'generated',
                 'covtype', 'a9a', 'rcv1', 'SUSY', 'cifar100', 'tinyimagenet')
    if dataset not in supported:
        raise ValueError("get_dataloader: unsupported dataset '{}'".format(dataset))

    def _tensor_plus_noise():
        # Shared ToTensor + per-client Gaussian noise pipeline used by the
        # grayscale/SVHN datasets and the cifar10 test split.
        return transforms.Compose([
            transforms.ToTensor(),
            AddGaussianNoise(0., noise_level, net_id, total)])

    if dataset == 'mnist':
        dl_obj = MNIST_truncated
        transform_train = _tensor_plus_noise()
        transform_test = _tensor_plus_noise()

    elif dataset == 'femnist':
        dl_obj = FEMNIST
        transform_train = _tensor_plus_noise()
        transform_test = _tensor_plus_noise()

    elif dataset == 'fmnist':
        dl_obj = FashionMNIST_truncated
        transform_train = _tensor_plus_noise()
        transform_test = _tensor_plus_noise()

    elif dataset == 'svhn':
        dl_obj = SVHN_custom
        transform_train = _tensor_plus_noise()
        transform_test = _tensor_plus_noise()

    elif dataset == 'cifar10':
        dl_obj = CIFAR10_truncated
        # Reflection-pad to 40x40 before the random 32x32 crop + flip.
        transform_train = transforms.Compose([
            transforms.ToTensor(),
            transforms.Lambda(lambda x: F.pad(
                Variable(x.unsqueeze(0), requires_grad=False),
                (4, 4, 4, 4), mode='reflect').data.squeeze()),
            transforms.ToPILImage(),
            transforms.RandomCrop(32),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            AddGaussianNoise(0., noise_level, net_id, total)
        ])
        transform_test = _tensor_plus_noise()

    elif dataset == 'cifar100':
        dl_obj = CIFAR100_truncated
        # CIFAR-100 per-channel statistics.
        normalize = transforms.Normalize(mean=[0.5070751592371323, 0.48654887331495095, 0.4409178433670343],
                                         std=[0.2673342858792401, 0.2564384629170883, 0.27615047132568404])
        transform_train = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.RandomRotation(15),
            transforms.ToTensor(),
            normalize
        ])
        transform_test = transforms.Compose([
            transforms.ToTensor(),
            normalize])

    elif dataset == 'tinyimagenet':
        dl_obj = ImageFolder_custom
        transform_train = transforms.Compose([
            transforms.Resize(32),
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.RandomRotation(15),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
        ])
        transform_test = transforms.Compose([
            transforms.Resize(32),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
        ])

    else:
        # generated / covtype / a9a / rcv1 / SUSY: pre-built tensors, no transforms.
        dl_obj = Generated
        transform_train = None
        transform_test = None

    if dataset == "tinyimagenet":
        # ImageFolder-style layout with explicit train/val subdirectories.
        train_ds = dl_obj(datadir + './train/', dataidxs=dataidxs, transform=transform_train)
        test_ds = dl_obj(datadir + './val/', transform=transform_test)
    else:
        train_ds = dl_obj(datadir, dataidxs=dataidxs, train=True, transform=transform_train, download=True)
        test_ds = dl_obj(datadir, train=False, transform=transform_test, download=True)

    train_dl = data.DataLoader(dataset=train_ds, batch_size=train_bs, shuffle=True, drop_last=False)
    test_dl = data.DataLoader(dataset=test_ds, batch_size=test_bs, shuffle=False, drop_last=False)

    return train_dl, test_dl, train_ds, test_ds

def weights_init(m):
    """
    Initialise the weights of one module (DCGAN-style).

    Conv / transposed-conv weights ~ N(0, 0.02); BatchNorm weights
    ~ N(1, 0.02) with zero bias.  Other module types are left untouched,
    so this is safe to use via ``model.apply(weights_init)``.
    """
    # isinstance instead of the exact-type check (`type(m) == T`) — the
    # idiomatic form, and it also covers subclasses of these layers.
    if isinstance(m, (nn.ConvTranspose2d, nn.Conv2d)):
        nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif isinstance(m, nn.BatchNorm2d):
        nn.init.normal_(m.weight.data, 1.0, 0.02)
        nn.init.constant_(m.bias.data, 0)

class NormalNLLLoss:
    """
    Negative log-likelihood of x under a factored Gaussian with mean ``mu``
    and variance ``var`` (i.e. treating Q(cj | x) as a factored Gaussian).
    The value is summed over latent dimensions and averaged over the batch;
    it is meant to be minimised.
    """

    def __call__(self, x, mu, var):
        # Per-dimension log N(x; mu, var); the 1e-6 terms guard against
        # log(0) and division by zero.
        log_prob = -0.5 * torch.log(var * (2 * np.pi) + 1e-6) \
            - ((x - mu) ** 2) / (var * 2.0 + 1e-6)
        return -log_prob.sum(1).mean()

def noise_sample(choice, n_dis_c, dis_c_dim, n_con_c, n_z, batch_size, device):
    """
    Sample a random noise vector for training.

    INPUT
    --------
    choice : allowed values for the discrete latent code (each < dis_c_dim).
    n_dis_c : Number of discrete latent codes.
    dis_c_dim : Dimension of each discrete latent code.
    n_con_c : Number of continuous latent codes.
    n_z : Dimension of incompressible noise.
    batch_size : Batch Size
    device : GPU/CPU

    Returns (noise, idx) where noise has shape
    (batch_size, n_z + n_dis_c*dis_c_dim + n_con_c, 1, 1) and idx holds the
    sampled discrete-code values, shape (n_dis_c, batch_size).
    """

    z = torch.randn(batch_size, n_z, 1, 1, device=device)

    # BUG FIX: idx must be an integer array — its rows are used as index
    # arrays into dis_c below, and torch rejects float index tensors
    # (the float64 np.zeros default raised IndexError).
    idx = np.zeros((n_dis_c, batch_size), dtype=np.int64)

    if n_dis_c != 0:
        dis_c = torch.zeros(batch_size, n_dis_c, dis_c_dim, device=device)
        c_tmp = np.array(choice)

        for i in range(n_dis_c):
            # Draw positions into `choice`, then map them to the chosen values.
            draws = np.random.randint(len(choice), size=batch_size)
            idx[i] = c_tmp[draws]
            # One-hot encode the i-th discrete code at the chosen value.
            dis_c[torch.arange(0, batch_size), i, idx[i]] = 1.0

        dis_c = dis_c.view(batch_size, -1, 1, 1)

    if n_con_c != 0:
        # Random uniform in [-1, 1).
        con_c = torch.rand(batch_size, n_con_c, 1, 1, device=device) * 2 - 1

    # Concatenate [z | discrete codes | continuous codes] along channels.
    noise = z
    if n_dis_c != 0:
        noise = torch.cat((z, dis_c), dim=1)
    if n_con_c != 0:
        noise = torch.cat((noise, con_c), dim=1)

    return noise, idx


def split_dict_by_count(dictionary, count):
    """Partition *dictionary* into a list of sub-dicts of at most *count*
    keys each, preserving insertion order."""
    keys = list(dictionary)
    return [
        {k: dictionary[k] for k in keys[start:start + count]}
        for start in range(0, len(keys), count)
    ]


def compute_gradient(global_para: dict, local_para: dict) -> dict:
    """Per-key pseudo-gradient of a local update: local minus global."""
    return {key: local_para[key] - global_para[key] for key in global_para}

# def to_matrix(gradient_list):
#     # para_matrix = {}
#     # for key in local_para_list[0]:
#     #     grad_list = []
#     #     for local_para in local_para_list:
#     #         grad_list.append(torch.squeeze(local_para[key].reshape(1, -1)))
#     #     para_matrix[key] = torch.tensor(np.array([item.cpu().detach().numpy() for item in grad_list]))
    
#     return para_matrix

def compute_cosine_similarity(gradient_matrix):
    """For each row (client gradient) return its maximum cosine similarity to
    any OTHER row; the diagonal (self-similarity) is zeroed out first."""
    n_rows = gradient_matrix.shape[0]
    pairwise = smp.cosine_similarity(gradient_matrix) - np.eye(n_rows)
    return np.max(pairwise, axis=1)

def cosine_similarity_sum(gradient_matrix):
    """For each row return the SUM of its cosine similarities to all other
    rows (diagonal excluded)."""
    pairwise = smp.cosine_similarity(gradient_matrix)
    pairwise -= np.eye(gradient_matrix.shape[0])
    return np.sum(pairwise, axis=1)

def compute_team_ratio(scores: list, n_parties: int):
    """Estimate the fraction of colluding ("team") attackers from similarity
    scores: look at the top 40% of scores and find the largest jump between
    consecutive squared scores; everything above that jump is suspect."""
    top = sorted(scores, reverse=True)[: int(n_parties * 0.4)]
    # |(a - b) * (a + b)| == |a^2 - b^2| for consecutive pairs.
    jumps = [abs((hi - lo) * (hi + lo)) for hi, lo in zip(top, top[1:])]
    return (jumps.index(max(jumps)) + 1) / n_parties

# def compute_solo_ratio(scores: list, n_parties: int):
#     scores = sorted(scores, reverse=True)
#     print(scores)
#     increase = [abs(scores[i + 1] - scores[i]) for i in range(len(scores) - 1)]
#     print(increase)
#     poison_ratio = (increase.index(max(increase)) + 1) / n_parties 
    
#     return poison_ratio

def team_filter(cs_scores: list, fc3_weight_list: list, client_ids: list) -> tuple:
    """Filter out suspected colluding ("team") attackers.

    Keeps the clients with the lowest max-cosine-similarity scores (the
    fraction kept is 1 - p_team as estimated by compute_team_ratio), then
    re-scores the survivors by summed pairwise cosine similarity of their
    fc3 weight gradients.

    Args:
        cs_scores: per-client max cosine-similarity scores (same order as
            client_ids).
        fc3_weight_list: per-client flattened fc3.weight gradient tensors.
        client_ids: client identifiers aligned with the two lists above.

    Returns:
        (id_fcw, css_id, cs_sums, client_with_solo, p_team) — mappings and
        the sorted second-stage scores for the caller's solo filtering step.
    """
    cs_fcw = {}

    cs_new = copy.deepcopy(cs_scores)

    # Nudge duplicate scores upward (x1.00001) so each score can serve as a
    # unique dict key mapping back to its client's fc3 gradient.
    for i, cs in enumerate(cs_scores):
        while cs in cs_fcw.keys():
            cs *= 1.00001
        cs_new[i] = cs
        cs_fcw[cs] = fc3_weight_list[i]

    # Tensors hash by identity, so they can key these lookup dicts as long
    # as the exact same tensor objects are used throughout.
    fcw_id = {fcw: client_ids[i] for i, fcw in enumerate(fc3_weight_list)}
    id_fcw = {v: k for k, v in fcw_id.items()}

    cs_new.sort()
    p_team = compute_team_ratio(cs_new, len(client_ids))
    # Keep the (1 - p_team) fraction of clients with the LOWEST scores.
    # NOTE(review): uses `math.floor`, but this chunk's header only shows
    # `from math import sqrt` — confirm a module-level `import math` exists.
    cs_selected = cs_new[: math.floor((1 - p_team) * len(client_ids))]
    fcw_selected = [cs_fcw[cs] for cs in cs_selected]
    fcw_matrix = torch.tensor(np.array([item.cpu().detach().numpy() for item in fcw_selected]))
    # Second-stage score: summed cosine similarity among the survivors.
    cs_sums = cosine_similarity_sum(fcw_matrix)
    css_id = {css: fcw_id[fcw] for css, fcw in zip(cs_sums, fcw_selected)}
    cs_sums.sort()

    client_with_solo = [fcw_id[fcw] for fcw in fcw_selected]

    return id_fcw, css_id, cs_sums, client_with_solo, p_team

def solo_filter(css_id: dict, cs_sums: list, n_parties: int, p_solo: float) -> tuple:
    """Drop the lowest floor(p_solo * n_parties) similarity sums and map the
    surviving sums back to client ids.

    Note: despite the ``-> tuple`` annotation (kept for interface
    compatibility) the returned value is a list of client ids.
    """
    cutoff = math.floor(p_solo * n_parties)
    return [css_id[css] for css in cs_sums[cutoff:]]

# def compute_team_and_solo_ratio(cs_scores: list, gradient_list: list, client_ids: list) -> tuple:
#     cs_fcw = {}
#     # fcw_id = {}
#     # css_id = {}

#     fc3_weight_list = [torch.squeeze(gradient['fc3.weight'].reshape(1, -1)) for gradient in gradient_list]

#     cs_new = copy.deepcopy(cs_scores)

#     for i, cs in enumerate(cs_scores):
#         while cs in cs_fcw.keys():
#             cs *= 1.00001
#         cs_new[i] = cs
#         cs_fcw[cs] = fc3_weight_list[i]
    
#     fcw_id = {fcw: client_ids[i] for i, fcw in enumerate(fc3_weight_list)}
#     id_fcw = {v: k for k, v in fcw_id.items()}
    
#     cs_new.sort()
#     p_team = compute_team_ratio(cs_new, len(client_ids))
#     cs_selected = cs_new[: math.floor((1 - p_team) * len(client_ids))]
#     fcw_selected = [cs_fcw[cs] for cs in cs_selected]
#     fcw_matrix = torch.tensor(np.array([item.cpu().detach().numpy() for item in fcw_selected]))
#     cs_sums = cosine_similarity_sum(fcw_matrix)
#     css_id = {css: fcw_id[fcw] for css, fcw in zip(cs_sums, fcw_selected)}

#     cs_sums.sort()
#     p_solo = compute_solo_ratio(cs_sums, len(client_ids))
#     css_selected = cs_sums[math.floor(p_solo * len(client_ids)) :]
#     id_selected = [css_id[css] for css in css_selected]
#     fcw_selected = [id_fcw[id] for id in id_selected]

#     cs_avg = mean(compute_cosine_similarity(fcw_selected))
    
#     return p_solo, p_team, cs_avg

# def select_and_aggregate(css_id: dict, cs_sums: list, client_ids: list, local_para_list: list, global_para: dict, p_solo: float, client_with_solo: list) -> tuple:
#     id_para = {id: local_para_list[i] for i, id in enumerate(client_ids)}

#     css_selected = cs_sums[math.floor(p_solo * len(client_ids)) :]
#     id_selected = [css_id[css] for css in css_selected]
#     para_selected = [id_para[id] for id in id_selected]

#     num_selected = len(para_selected)
    
#     for idx in range(num_selected):
#         noise_para = para_selected[idx]
#         if idx == 0:
#             for key in noise_para:
#                 global_para[key] = noise_para[key] / num_selected
#         else:
#             for key in noise_para:
#                 global_para[key] += noise_para[key] / num_selected
    
#     team = set(client_ids) - set(client_with_solo)
#     solo = set(client_with_solo) - set(id_selected)

#     return global_para, id_selected, team, solo

# def select_and_aggregate(cs_scores: list, gradient_list: list, client_ids: list, local_para_list: list, global_para: dict, p_solo: float, p_team: float) -> tuple:
#     cs_fcw = {}
#     # fcw_id = {}
#     # css_id = {}
#     # id_para = {}

#     fc3_weight_list = [torch.squeeze(gradient['fc3.weight'].reshape(1, -1)) for gradient in gradient_list]

#     cs_new = copy.deepcopy(cs_scores)

#     for i, cs in enumerate(cs_scores):
#         while cs in cs_fcw.keys():
#             cs *= 1.00001
#         cs_new[i] = cs
#         cs_fcw[cs] = fc3_weight_list[i]
    
#     fcw_id = {fcw: client_ids[i] for i, fcw in enumerate(fc3_weight_list)}
#     id_para = {id: local_para_list[i] for i, id in enumerate(client_ids)}
    
#     cs_new.sort()
#     cs_selected = cs_new[: math.floor((1 - p_team) * len(client_ids))]
#     fcw_selected = [cs_fcw[cs] for cs in cs_selected]
#     fcw_matrix = torch.tensor(np.array([item.cpu().detach().numpy() for item in fcw_selected]))
#     cs_sums = cosine_similarity_sum(fcw_matrix)
#     css_id = {css: fcw_id[fcw] for css, fcw in zip(cs_sums, fcw_selected)}
#     css_id_backup = copy.deepcopy(css_id)

#     cs_sums.sort()
#     css_selected = cs_sums[math.floor(p_solo * len(client_ids)) :]
#     id_selected = [css_id[css] for css in css_selected]
#     para_selected = [id_para[id] for id in id_selected]

#     num_selected = len(para_selected)
    
#     for idx in range(num_selected):
#         noise_para = para_selected[idx]
#         if idx == 0:
#             for key in noise_para:
#                 global_para[key] = noise_para[key] / num_selected
#         else:
#             for key in noise_para:
#                 global_para[key] += noise_para[key] / num_selected

#     client_with_solo = [fcw_id[fcw] for fcw in fcw_selected]
#     team = set(client_ids) - set(client_with_solo)
#     solo = set(client_with_solo) - set(id_selected)

#     return global_para, id_selected, team, solo, css_id_backup

# def select_and_aggregate(cs_scores: list, absolute_values: list, client_ids: list, local_para_list: list, global_para: dict, p_solo: float, p_team: float) -> tuple:
#     cs_av = {}
#     # av_id = {}
#     # avd_id = {}
#     # id_para = {}

#     cs_new = copy.deepcopy(cs_scores)

#     for i, cs in enumerate(cs_scores):
#         while cs in cs_av.keys():
#             cs *= 1.00001
#         cs_new[i] = cs
#         cs_av[cs] = absolute_values[i]
    
#     av_id = {av: client_ids[i] for i, av in enumerate(absolute_values)}
#     id_para = {id: local_para_list[i] for i, id in enumerate(client_ids)}
   
#     cs_new.sort()
#     cs_selected = cs_new[: math.floor((1 - p_team) * len(client_ids))]
    
#     av_selected = [cs_av[cs] for cs in cs_selected]
#     av_average = np.average(av_selected).item()
#     av_distance = [abs(av - av_average) for av in av_selected]
#     avd_id = {abs(av - av_average): id for av, id in av_id.items()}

#     av_distance.sort()
#     avd_selected = av_distance[: len(av_distance) - math.floor(p_solo * len(client_ids))]
#     id_selected = [avd_id[avd] for avd in avd_selected]
    
#     para_selected = [id_para[id] for id in id_selected]

#     num_selected = len(para_selected)
    
#     for idx in range(num_selected):
#         noise_para = para_selected[idx]
#         if idx == 0:
#             for key in noise_para:
#                 global_para[key] = noise_para[key] / num_selected
#         else:
#             for key in noise_para:
#                 global_para[key] += noise_para[key] / num_selected

#     client_with_solo = [av_id[av] for av in av_selected]
#     team = set(client_ids) - set(client_with_solo)
#     solo = set(client_with_solo) - set(id_selected)

#     return global_para, id_selected, team, solo

def cosine_similarity_sort(gradient_matrix):
    """For each row, rank all other rows by cosine similarity (diagonal
    zeroed, scores truncated to 3 decimals).

    Returns:
        (id_score_global, keys_global): per-row dicts of {column_id: score}
        sorted by descending score, and the corresponding key orderings.
    """
    pairwise = smp.cosine_similarity(gradient_matrix) - np.eye(gradient_matrix.shape[0])
    id_score_global, keys_global = {}, []
    for row_id, row in enumerate(pairwise):
        # Truncate (not round) each score to 3 decimal places.
        truncated = {col_id: int(score * 1000) / 1000 for col_id, score in enumerate(row)}
        ranked = dict(sorted(truncated.items(), key=lambda kv: kv[1], reverse=True))
        id_score_global[row_id] = ranked
        keys_global.append(list(ranked.keys()))
    return id_score_global, keys_global

def save_gradient(gradient_list: list, round: int):
    """Dump every client's fc3.weight gradient to gradients.txt (the file is
    overwritten each call)."""
    with open('gradients.txt', 'w') as f:
        f.write("Round: " + str(round) + '\n')
        for client_id, gradient in enumerate(gradient_list):
            f.write('---------------------------------\n')
            f.write(str(client_id) + '\n')
            f.write(str(gradient['fc3.weight']) + '\n')

def absolute_and_max(gradient_list: dict, dir: str, round: int):
    """Append the L1 and L-infinity norms of each client's fc3.weight
    gradient to absolute_and_max_<dir>.txt for offline inspection."""
    with open('absolute_and_max_' + dir + '.txt', 'a') as f:
        f.write("Round: " + str(round) + '\n')
        for client_id, gradient in enumerate(gradient_list):
            fc3 = gradient['fc3.weight'].float()
            f.write('Client ' + str(client_id) + ': ')
            f.write('absolute value: ' + str(torch.norm(fc3, 1)) + '  ')
            f.write('max value: ' + str(torch.norm(fc3, float('inf'))) + '\n')
        f.write('---------------------------------\n')


def foolsgold(grads):
    """FoolsGold aggregation weights (Fung et al.): down-weight clients whose
    gradients are suspiciously similar to another client's.

    Args:
        grads: (n_clients, dim) matrix of flattened client gradients.

    Returns:
        per-client weight vector in [0, 1].
    """
    n_clients = grads.shape[0]
    cs = smp.cosine_similarity(grads) - np.eye(n_clients)
    maxcs = np.max(cs, axis=1)

    # Pardoning: if client i looks more honest than j (smaller max
    # similarity), scale down its similarity towards j.
    for i in range(n_clients):
        for j in range(n_clients):
            if i == j:
                continue
            if maxcs[i] < maxcs[j]:
                cs[i][j] = cs[i][j] * maxcs[i] / maxcs[j]

    # Raw weight: 1 minus the (pardoned) max similarity, clipped to [0, 1].
    wv = 1 - np.max(cs, axis=1)
    np.clip(wv, 0, 1, out=wv)

    # Rescale so the largest weight is 1, then back off exact ones so the
    # logit below stays finite.
    wv = wv / np.max(wv)
    wv[wv == 1] = .99

    # Logit re-weighting, clamped back into [0, 1].
    wv = np.log(wv / (1 - wv)) + 0.5
    # NOTE(review): `np.isinf(wv) + wv > 1` parses as `(isinf + wv) > 1`;
    # preserved byte-for-byte — combined with the wv<0 clamp it covers the
    # same cases over this value range, but confirm `|` was not intended.
    wv[(np.isinf(wv) + wv > 1)] = 1
    wv[wv < 0] = 0

    return wv


def getKrum(x):
    '''
    Multi-Krum candidate selection in O(d * n^2).

    input : x, an (n, d) tensor of client update vectors.
    returns : indices of the k+2 points with the smallest summed distance to
    their k+1 nearest neighbours (self included), assuming up to n//2
    malicious clients.
    '''
    n = x.shape[0]
    f = n // 2          # worst case: half the points are malicious
    k = n - f - 2

    # Pairwise Euclidean distances between all update vectors.
    pairwise = torch.cdist(x, x, p=2)

    # Each point's distance to its k+1 nearest neighbours (incl. itself),
    # summed into a Krum score; lower is better.
    nbh_dist, _ = torch.topk(pairwise, k + 1, largest=False)
    scores = nbh_dist.sum(1)

    _, sel = torch.topk(scores, k + 2, largest=False)
    return sel
