import torch
import random
import numpy as np
import copy
from multiprocessing import Process, Pipe, Pool
from net import SIMPLE_NET, CNN, MULTI_LAYER, CIFAR10_CNN
from train_model import TrainModel
from client import Client
from load_partition_data import load_partition_data
import matplotlib.pyplot as plt


def client_sampling(train_sample_num, round_idx, total_num, psi_sample_num = 2):
    """Pick the clients that train this round plus the clients that run PSI.

    Args:
        train_sample_num: number of training clients to draw.
        round_idx: current round; used as the RNG seed so the same round
            always samples the same clients (reproducible runs).
        total_num: total number of available clients.
        psi_sample_num: number of PSI participants (default 2-party PSI).

    Returns:
        (train_client_indexes, psi_client_indexes) lists of client indices.

    Raises:
        Exception: if train_sample_num exceeds total_num.
    """
    if train_sample_num == total_num:
        train_client_indexes = list(range(total_num))
    elif train_sample_num < total_num:
        # random.seed returns None — the original bound it to a dead variable.
        random.seed(round_idx)
        train_client_indexes = random.sample(range(total_num), train_sample_num)
    else:
        raise Exception('sample_num bigger than total_num is not allowed')
    # PSI participants may overlap with the training clients; sampled from the
    # same (round-seeded) RNG stream.
    psi_client_indexes = random.sample(range(total_num), psi_sample_num)
    print('train_client_indexes:', train_client_indexes, 'psi_client_indexes:', psi_client_indexes)
    return train_client_indexes, psi_client_indexes

def create_client(client_num, train_data, test_data, data_num, trainmodel, psi = False, pipe_conn = None):
    """Instantiate one Client per index in [0, client_num).

    Every client shares the same model object and (optionally) the same
    pipe endpoint; each gets its own train/test partition by index.
    """
    return [
        Client(client_idx = idx, client_train_data = train_data[idx], client_test_data = test_data[idx],
               client_data_num = data_num[idx], model = trainmodel, psi = psi, pipe_conn = pipe_conn)
        for idx in range(client_num)
    ]

def aggregate(w_local):
    """FedAvg aggregation: data-size weighted average of client parameters.

    Args:
        w_local: list of (data_num, state_dict) pairs, one per client.

    Returns:
        A NEW state_dict whose values are sum_i (data_num_i / total) * params_i.

    Fix: the original did `_, avg_params = w_local[0]`, so `avg_params` aliased
    the first client's state_dict and aggregation clobbered that client's
    parameters in place. A fresh dict is built instead.
    """
    training_num = sum(num for num, _ in w_local)
    avg_params = {}
    for data_num, local_params in w_local:
        weight = data_num / training_num
        for key in local_params:
            if key in avg_params:
                avg_params[key] += local_params[key] * weight
            else:
                # First contribution creates a new (weighted) value, so the
                # inputs are never mutated.
                avg_params[key] = local_params[key] * weight
    return avg_params  # avg_params is a state_dict

def create_host_guest(client_psi_obj_list, client_psi_indexes, train_data_dict, test_data_dict, each_client_data_num_dict, host_pipe, guest_pipe):
    """Bind the two PSI clients to their sampled datasets and pipe endpoints.

    Expects exactly two clients and two indices; returns (host, guest) with
    each one's local dataset swapped in and its `pipe_conn` set to the
    corresponding end of the round's pipe.
    """
    host, guest = client_psi_obj_list
    host_idx, guest_idx = client_psi_indexes
    for peer, peer_idx in ((host, host_idx), (guest, guest_idx)):
        peer.update_local_dataset(client_idx = peer_idx,
                                  train_data = train_data_dict[peer_idx],
                                  test_data = test_data_dict[peer_idx],
                                  data_num = each_client_data_num_dict[peer_idx])
    # Re-binding per round avoids keeping a stale pipe across rounds.
    host.pipe_conn = host_pipe
    guest.pipe_conn = guest_pipe
    return host, guest

def host_run_psi(client):
    """Run the host side of the 2-party PSI exchange over the client's pipe.

    Protocol (host view): hash own labels, receive the guest's hashed labels,
    intersect, send the intersection back, then report what fraction of the
    host's data is in the intersection.

    Returns the percentage computed by `calculate_percentage`.
    """
    host = client
    host.get_label()
    host.get_hash_label()
    host.get_intersection(received_list = host.receive())
    host.send(message = host.intersection)
    overlap_ratio = host.calculate_percentage()
    host.pipe_conn.close()
    return overlap_ratio

def guest_run_psi(client):
    """Run the guest side of the 2-party PSI exchange over the client's pipe.

    Protocol (guest view): hash own labels, send them to the host, receive
    the computed intersection back, then report what fraction of the guest's
    data is in the intersection.

    Returns the percentage computed by `calculate_percentage`.
    """
    guest = client
    guest.get_label()
    guest.get_hash_label()
    guest.send()
    guest.intersection = guest.receive()
    overlap_ratio = guest.calculate_percentage()
    guest.pipe_conn.close()
    return overlap_ratio

def close_pipe(client_psi_obj_list):
    """Close both PSI clients' pipe endpoints (expects exactly [host, guest])."""
    first, second = client_psi_obj_list
    first.pipe_conn.close()
    second.pipe_conn.close()

def train(batch_size, client_num_per_round, round_num, lr, Train_model, epoch, psi:bool, degree_of_noniid, mode, 
            psi_num_per_round = 2):# only 2-party PSI is supported
    """Run federated training (FedAvg-style) for `round_num` rounds.

    Each round: sample clients, locally train each for `epoch` epochs,
    aggregate the weights by data-size weighted average, push the new global
    model back to all training clients, and evaluate on every client.  When
    `psi` is True, two additional clients concurrently run a 2-party PSI
    exchange in a 2-worker process pool connected by a fresh Pipe per round.

    Returns:
        (test_acc, test_loss): per-round lists of length round_num + 1; the
        first entry is the evaluation of the untrained model (round 0).
    """
    test_acc = []
    test_loss = []
    # decay_dict = {1:0.975, 2:0.95, 3:0.94, 4:0.92, 5:0.90, 6:0.88, 7:0.86}
    # decay_rate = decay_dict[epoch]
    decay_rate = 0.96  # multiplicative per-round learning-rate decay

    total_client_num, train_data_dict, test_data_dict, each_client_data_num_dict = load_partition_data(batch_size = batch_size, degree_of_noniid = degree_of_noniid,
                                                                                     mode = mode, test_data_mode = 'iid')

    # Reusable client objects: one pool for training, one (of size
    # psi_num_per_round) for PSI.  Their datasets are swapped per round.
    client_train_obj_list = create_client(client_num = client_num_per_round, train_data = train_data_dict, test_data = test_data_dict,
                                data_num = each_client_data_num_dict, trainmodel = Train_model)

    client_psi_obj_list = create_client(client_num = psi_num_per_round, train_data = train_data_dict, test_data = test_data_dict,
                                data_num = each_client_data_num_dict, trainmodel = Train_model, psi = psi)

    # Baseline evaluation of the untrained model (round 0 entry).
    acc, loss = test_clients(total_client_num = total_client_num, client_obj = client_train_obj_list[0], train_data = train_data_dict, 
                        test_data = test_data_dict, each_client_data_num = each_client_data_num_dict)
    test_acc.append(acc)
    test_loss.append(loss)

    w_global = Train_model.get_model_params()
    # NOTE(review): the 2-worker pool is created even when psi is False and is
    # never join()ed — confirm this is intentional.
    pool = Pool(processes = 2)
    # host_pipe, guest_pipe = Pipe()
    for round_idx in range(round_num):
        print('-------------------------------------------round:', round_idx)
        w_local = []
        train_client_indexes, psi_client_indexes = client_sampling(train_sample_num = client_num_per_round, round_idx = round_idx, total_num = total_client_num)
        if psi == True:
            # A fresh pipe each round; the previous one was closed at round end.
            host_pipe, guest_pipe = Pipe()
            host, guest = create_host_guest(client_psi_obj_list, client_psi_indexes = psi_client_indexes, train_data_dict = train_data_dict,
            test_data_dict = test_data_dict, each_client_data_num_dict = each_client_data_num_dict, host_pipe = host_pipe, guest_pipe = guest_pipe)
            # PSI runs asynchronously while the training clients train below.
            host_result = pool.apply_async(host_run_psi, args = (host,))  # trailing comma: args must be a tuple
            guest_result = pool.apply_async(guest_run_psi, args = (guest,))

        lr = lr * decay_rate

        for idx, client in enumerate(client_train_obj_list):
            train_client_idx = train_client_indexes[idx]
            client.update_local_dataset(client_idx = train_client_idx, train_data = train_data_dict[train_client_idx], test_data = test_data_dict[train_client_idx],
                                         data_num = each_client_data_num_dict[train_client_idx])

            # deepcopy so later in-place model updates can't alias these weights
            w = client.train(lr = lr, epoch = epoch)
            w_local.append((client.get_train_num(), copy.deepcopy(w)))
        
        # FedAvg: weighted average, then broadcast the new global model.
        w_global = aggregate(w_local = w_local)
        Train_model.set_model_params(w_global)
        for client in client_train_obj_list:
            client.update_model(model = Train_model)
            
        acc, loss = test_clients(total_client_num = total_client_num, client_obj = client_train_obj_list[0], train_data = train_data_dict, 
                                test_data = test_data_dict, each_client_data_num = each_client_data_num_dict)
        test_acc.append(acc)
        test_loss.append(loss)
        if psi == True:
            # Block until both PSI workers finish before starting the next round.
            host_percentage = host_result.get()
            guest_percentage = guest_result.get()
            print('host_percentage:', host_percentage, 'guest_percentage:', guest_percentage)
            host_pipe.close()        
            guest_pipe.close()
    pool.close()
    return test_acc, test_loss

                            
def test_clients(total_client_num, client_obj, train_data, test_data, each_client_data_num):
    train_metrics = {
            'num_samples': [],
            'num_correct': [],
            'losses': []
        }
    test_metrics = {
        'num_samples': [],
        'num_correct': [],
        'losses': []
    }

    for idx in range(total_client_num):
        client_obj.update_local_dataset(client_idx = idx, train_data = train_data[idx], test_data = test_data[idx], data_num = each_client_data_num[idx])
        
        train_local_metrics = client_obj.test(mode = 'train')
        train_metrics['num_samples'].append(copy.deepcopy(train_local_metrics['test_total']))
        train_metrics['num_correct'].append(copy.deepcopy(train_local_metrics['test_correct']))
        train_metrics['losses'].append(copy.deepcopy(train_local_metrics['test_loss']))

        test_local_metrics = client_obj.test(mode = 'test')
        test_metrics['num_samples'].append(copy.deepcopy(test_local_metrics['test_total']))
        test_metrics['num_correct'].append(copy.deepcopy(test_local_metrics['test_correct']))
        test_metrics['losses'].append(copy.deepcopy(test_local_metrics['test_loss']))

        # print('client_idx', idx, 'test_total = ', test_local_metrics['test_total'],\
        #         'test_correct', test_local_metrics['test_correct'], 'test_loss', test_local_metrics['test_loss'])
        # test on training dataset
    train_acc = sum(train_metrics['num_correct']) / sum(train_metrics['num_samples'])
    train_loss = sum(train_metrics['losses']) / sum(train_metrics['num_samples'])

    # test on test dataset
    test_acc = sum(test_metrics['num_correct']) / sum(test_metrics['num_samples'])
    test_loss = sum(test_metrics['losses']) / sum(test_metrics['num_samples'])

    train_stats = {'training_acc': train_acc, 'training_loss': train_loss}
    test_stats = {'test_acc': test_acc, 'test_loss': test_loss}
    print(train_stats, '\n',test_stats)
    return test_acc, test_loss

def find_max_acc(acc_list):
    """Return the largest accuracy in the list, floored at 0 (0 for an empty list)."""
    return max([0, *acc_list])

def find_convergence_round(acc_list, max_acc, gap = 0.01):
    """Return the first round whose accuracy is within `gap` of `max_acc`.

    Returns None implicitly when no entry qualifies.
    """
    for round_idx, acc in enumerate(acc_list):
        if max_acc - acc <= gap:
            return round_idx

if __name__ == '__main__':
    # Sweep the number of local epochs (1..max_epoch-1) and plot, for each
    # setting, the per-round test accuracy of federated training.
    max_epoch = 8
    batch_size = 64
    client_num_per_round = 5
    round_num = 80
    learning_rate = 0.01
    # if data_dir = 'total', choose user_num = 10 or 100 or 500; if data_dir =
    # 'original', user_num is not needed (any number works)
    user_num = 10
    psi = False
    degree_of_noniid = '20'
    mode = 'cifar100'

    each_epoch_max_acc = []
    each_epoch_convergence_round = []


    plt.figure(figsize = (4,4))
    plt.xlabel('round')
    plt.ylabel('acc')
    # round 0 is the pre-training evaluation, hence round_num + 1 points
    round_list = [i for i in range(round_num + 1)]
    for i in range(1, max_epoch):  # a fresh model is created for every epoch setting
        if mode == 'cifar10':
            Net = CIFAR10_CNN(10)
        elif mode == 'cifar100':
            # NOTE(review): 20 outputs — presumably CIFAR-100 coarse labels; confirm
            Net = CIFAR10_CNN(20)
        else:
            Net = MULTI_LAYER()
        # Initialise the shared training model; every client uses this model.
        Train_model = TrainModel(Net)
        epoch = i
        print('epoch', epoch, '-------------------------')  # fixed typo: 'eopch'
        acc,loss = train(batch_size = batch_size, client_num_per_round = client_num_per_round, round_num = round_num, lr = learning_rate, Train_model = Train_model,
                    epoch = epoch, psi = psi, degree_of_noniid = degree_of_noniid, mode = mode)
        max_acc = find_max_acc(acc_list = acc)
        each_epoch_max_acc.append(max_acc)
        convergence_round = find_convergence_round(acc_list = acc, max_acc = max_acc)
        each_epoch_convergence_round.append(convergence_round)
        plt.plot(round_list, acc, label = 'epoch = %s' %i)
    plt.legend(loc = 'lower left')
    print(each_epoch_max_acc)
    print(each_epoch_convergence_round)
    plt.show()
    print('Done!')