import torch
import random
import numpy as np
import copy
import math
from net import SIMPLE_NET, CNN, MULTI_LAYER, CIFAR10_CNN
from train_model import TrainModel
from client import Client
from load_partition_data import load_partition_data


def client_sampling(train_sample_num, round_idx, total_num):
    """Select the client indexes that participate in a training round.

    Seeds the RNG with round_idx so the same round always samples the same
    clients (reproducible across runs).

    Args:
        train_sample_num: number of clients to sample.
        round_idx: current round index, used as the RNG seed.
        total_num: total number of available clients.

    Returns:
        List of sampled client indexes.

    Raises:
        ValueError: if train_sample_num exceeds total_num.
    """
    if train_sample_num == total_num:
        # Full participation: every client index, in order.
        train_client_indexes = list(range(total_num))
    elif train_sample_num < total_num:
        # Bug fix: random.seed() returns None; the old code uselessly stored it.
        random.seed(round_idx)
        train_client_indexes = random.sample(range(total_num), train_sample_num)
    else:
        raise ValueError('sample_num bigger than total_num is not allowed')
    print('train_client_indexes:', train_client_indexes)
    return train_client_indexes

def create_client(client_num, train_data, test_data, data_num, trainmodel, psi = False, pipe_conn = None):
    """Build one Client object per client index, each bound to its own data shard.

    Args:
        client_num: number of clients to create.
        train_data / test_data: dicts (or sequences) of per-client datasets, keyed by index.
        data_num: per-client sample counts, keyed by index.
        trainmodel: shared training-model wrapper passed to every client.
        psi: forwarded to Client unchanged.
        pipe_conn: forwarded to Client unchanged.

    Returns:
        List of Client instances, one per index in [0, client_num).
    """
    return [
        Client(client_idx = i, client_train_data = train_data[i], client_test_data = test_data[i],
               client_data_num = data_num[i], model = trainmodel, psi = psi, pipe_conn = pipe_conn)
        for i in range(client_num)
    ]

def aggregate(w_local):
    """FedAvg aggregation: average model parameters weighted by local data size.

    Args:
        w_local: list of (data_num, params) tuples, where params is a dict
            mapping parameter name -> tensor/value.

    Returns:
        New dict of averaged parameters (same keys as each local params dict).

    Bug fix: the original code aliased w_local[0]'s params dict as the
    accumulator and overwrote its entries in place, silently corrupting the
    first client's weights. A fresh dict is built instead.
    """
    training_num = sum(num for num, _ in w_local)

    first_num, first_params = w_local[0]
    first_weight = first_num / training_num
    # Seed the accumulator with NEW values so no caller-owned dict is mutated.
    avg_params = {key: first_params[key] * first_weight for key in first_params}

    for data_num, local_params in w_local[1:]:
        weight = data_num / training_num
        for key in avg_params:
            avg_params[key] = avg_params[key] + local_params[key] * weight

    return avg_params

def client_train(client_train_obj_list, lr, epoch, round_idx, total_client_num, train_data_dict, test_data_dict, each_client_data_num_dict, train_sample_num = None):
    """Run one round of local training over a sampled set of client datasets.

    Each Client object in client_train_obj_list is re-bound to one sampled
    client's dataset, trained locally, and its weights collected.

    Args:
        client_train_obj_list: reusable Client objects (one per sampled slot).
        lr: learning rate forwarded to Client.train.
        epoch: number of local epochs forwarded to Client.train.
        round_idx: round index, used to seed the client sampler.
        total_client_num: total number of client datasets available.
        train_data_dict / test_data_dict: per-client datasets keyed by index.
        each_client_data_num_dict: per-client sample counts keyed by index.
        train_sample_num: how many client datasets to sample; defaults to
            len(client_train_obj_list).

    Returns:
        List of (train_sample_count, deep-copied weights) tuples, one per client.
    """
    if train_sample_num is None:
        # Bug fix: the original read the module-global `client_num_per_round`,
        # which only exists when this file runs as a script (NameError when
        # imported). One sampled dataset per client object is the value that
        # global was always set to.
        train_sample_num = len(client_train_obj_list)

    w_local = []
    train_client_indexes = client_sampling(train_sample_num = train_sample_num, round_idx = round_idx, total_num = total_client_num)

    for idx, client in enumerate(client_train_obj_list):
        train_client_idx = train_client_indexes[idx]
        client.update_local_dataset(client_idx = train_client_idx, train_data = train_data_dict[train_client_idx], test_data = test_data_dict[train_client_idx],
                                        data_num = each_client_data_num_dict[train_client_idx])

        w = client.train(lr = lr, epoch = epoch)
        # Deep-copy so later in-place training on the client cannot alter
        # the collected weights.
        w_local.append((client.get_train_num(), copy.deepcopy(w)))
    return w_local

def cal_tensor_kl_divergence(y_tensor, y_prime_tensor, smooth_param = 0.001):
    """Mean row-wise KL divergence (base 2) between two 2-D probability tensors.

    Rows of y_prime_tensor containing zeros are smoothed first (mass is moved
    from non-zero entries onto the zero entries) so the log is finite.
    Assumes rows of y_tensor contain no zeros — TODO confirm with caller.

    Args:
        y_tensor: shape (rows, classes) reference distribution.
        y_prime_tensor: shape (rows, classes) approximating distribution.
        smooth_param: probability mass given to each zero entry.

    Returns:
        Mean KL divergence across rows (0-dim tensor / scalar).

    Bug fix: the original smoothed y_prime_tensor in place, silently mutating
    the caller's tensor; we now smooth a clone.
    """
    dim1, dim2 = y_tensor.shape
    y_prime_tensor = y_prime_tensor.clone()  # never mutate the caller's data
    kl_divergence = 0
    for i in range(dim1):
        row = y_tensor[i]
        row_prime = y_prime_tensor[i]
        zero_value_index = [j for j in range(dim2) if row_prime[j] == 0]
        zero_value_num = len(zero_value_index)
        if zero_value_num > 0:
            # Shift mass off every entry, then give zero entries their share
            # back plus smooth_param; the row still sums to (roughly) 1.
            shift = zero_value_num * smooth_param / (dim2 - zero_value_num)
            for j in range(dim2):
                row_prime[j] -= shift
            for j in zero_value_index:
                row_prime[j] += shift + smooth_param
        for j in range(dim2):
            kl_divergence += row[j] * (math.log2(row[j] / row_prime[j]))

    kl_divergence /= dim1
    return kl_divergence

def cal_kl_divergence(y, y_prime):
    """Average KL divergence between paired prediction lists.

    Args:
        y: list (one entry per client) of lists of prediction tensors.
        y_prime: same structure as y, compared element-wise against it.

    Returns:
        Mean over clients of the mean per-batch KL divergence.
    """
    client_scores = []
    for client_idx in range(len(y)):
        client_batches = y[client_idx]
        client_batches_prime = y_prime[client_idx]
        # Average KL across this client's batches.
        batch_total = 0
        for batch_idx in range(len(client_batches)):
            batch_total += cal_tensor_kl_divergence(y_tensor = client_batches[batch_idx],
                                                    y_prime_tensor = client_batches_prime[batch_idx])
        client_scores.append(batch_total / len(client_batches))
    return sum(client_scores) / len(y)

def get_kl_divergence(cal_round_num, client_list, lr, epoch, Train_model, round_idx, total_client_num, train_data_dict, test_data_dict, each_client_data_num_dict):
    """Estimate how far one round of local training shifts client predictions.

    For each probe round: snapshot every client's predictions on its own
    training data, run one round of local training, snapshot the predictions
    again, then aggregate the local weights into the global model and push it
    back to every client. Returns the mean before/after KL divergence over
    all probe rounds. (round_idx is accepted but unused; sampling inside
    client_train uses a fixed seed of -1.)
    """
    per_round_kl = []
    for _ in range(cal_round_num):
        # Predictions under the current global weights.
        y_before = []
        for idx, client in enumerate(client_list):
            y_before.append(client.use_current_w_get_y(data = train_data_dict[idx]))

        w_local = client_train(client_train_obj_list = client_list, lr = lr, epoch = epoch, round_idx = -1,
                               total_client_num = total_client_num, train_data_dict = train_data_dict,
                               test_data_dict = test_data_dict, each_client_data_num_dict = each_client_data_num_dict)

        # Predictions after local training, before aggregation.
        y_after = []
        for idx, client in enumerate(client_list):
            y_after.append(client.use_current_w_get_y(data = train_data_dict[idx]))

        # Fold this round's updates into the global model and broadcast it.
        Train_model.set_model_params(aggregate(w_local = w_local))
        for client in client_list:
            client.update_model(model = Train_model)

        per_round_kl.append(cal_kl_divergence(y = y_before, y_prime = y_after))

    return sum(per_round_kl) / cal_round_num

def get_param_theta_based_on_kl_divergence(kl_divergence):
    """Map a measured KL divergence to the local-epoch parameter theta.

    TODO: not implemented yet. The commented-out training loop in train()
    expects this to return the `epoch` value used for subsequent rounds.
    """
    pass

def train(batch_size, client_num_per_round, round_num, lr, Train_model, epoch, degree_of_noniid, mode, cal_round_num):
    """Top-level federated-training driver.

    Loads partitioned data, creates one Client per sampled slot, evaluates
    the initial model, runs one warm-up round of local training + FedAvg
    aggregation, then measures the KL divergence between pre/post-training
    predictions over `cal_round_num` probe rounds. The main multi-round
    training loop below is currently commented out pending
    get_param_theta_based_on_kl_divergence.

    Args:
        batch_size: mini-batch size passed to the data loader.
        client_num_per_round: number of clients sampled (and created) per round.
        round_num: intended number of federated rounds (used only by the
            commented-out loop for now).
        lr: learning rate for local training.
        Train_model: shared TrainModel wrapper holding the global weights.
        epoch: local epochs per round.
        degree_of_noniid: data-partition skew setting forwarded to the loader.
        mode: dataset selector forwarded to the loader (e.g. 'cifar10').
        cal_round_num: number of probe rounds for the KL-divergence estimate.
    """

    total_client_num, train_data_dict, test_data_dict, each_client_data_num_dict = load_partition_data(batch_size = batch_size, degree_of_noniid = degree_of_noniid,
                                                                                     mode = mode, test_data_mode = 'iid')

    client_train_obj_list = create_client(client_num = client_num_per_round, train_data = train_data_dict, test_data = test_data_dict,
                                data_num = each_client_data_num_dict, trainmodel = Train_model)

    # Baseline evaluation of the (untrained) shared model across all clients.
    test_clients(total_client_num = total_client_num, client_obj = client_train_obj_list[0], train_data = train_data_dict, 
                        test_data = test_data_dict, each_client_data_num = each_client_data_num_dict)
    # w_global = Train_model.get_model_params()

    # One warm-up round: local training, FedAvg aggregation, then broadcast
    # the new global weights to every client object.
    w_local = client_train(client_train_obj_list = client_train_obj_list, lr = lr, epoch = epoch, round_idx = -1, total_client_num = total_client_num,
                    train_data_dict = train_data_dict, test_data_dict = test_data_dict, each_client_data_num_dict = each_client_data_num_dict)
    w_global = aggregate(w_local = w_local)
    Train_model.set_model_params(w_global)
    for client in client_train_obj_list:
        client.update_model(model = Train_model)

    kl_divergence = get_kl_divergence(cal_round_num = cal_round_num, client_list = client_train_obj_list, lr = lr, epoch = epoch, Train_model = Train_model,
                                        round_idx = -1, total_client_num = total_client_num, train_data_dict = train_data_dict, test_data_dict = test_data_dict,
                                        each_client_data_num_dict = each_client_data_num_dict)
    print(kl_divergence)
    # theta = get_param_theta_based_on_kl_divergence(kl_divergence = kl_divergence)

    # for round_idx in range(round_num):
    #     print('-------------------------------------------round:', round_idx)
    #     w_local = client_train(client_train_obj_list = client_train_obj_list, lr = lr, epoch = theta, round_idx = round_idx, total_client_num = total_client_num,
    #                 train_data_dict = train_data_dict, test_data_dict = test_data_dict, each_client_data_num_dict = each_client_data_num_dict)
    #     # w_local = []
    #     # train_client_indexes = client_sampling(train_sample_num = client_num_per_round, round_idx = round_idx, total_num = total_client_num)

    #     # for idx, client in enumerate(client_train_obj_list):
    #     #     train_client_idx = train_client_indexes[idx]
    #     #     client.update_local_dataset(client_idx = train_client_idx, train_data = train_data_dict[train_client_idx], test_data = test_data_dict[train_client_idx],
    #     #                                  data_num = each_client_data_num_dict[train_client_idx])

    #     #     w = client.train(lr = lr, epoch = epoch)
    #     #     w_local.append((client.get_train_num(), copy.deepcopy(w)))
        
    #     w_global = aggregate(w_local = w_local)
    #     Train_model.set_model_params(w_global)
    #     for client in client_train_obj_list:
    #         client.update_model(model = Train_model)
            
    #     test_clients(total_client_num = total_client_num, client_obj = client_train_obj_list[0], train_data = train_data_dict, 
    #                             test_data = test_data_dict, each_client_data_num = each_client_data_num_dict)
                            
def test_clients(total_client_num, client_obj, train_data, test_data, each_client_data_num):
    """Evaluate the current model on every client's train and test shards.

    A single reusable Client object is re-bound to each client's dataset in
    turn; per-client metrics are printed for the test split, and aggregate
    accuracy/loss over all clients are printed at the end.
    """
    train_metrics = {'num_samples': [], 'num_correct': [], 'losses': []}
    test_metrics = {'num_samples': [], 'num_correct': [], 'losses': []}

    for client_idx in range(total_client_num):
        client_obj.update_local_dataset(client_idx = client_idx, train_data = train_data[client_idx],
                                        test_data = test_data[client_idx], data_num = each_client_data_num[client_idx])

        # Evaluate both splits with the same bookkeeping.
        for split, metrics in (('train', train_metrics), ('test', test_metrics)):
            local_metrics = client_obj.test(mode = split)
            metrics['num_samples'].append(copy.deepcopy(local_metrics['test_total']))
            metrics['num_correct'].append(copy.deepcopy(local_metrics['test_correct']))
            metrics['losses'].append(copy.deepcopy(local_metrics['test_loss']))
            if split == 'test':
                print('client_idx', client_idx, 'test_total = ', local_metrics['test_total'],
                      'test_correct', local_metrics['test_correct'], 'test_loss', local_metrics['test_loss'])

    # Aggregate over all clients (sample-weighted).
    train_acc = sum(train_metrics['num_correct']) / sum(train_metrics['num_samples'])
    train_loss = sum(train_metrics['losses']) / sum(train_metrics['num_samples'])
    test_acc = sum(test_metrics['num_correct']) / sum(test_metrics['num_samples'])
    test_loss = sum(test_metrics['losses']) / sum(test_metrics['num_samples'])

    train_stats = {'training_acc': train_acc, 'training_loss': train_loss}
    test_stats = {'test_acc': test_acc, 'test_loss': test_loss}
    print(train_stats, '\n', test_stats)
    

if __name__ == '__main__':
    Net = CIFAR10_CNN()
    # Initialize the training model; every client shares this same model.
    Train_model = TrainModel(Net)

    epoch = 1
    batch_size = 64
    client_num_per_round = 5
    round_num = 80
    learning_rate = 0.03
    # NOTE(review): original comment said: if data_dir = 'total', choose user_num
    # of 10/100/500; if data_dir = 'original', user_num is unconstrained. Neither
    # data_dir nor user_num appears in this file — presumably load_partition_data
    # options; verify against that module.
    degree_of_noniid = '0'
    mode = 'cifar10'
    cal_round_num = 3

    train(batch_size = batch_size, client_num_per_round = client_num_per_round, round_num = round_num, lr = learning_rate, Train_model = Train_model,
            epoch = epoch, degree_of_noniid = degree_of_noniid, mode = mode, cal_round_num = cal_round_num)
    print('Done!')