import sys
import os

# Set up paths
current_dir = os.path.dirname(os.path.abspath(__file__))
utils_path = os.path.join(current_dir, 'utils')
sys.path.append(utils_path)

import copy
import torch
import torch.nn as nn
import torch.utils.data.dataloader as dataloader
import random
import numpy as np
import datetime
from network_AlexNet import model_selection
from dataset import Dataset, Data_Partition
from utils import calculate_v_value, replace_user, sample_or_generate_features, compute_local_adjustment, find_client_with_min_time, generate_computing, generate_position, generate_communication_rate


# Pin training to GPU 0; fall back to CPU when CUDA is unavailable.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

'''Experimental parameter settings'''
iid = False  # IID data partition across clients
dirichlet = False  # Dirichlet-based non-IID partition (uses alpha)
shard = 2  # shards per client for the shard-based non-IID partition — TODO confirm against Data_Partition
alpha = 0.1  # Dirichlet concentration parameter (only used when dirichlet=True)
Epochs = 2000  # number of global rounds (terminates the main training while-loop)
localEpoch = 20  # E: local iterations a client runs before uploading its model
user_num = 20  # K: total number of clients
user_parti_num = 10  # C: concurrently participating clients (= Qs = Qc buffer sizes)
batchSize = 32
lr = 0.01  # learning rate shared by the client- and server-side optimizers
print(f'iid = {iid}')
print(f'dirichlet = {dirichlet}')
print(f'shard = {shard}')
print(f'localEpoch (E) = {localEpoch}')
print(f'user_num (K) = {user_num}')
print(f'user_parti_num (C=Qs=Qc) = {user_parti_num}')
print(f'batchSize = {batchSize}')

'''Training data selection'''
# Exactly one dataset flag should be True.
cifar = True
mnist = False
fmnist = False
cinic = False
cifar100 = False
SVHN = False

'''Random seeds selection'''
# Seed all RNGs for reproducibility (torch CPU, NumPy, and every CUDA device).
seed_value = 2023
torch.manual_seed(seed_value)
np.random.seed(seed_value)
torch.cuda.manual_seed(seed_value)
torch.cuda.manual_seed_all(seed_value)
clip_grad = True  # clip gradient norms at 10 in both model updates

'''Hyperparameter Setting of GAS'''
Generate = True  # Whether to generate activations
Sample_Frequency = 1  # Sampling frequency
V_Test = False  # Calculate Gradient Dissimilarity
V_Test_Frequency = 1
Accu_Test_Frequency = 1  # test accuracy every this many global rounds
num_label = 100 if cifar100 else 10  # number of classes in the selected dataset


'''Simulate real communication environments'''
WRTT = True   # True for simulation, False for no simulation
if WRTT is True:
    # Per-client compute capacity used by Client.time_propagation — presumably FLOPs/s; TODO confirm in utils
    clients_computing = generate_computing(user_num)
    # Per-client physical position, used to derive link quality
    clients_position = generate_position(user_num)
    # Per-client communication rate derived from position — presumably bits/s; TODO confirm in utils
    clients_rates = generate_communication_rate(user_num, clients_position)
    print(f'Simulate real communication environments')
    # print(clients_computing)
    # print(clients_position)
    # print(clients_rates)



'''Data loading and preprocessing'''
# Dataset() returns the raw training images/labels, the test set, and the transform
# applied when building per-client datasets.
alldata, alllabel, test_set, transform = Dataset(cifar=cifar, mnist=mnist, fmnist=fmnist, cinic=cinic, cifar100=cifar100, SVHN=SVHN)
test_loader = dataloader.DataLoader(dataset=test_set, batch_size=128, shuffle=True)
# Shuffle the training data once before partitioning it across clients.
train_index = np.arange(0, len(alldata))
random.shuffle(train_index)
train_img = np.array(alldata)[train_index]
train_label = np.array(alllabel)[train_index]
# Partition the shuffled data into user_num client datasets (IID, Dirichlet, or shard-based).
users_data = Data_Partition(iid, dirichlet, train_img, train_label, transform, 
                            user_num, batchSize, alpha, shard,
                            drop=False, classOfLabel=num_label)

'''generate local logit adjustment for each client'''
# One logit-adjustment vector per client, compensating for its local label distribution.
logit_local_adjustments = []
for i in range(user_num):
    logit_local_adjustments.append(compute_local_adjustment(users_data[i], device))

'''Model initialization'''
# Split model: user_model is the client-side half, server_model the server-side half.
user_model, server_model = model_selection(cifar, mnist, fmnist, cinic=cinic, split=True, cifar100=cifar100, SVHN=SVHN)
user_model.to(device)
# Keep one client-side parameter copy per participating slot (C = user_parti_num slots).
userParam = copy.deepcopy(user_model.state_dict())
usersParam = [copy.deepcopy(userParam) for _ in range(user_parti_num)]
optimizer_down = torch.optim.SGD(user_model.parameters(), lr=lr, momentum=0.9, weight_decay=0.0005)
server_model.to(device)
optimizer_up = torch.optim.SGD(server_model.parameters(), lr=lr, momentum=0.9, weight_decay=0.0005)
criterion = nn.CrossEntropyLoss()






class Client:
    """A simulated federated client.

    Cycles endlessly through its local dataloader, counts local iterations
    toward the E-iteration upload point, and accumulates simulated wall-clock
    time for computation and transmission (AlexNet-specific constants).
    """

    def __init__(self, user_data, localEpoch, minibatch=0, computing=0, rate=0, time=0, weight_count=1):
        self.user_data = user_data
        # Persistent iterator over the local data; re-created when exhausted.
        self.dataloader_iter = iter(user_data)
        self.localEpoch = localEpoch
        self.count = 0  # local iterations completed in the current round
        '''Calculation of time'''
        self.minibatch = minibatch
        self.computing = computing  # compute capacity (FLOPs per second)
        self.rate = rate  # transmission rate (bits per second)
        self.time = time  # accumulated simulated time
        # Staleness weight applied to this client's activations.
        self.weight_count = weight_count

    def count_local_itera_E(self):
        """Record one local iteration; return True (and reset) once E iterations finish."""
        self.count += 1
        if self.count != self.localEpoch:
            return False
        # Local round complete: reset the counter for the next round.
        self.count = 0
        return True

    def train_one_iteration(self):
        """Return the next minibatch, rewinding the dataloader when it runs out."""
        try:
            batch = next(self.dataloader_iter)
        except StopIteration:
            # Local epoch finished: restart and take the first batch of the new pass.
            self.dataloader_iter = iter(self.user_data)
            batch = next(self.dataloader_iter)
        return batch

    '''Calculation of time'''
    def time_propagation(self):
        """Add the simulated cost of one client-side AlexNet propagation pass."""
        workload = 5603328  # FLOPs to process one image through the client-side AlexNet
        # Per-image cost only: the minibatch is assumed to be processed in parallel.
        self.time = self.time + workload / self.computing

    def time_activation_transmit(self):
        """Add the simulated cost of uploading one minibatch of activations."""
        activation = 131072  # bits per activation map (64 * 8 * 8 * 32)
        bits = activation * self.minibatch
        self.time = self.time + bits / self.rate

    def time_model_transmit(self):
        """Add the simulated cost of uploading the client-side model."""
        model_volume = 620544  # client-side AlexNet size in bits
        self.time = self.time + model_volume / self.rate


'''IncrementalStats class for maintaining mean and variance'''
class IncrementalStats:
    """Maintains per-label running weighted means and covariance matrices of
    flattened split-layer activations, combined incrementally batch by batch.

    Attributes:
        device: torch device on which the statistics are stored.
        means: label -> running weighted mean vector (torch.Tensor, shape (n,)).
        variances: label -> running weighted covariance matrix (torch.Tensor, shape (n, n)).
        weight: label -> accumulated total sample weight for that label.
        counts: label -> number of update() calls seen for that label.
    """

    def __init__(self, device):
        self.device = device
        self.means = {}
        self.variances = {}
        self.weight = {}
        self.counts = {}

    def update(self, new_mean, new_cov, new_weight, label):
        """
        Fold a new batch's weighted statistics into the running statistics for `label`.

        :param new_mean: weighted mean feature vector of the new batch (torch.Tensor, shape (n,))
        :param new_cov: weighted covariance matrix of the new batch (torch.Tensor, shape (n, n))
        :param new_weight: total sample weight contributing to the new batch
        :param label: class label whose statistics are updated
        """
        if label not in self.means:
            # First observation for this label: adopt the batch statistics directly.
            self.means[label] = new_mean.to(self.device)
            self.variances[label] = new_cov.to(self.device)
            self.counts[label] = 1
            self.weight[label] = new_weight
            return

        n = new_mean.shape[0]
        # Small ridge term keeps the running covariance positive-definite.
        regularization_term = 1e-5
        I = torch.eye(n).to(self.device)

        old_mean = self.means[label]
        old_cov = self.variances[label]
        old_weight = self.weight[label]
        self.weight[label] = old_weight + new_weight
        # Fraction of the total weight carried by the previously seen data.
        decay_factor = old_weight / self.weight[label]

        self.means[label] = decay_factor * old_mean + (1 - decay_factor) * new_mean
        # Law of total covariance: mix the within-batch covariances with the
        # between-batch shift of each mean relative to the new combined mean.
        self.variances[label] = (
            decay_factor * (old_cov + torch.outer(self.means[label] - old_mean,
                                                  self.means[label] - old_mean))
            + (1 - decay_factor) * (new_cov + torch.outer(self.means[label] - new_mean,
                                                          self.means[label] - new_mean))
            + regularization_term * I
        )
        self.counts[label] += 1

        # If the activation size is large and a full covariance matrix would use too
        # much memory, the covariance can instead be approximated by its diagonal:
        # track per-dimension variances with the analogous scalar update rule.

    def get_stats(self, label):
        """
        Get the weighted mean and variance for the given label.
        :param label: Label
        :return: (mean, covariance) torch.Tensors, or (None, None) if the label is unseen
        """
        return self.means.get(label, None), self.variances.get(label, None)








'''Main Train'''
'''Clients initialization'''
if WRTT is True:
    # With time simulation: pass per-client compute capacity and link rate (time starts at 0).
    clients = [Client(users_data[i], localEpoch, batchSize, clients_computing[i], clients_rates[i], 0) for i in range(user_num)]
else:
    clients = [Client(users_data[i], localEpoch) for i in range(user_num)]

stats = IncrementalStats(device=device)
# NOTE(review): condensed_data is initialized here but never populated in this script — confirm it is intentional.
condensed_data = {c: None for c in range(num_label)}


# Randomly select C (user_parti_num) of the K (user_num) clients to participate.
'''从K个客户端中随机选择C个客户端'''
order = np.random.choice(range(user_num), user_parti_num, replace=False)  
print(f'order = {order}')
'''initialize training time'''
# Time cost: client-side model forward propagation for the C selected clients
# Time cost: activation upload for the C selected clients
if WRTT is True:  
    for i in order:
        clients[i].time_propagation()
        clients[i].time_activation_transmit()

'''Training loop'''
total_accuracy = []
total_v_value = []
local_models_time = []
time_record = []
concat_features = None
concat_labels = None
concat_weight_counts = None
sumClientParam = None
feature_shape = None
count_activation = 0  # number of activations in the activation cache
count_client_model = 0  # number of models in the client-side model cache
count_activa_full = 0  # times the size-Qs activation cache has filled
count_client_mdl_full = 0 # times the size-Qc client-model cache has filled
total_weight_count = 1
train_begin_time = datetime.datetime.now()
while count_client_mdl_full != Epochs:
    user_model.train()
    server_model.train()

    '''select a client in client set order (C clients)'''
    # With simulation, pick the client whose simulated clock is furthest behind.
    if WRTT is True:
        selected_client = find_client_with_min_time(clients, order)
    else:
        selected_client = np.random.choice(order)
    # print(f'selected_client = {selected_client}')
    user_model.load_state_dict(usersParam[np.where(order == selected_client)[0][0]], strict=True)

    '''Client-side model forward propagation'''
    images, labels = clients[selected_client].train_one_iteration()
    # print(f'images.shape = {images.shape}') # torch.Size([batchSize, 3, 32, 32])
    # print(f'labels.shape = {labels.shape}') # torch.Size([batchSize])
    images = images.to(device)
    labels = labels.to(device)
    split_layer_output = user_model(images)
    # print(f'split_layer_output.shape = {split_layer_output.shape}') # torch.Size([batchSize, 64, 8, 8])
    if feature_shape is None:
        feature_shape = split_layer_output[0].shape

    '''Define the weight vector to record the weight of each activation value'''
    weight_count = clients[selected_client].weight_count
    weight_vector = torch.tensor([weight_count] * split_layer_output.size(0))

    '''generate concatenated activation'''
    count_activation += 1
    # print(f"count_activation = {count_activation}")
    if concat_features is None:
        concat_features = split_layer_output.detach()
        concat_labels = labels
        concat_weight_counts = weight_vector
    else:
        concat_features = torch.cat((concat_features, split_layer_output.detach()), dim=0)
        concat_labels = torch.cat((concat_labels, labels), dim=0)
        concat_weight_counts = torch.cat((concat_weight_counts, weight_vector), dim=0)

    '''Update weight of clients'''
    clients[selected_client].weight_count = clients[selected_client].weight_count + 1

    '''Server-side model forward propagation'''
    local_output = server_model(split_layer_output)
    # print(f'local_output.shape = {local_output.shape}') # torch.Size([32, 10])
    # print(f'local_output[0] = {local_output[0]}') 
    # print(f'logit_local_adjustments[selected_client].shape = {logit_local_adjustments[selected_client].shape}') # torch.Size([10])


    '''client-side model update'''
    # localLoss = criterion(local_output, labels)
    # Logit-adjusted loss compensates for the client's local label distribution.
    localLoss = criterion(local_output + logit_local_adjustments[selected_client], labels.long())
    optimizer_down.zero_grad()
    localLoss.backward()
    if clip_grad:
        torch.nn.utils.clip_grad_norm_(parameters=user_model.parameters(), max_norm=10)
    optimizer_down.step()
    usersParam[np.where(order == selected_client)[0][0]] = copy.deepcopy(user_model.state_dict())
    if WRTT is True:  # Record the time of the backward pass
        # Time cost: backward pass of the client-side model on selected_client
        clients[selected_client].time_propagation()


    '''Activations generation and server-side model update'''
    if count_activation == user_parti_num:
        count_activa_full += 1 # times the size-Qs activation cache has filled
        print(f"count_activa_full = {count_activa_full}")
        '''Activation distributions update'''
        unique_labels, counts = concat_labels.unique(return_counts=True)  # Count how many of each label

        label_weights = {}
        concat_weight_counts = concat_weight_counts.to(device)
        for label in unique_labels:
            mask = (concat_labels == label)
            weights_of_label = concat_weight_counts[mask].float()
            label_weights[label.item()] = weights_of_label.sum().item()

        '''Calculate mean and variance'''
        flatten_features = concat_features.flatten(start_dim=1)
        for label in unique_labels:
            mask = (concat_labels == label)
            features_of_label = flatten_features[mask]
            weights_of_label = concat_weight_counts[mask].float()
            total_weight = weights_of_label.sum()
            # Weighted mean and weighted covariance of this label's activations.
            mean_feature = torch.sum(features_of_label * weights_of_label[:, None], dim=0) / total_weight
            centered_features = features_of_label - mean_feature
            cov_matrix = torch.matmul((centered_features * weights_of_label[:, None]).T,
                                      centered_features) / total_weight
            stats.update(mean_feature, cov_matrix, label_weights[label.item()], label.item())
            ''' If the activation size is large and the covariance matrix of the activation values
                occupies a significant amount of memory, approximate the covariance matrix using a diagonal matrix.'''
            # var_vector = torch.sum((centered_features ** 2) * weights_of_label[:, None], dim=0) / total_weight
            # stats.update(mean_feature, var_vector, label_weights[label.item()], label.item())

        '''Activations generation'''
        if Generate is True:
            if count_activa_full % Sample_Frequency == 0:
                # Ensure that all labels have mean and variance
                all_labels_have_stats = True
                for label in range(num_label):
                    if stats.get_stats(label) == (None, None):
                        all_labels_have_stats = False
                        break
                if all_labels_have_stats:
                    concat_features, concat_labels = sample_or_generate_features(concat_features, concat_labels,
                                                                                 batchSize, num_label, feature_shape,
                                                                                 device, stats)

        '''server-side model forward propagation'''
        # count_activa_full is the number of server-side model updates
        for param in server_model.parameters():
            param.requires_grad = True
        final_output = server_model(concat_features)
        # print(f'concat_features.shape = {concat_features.shape}') # torch.Size([320, 64, 8, 8])
        # print(f'final_output.shape = {final_output.shape}') # torch.Size([320, 10])
        # print(f'concat_labels.shape = {concat_labels.shape}') # torch.Size([320])
        '''server-side model update'''
        loss = criterion(final_output, concat_labels.long())
        optimizer_up.zero_grad()
        loss.backward()
        if clip_grad:
            torch.nn.utils.clip_grad_norm_(parameters=server_model.parameters(), max_norm=10)
        optimizer_up.step()

        if V_Test is True: # Calculate Gradient Dissimilarity
            # Snapshot the full buffer so gradient dissimilarity can be measured later.
            concat_labels_V = copy.deepcopy(concat_labels)
            concat_features_V = copy.deepcopy(concat_features)
        # Reset the activation cache for the next fill cycle.
        count_activation = 0
        concat_labels = None
        concat_features = None
        concat_weight_counts = None


    replace = clients[selected_client].count_local_itera_E()
    # If selected_client has completed E local iterations, it sends its client-side model parameters to the server
    if replace:  # If local iterations are completed, select a new client
        count_client_model += 1 # number of client models in the buffer
        print(f"count_client_model = {count_client_model}")
        '''Client-side model transmission'''
        if WRTT is True:  # Record the time of model upload
            # Time cost: uploading selected_client's client-side model
            clients[selected_client].time_model_transmit()
            local_models_time.append(clients[selected_client].time)
        '''Client-side models aggregation'''
        ind_select_client_order = np.where(order == selected_client)[0][0]
        # NOTE(review): sumClientParam aliases usersParam[ind_select_client_order] and is scaled
        # in place; the aliased entry is replaced below under '''Client-side model distribution''',
        # so the mutation appears unobserved — confirm.
        if sumClientParam is None:
            sumClientParam = usersParam[ind_select_client_order]
            for key in usersParam[ind_select_client_order]:
                sumClientParam[key] = usersParam[ind_select_client_order][key] * (1 / user_parti_num)
        else:
            for key in usersParam[ind_select_client_order]:
                sumClientParam[key] += usersParam[ind_select_client_order][key] * (1 / user_parti_num)

        if count_client_model == user_parti_num:  # if the buffer is full, update the client model 
            total_weight_count += count_activa_full
            '''global Client-side models update'''
            userParam = copy.deepcopy(sumClientParam)
            sumClientParam = None
            count_client_model = 0

            test_flag = ((count_client_mdl_full + 1) % Accu_Test_Frequency == 0)
            count_client_mdl_full += 1

            if WRTT:
                if test_flag:
                    time_record.append(max(local_models_time))
                local_models_time = []

            # Accuracy test
            if test_flag:
                user_model.eval()
                server_model.eval()
                user_model.load_state_dict(userParam, strict=True)
                with torch.no_grad():
                    correct = 0
                    total = 0
                    for images, labels in test_loader:
                        images, labels = images.to(device), labels.to(device)
                        output = user_model(images)
                        output = server_model(output)
                        _, predicted = torch.max(output.data, 1)
                        total += labels.size(0)
                        correct += (predicted == labels).sum().item()
                    accuracy = correct / total
                total_accuracy.append(accuracy)
                print(f"count_client_mdl_full (Global iteration) = {count_client_mdl_full}")
                # NOTE(review): time_record is only appended when WRTT is True, so the next line
                # raises IndexError when WRTT is False — confirm or guard.
                print(f"Time (simula) = {time_record[-1]:.4f}(s), Accuracy = {total_accuracy[-1]:.4f}")
                print()

            # V test
            if V_Test and (count_client_mdl_full + 1) % V_Test_Frequency == 0:
                # NOTE(review): concat_features_V/concat_labels_V are only bound after the
                # activation cache has filled at least once with V_Test enabled; otherwise
                # this raises NameError — confirm ordering.
                v_value = calculate_v_value(server_model, user_model, concat_features_V, concat_labels_V, test_loader,
                                            criterion, device)
                print(f"Epoch {count_client_mdl_full + 1}, V Value: {v_value}")
                total_v_value.append(v_value)


        '''Client-side model distribution'''
        index = np.where(order == selected_client)[0][0]
        # print(f"new client index = {index}")
        # NOTE(review): this shares the userParam dict object between slots; each slot is later
        # replaced with a deepcopy when its client trains, so no in-place aliasing is observed — confirm.
        usersParam[index] = userParam

        '''select new client'''
        # Randomly pick 1 of the K clients not currently among the C clients in `order`
        # and substitute it for selected_client in `order`.
        print(f"order = {order}")
        order = replace_user(order, selected_client, user_num)
        print(f"order = {order}")
        # print(f"old client {selected_client} out order")
        # print(f"new client {order[index]} in order")
        clients[order[index]].weight_count = total_weight_count
        if WRTT is True:  # Initialize the time for the new client
            # The newcomer inherits the departing client's simulated clock.
            begin_time = clients[selected_client].time
            clients[order[index]].time = begin_time
            # Time cost: client-side model forward propagation for client order[index]
            # Time cost: activation upload for client order[index]
            clients[order[index]].time_propagation()
            clients[order[index]].time_activation_transmit()
    else:
        if WRTT is True:  # Record the training time if the client continues with local iterations
            # Time cost: client-side model forward propagation for selected_client
            # Time cost: activation upload for selected_client
            clients[selected_client].time_propagation()
            clients[selected_client].time_activation_transmit()










'''Output results'''
# Dump the raw result lists to stdout for quick inspection.
print(f"time_record = {time_record}")
print(f"total_accuracy = {total_accuracy}")
print(f"total_v_value = {total_v_value}")
time_record_str = ', '.join(str(x) for x in time_record)
total_accuracy_str = ', '.join(str(x) for x in total_accuracy)
total_v_value_str = ', '.join(str(x) for x in total_v_value)
print('time = [' + time_record_str + ']')
print('GAS = [' + total_accuracy_str + ']')

end_time = datetime.datetime.now()
begin_time_str = train_begin_time.strftime("%Y-%m-%d %H:%M:%S")
end_time_str = end_time.strftime("%Y-%m-%d %H:%M:%S")

# Human-readable labels for the experiment summary written below.
selectDataset = "cifar10" if cifar else "mnist" if mnist else "fmnist" if fmnist else "cinic" if cinic else "cifar100" \
    if cifar100 else "SVHN" if SVHN else "None"
selectMethod = "Generative Activation-Aided" if Generate else "Original"
IfCilp = "clip" if clip_grad else "not clip"

# Persist configuration and results ('w' mode overwrites any previous run's file).
with open('GAS_main.txt', 'w') as f:
    if dirichlet:
        f.write(
            f'seed_value: {seed_value}; alpha: {alpha}; epochs: {Epochs}; {selectDataset}; local epoch: {localEpoch}; {IfCilp};\n'
            f'num of clients: {user_num}; num of participating clients: {user_parti_num}; batchsize: {batchSize}; learning rate: {lr}; \n'
            f'Method: {selectMethod}; sample frequency: {Sample_Frequency}; \n')
    else:
        f.write(
            f'seed_value: {seed_value}; shard: {shard}; epochs: {Epochs}; {selectDataset}; local epoch: {localEpoch}; {IfCilp};\n'
            f'num of clients: {user_num}; num of participating clients: {user_parti_num}; batchsize: {batchSize}; learning rate: {lr}; \n'
            f'Method: {selectMethod}; sample frequency: {Sample_Frequency}; \n')
    if V_Test is True: # Calculate Gradient Dissimilarity
        f.write(f'Test Frequency is {V_Test_Frequency}; \n')
        f.write('Gradient Dissimilarity = [' + total_v_value_str + ']\n')

    if WRTT is True:
        # Record the simulated environment alongside the timing results.
        clients_computing_str = ', '.join(str(x) for x in clients_computing)
        clients_position_str = ', '.join(str(x) for x in clients_position)
        clients_rates_str = ', '.join(str(x) for x in clients_rates)
        f.write('clients computing = [' + clients_computing_str + ']\n')
        f.write('clients position = [' + clients_position_str + ']\n')
        f.write('clients rates = [' + clients_rates_str + ']\n')
        f.write('time = [' + time_record_str + ']\n')

    f.write(f'{begin_time_str} ~ {end_time_str};\n')
    f.write('GAS = [' + total_accuracy_str + ']\n')


