'''
Brendan McMahan, Eider Moore, Daniel Ramage, Seth Hampson, and Blaise Aguera y Arcas, “Communication-efficient learning of deep networks from decentralized data,” in Proc. Conf. Mach. Learn. Res., Fort Lauderdale, FL, USA, Apr. 2017, pp. 1–10.
'''

import sys
import os

# Make the local 'utils' directory importable: network_AlexNet, dataset,
# and the generate_* helpers live next to this script.
current_dir = os.path.dirname(os.path.abspath(__file__))
utils_path = os.path.join(current_dir, 'utils')
if utils_path not in sys.path:
    sys.path.append(utils_path)

# Imports grouped stdlib / third-party / project-local.
# (The duplicate `import os` that used to sit here was removed; `os` is
# already imported above.)
import copy
import datetime
import random

import numpy as np
import torch
import torch.nn as nn
import torch.utils.data.dataloader as dataloader

from network_AlexNet import model_selection
from dataset import Dataset, Data_Partition
from utils import generate_computing, generate_position, generate_communication_rate

begin_time = datetime.datetime.now()  # wall-clock start, logged at the end of the run


os.environ["CUDA_VISIBLE_DEVICES"] = "0"  # pin the process to GPU 0
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

'''Experimental parameter settings'''
iid = False        # IID data split across clients?
dirichlet = False  # non-IID flavor: Dirichlet partition (True) vs shard partition (False)
shard = 2          # shards per client for the shard-based non-IID split
alpha = 0.1        # Dirichlet concentration parameter (smaller = more skewed)
Rounds = 2000      # number of communication rounds
localEpoch = 20    # local iterations per client per round (E)
user_num = 20      # total number of clients (K)
user_parti_num = 10  # clients sampled per round (m = C·K)
batchSize = 32     # local minibatch size
lr = 0.01          # SGD learning rate
print(f'iid = {iid}')
print(f'dirichlet = {dirichlet}')
print(f'shard = {shard}')
print(f'localEpoch (E) = {localEpoch}')
print(f'user_num (K) = {user_num}')
print(f'user_parti_num (C=Qs=Qc) = {user_parti_num}')
print(f'batchSize = {batchSize}')

'''Random seeds selection'''
# Fix every RNG source used by this script. The stdlib `random` module is
# seeded too, because `random.shuffle` is used below to permute the training
# indices before partitioning; without seeding it, the client data split
# differed between runs even though torch/numpy were seeded.
seed_value = 2023
random.seed(seed_value)
torch.manual_seed(seed_value)
np.random.seed(seed_value)
torch.cuda.manual_seed(seed_value)       # no-op on CPU-only machines
torch.cuda.manual_seed_all(seed_value)   # covers multi-GPU setups


Accu_Test_Frequency = 1  # evaluate on the test set every this many rounds

'''Simulate real communication environments'''
WRTT = True   # True for simulation, False for no simulation
if WRTT is True:
    # Per-client draws for the timing model: device compute speed, spatial
    # position, and the link rate derived from that position.
    # NOTE(review): units (FLOPs/s, bits/s) are assumed from how these values
    # are consumed in Client.time_propagation/time_model_transmit — confirm
    # against the generate_* helpers in utils.
    clients_computing = generate_computing(user_num)
    clients_position = generate_position(user_num)
    clients_rates = generate_communication_rate(user_num, clients_position)
    print(f'Simulate real communication environments')


'''Training data selection'''
# Exactly one dataset flag should be True; the first True flag in the chain
# below wins.
cifar = True
mnist = False
fmnist = False
cinic = False
cifar100 = False
SVHN = False
selectDataset = "cifar10" if cifar else "mnist" if mnist else "fmnist" if fmnist else "cinic" if cinic else "cifar100" if cifar100 else "SVHN" if SVHN else "None"
print(f'selectDataset = {selectDataset}')

# Number of target classes (only CIFAR-100 differs).
if cifar100:
    classOfLabel = 100
else:
    classOfLabel = 10

'''Data loading and preprocessing'''
alldata, alllabel, test_set, transform = Dataset(cifar=cifar, mnist=mnist, fmnist=fmnist, cinic=cinic, cifar100=cifar100, SVHN=SVHN)
test_loader = dataloader.DataLoader(dataset=test_set, batch_size=128, shuffle=True)
# Shuffle the whole training set once before splitting it among clients.
# NOTE(review): reproducibility of this shuffle requires the stdlib `random`
# module to be seeded in the seeding section above — confirm it is.
train_index = np.arange(0, len(alldata))
random.shuffle(train_index)
train_img = np.array(alldata)[train_index]
train_label = np.array(alllabel)[train_index]
# Partition data among users
users_data = Data_Partition(iid, dirichlet, train_img, train_label, transform,
                            user_num, batchSize, alpha, shard, drop=False, classOfLabel=classOfLabel)

# =========================================================
# ==============      initialization        ===============
# =========================================================

'''Model initialization'''
# One global model is reused for every client's local training; `userParam`
# holds the current global weights that are pushed to each selected client at
# the start of every round and replaced by the aggregate at the end of it.
model = model_selection(cifar=cifar, mnist=mnist, fmnist=fmnist, cinic=cinic, cifar100=cifar100, SVHN=SVHN, split=False)
model.to(device)
userParam = copy.deepcopy(model.state_dict())
# NOTE(review): a single SGD optimizer (with momentum) is shared across all
# clients — momentum buffers persist across load_state_dict swaps; verify this
# matches the intended FedAvg variant.
optimizer = torch.optim.SGD(model.parameters(), lr=lr, momentum=0.9, weight_decay=0.0005)
criterion = nn.CrossEntropyLoss()





class Client:
    """A simulated federated-learning client holding one local data partition.

    Besides serving minibatches from its DataLoader, the client can accumulate
    a simulated wall-clock time composed of per-pass compute delays and a
    model-upload delay, used when the WRTT communication simulation is on.
    """

    def __init__(self, user_data, local_epoch, minibatch=0, computing=0, rate=0, time=0):
        self.user_data = user_data              # local DataLoader (any iterable of batches works)
        self.dataloader_iter = iter(user_data)  # live iterator, restarted on exhaustion
        self.local_epoch = local_epoch          # local iterations per round (E)
        self.count = 0                          # iterations completed in the current round
        '''Calculation of time'''
        self.minibatch = minibatch              # batch size (kept for the timing model)
        self.computing = computing              # device speed — presumably FLOPs/s; verify
        self.rate = rate                        # uplink rate — presumably bits/s; verify
        self.time = time                        # accumulated simulated time (seconds)

    def increment_counter(self):
        """Advance the local-iteration counter; True exactly when a round completes."""
        self.count += 1
        if self.count != self.local_epoch:
            return False
        self.count = 0
        return True

    def train_one_iteration(self):
        """Return the next minibatch, restarting the loader when it is exhausted."""
        try:
            return next(self.dataloader_iter)
        except StopIteration:
            self.dataloader_iter = iter(self.user_data)
            return next(self.dataloader_iter)

    '''Calculation of time'''
    def time_propagation(self):
        """Add the simulated compute delay of one forward or backward pass."""
        per_image_flops = 69833728  # workload of one image, in FLOPs
        self.time += per_image_flops / self.computing

    def time_model_transmit(self):
        """Add the simulated delay of uploading the full model once."""
        payload = 123799872  # model volume — presumably bits; verify units
        self.time += payload / self.rate


'''Initialize clients'''
# One Client per user. With the communication simulation enabled, each client
# additionally receives its compute speed and link rate; simulated time starts
# at zero for everyone.
if WRTT is True:
    clients = [
        Client(users_data[idx], localEpoch, batchSize,
               clients_computing[idx], clients_rates[idx], 0)
        for idx in range(user_num)
    ]
else:
    clients = [Client(users_data[idx], localEpoch) for idx in range(user_num)]

# =========================================================
# ================       training        ==================
# =========================================================

total_accuracy = []   # test accuracy at each evaluation round
total_loss = []       # mean (last-iteration) training loss per round
if WRTT is True:  # record the training time
    time_record = []  # simulated clock value at each evaluation round
    current_time = 0  # global simulated clock (seconds)

# Main FedAvg loop: each round samples a subset of clients, trains each one
# locally starting from the current global weights, then aggregates the
# resulting models weighted by local dataset size.
for n_round in range(Rounds):
    model.train()

    sumParam = None   # running weighted sum of client state_dicts (new global model)
    avgloss = 0       # sum of each selected client's final-iteration loss

    '''Randomly select user_parti_num users'''
    # m ← max(C · K, 1)
    # St ← (random set of m clients)
    order = np.random.choice(range(user_num), user_parti_num, replace=False)
    print(f'round={n_round+1}/{Rounds}, The selected clients: {order}')
    test_flag = ((n_round + 1) % Accu_Test_Frequency == 0)

    if WRTT is True:  # record the training time
        local_models_time = []
        # Every selected client starts the round at the global simulated time;
        # compute/upload delays accumulate per client during local training and
        # the round's finish time is taken after the client loop below.
        for kk in order:
            clients[kk].time = current_time

    # mt ← ∑  k∈St nk
    total_data_points = sum([len(users_data[kk].dataset) for kk in order])
    # nk / mt — each client's aggregation weight is its share of this round's data.
    for kk in order:
        clients[kk].fed_avg_freq = len(users_data[kk].dataset) / total_data_points

    for kk in order:
        '''1. The server transmits model parameters to the client'''
        # Deep copy so client-side training cannot mutate the server's copy.
        model.load_state_dict(copy.deepcopy(userParam), strict=True)

        # for each local epoch i from 1 to E do
        # NOTE(review): each "epoch" here is a single minibatch step — E counts
        # local iterations, not passes over the client's dataset.
        for iteration in range(localEpoch):
            images, labels = clients[kk].train_one_iteration()
            images = images.to(device)
            labels = labels.to(device)

            '''2. The client performs forward propagation'''
            final_output = model(images)
            loss = criterion(final_output, labels.long())
            if WRTT is True:  # simulated forward-pass compute time
                clients[kk].time_propagation()
            
            '''3. The client performs backward propagation'''
            # w ← w − η \delta l(w; b)
            optimizer.zero_grad()
            loss.backward()
            torch.nn.utils.clip_grad_norm_(parameters=model.parameters(), max_norm=10)
            optimizer.step()
            if WRTT is True:  # simulated backward-pass compute time
                clients[kk].time_propagation()
        # Only the client's final-iteration loss contributes to the round average.
        avgloss += loss.item()

        '''4. The client transmits model parameters to the server'''
        if WRTT is True:  # simulated model-upload time
            clients[kk].time_model_transmit()
            local_models_time.append(clients[kk].time)

        '''The server aggregates model parameters from the client'''
        # Weighted sum: w_global = Σ_k (n_k / m_t) · w_k.
        if sumParam is None:
            sumParam = model.state_dict()
            for key in model.state_dict():
                sumParam[key] = model.state_dict()[key].clone() * clients[kk].fed_avg_freq
        else:
            for key in model.state_dict():
                sumParam[key] += model.state_dict()[key].clone() * clients[kk].fed_avg_freq
    
    '''record the training time for this round'''
    # Synchronous aggregation: the round finishes when the slowest client does.
    if WRTT is True:
        current_time = max(local_models_time)


    userParam = copy.deepcopy(sumParam)

    avgloss = avgloss / user_parti_num
    total_loss.append(avgloss)
    


    '''testing per round'''
    model.eval()
    model.load_state_dict(userParam, strict=True)
    if test_flag:
        with torch.no_grad():
            correct = 0
            total = 0
            for (images, labels) in test_loader:
                images, labels = images.to(device), labels.to(device)
                output = model(images)
                _, predicted = torch.max(output.data, 1)
                total += labels.size(0)
                correct += (predicted == labels).sum().item()
            total_accuracy.append(correct / total)
        if WRTT is True:
            time_record.append(current_time)
            print(f'round={n_round+1}/{Rounds}, Time={current_time:.2f}(s), Accuracy={total_accuracy[-1]:.3f}')   
        else:     
            print(f'round={n_round+1}/{Rounds}, Accuracy={total_accuracy[-1]:.3f}')   








'''Output results'''
# Summarize the run on stdout. The simulated-time trace only exists when WRTT
# is enabled, so everything touching time_record is kept inside the guard —
# the original printed time_record_str unconditionally, which raised a
# NameError whenever WRTT was False.
if WRTT is True:
    print(f"time_record = {time_record}")
    time_record_str = ', '.join(str(x) for x in time_record)
print(f"total_accuracy = {total_accuracy}")
total_accuracy_str = ', '.join(str(x) for x in total_accuracy)
if WRTT is True:
    print('time = [' + time_record_str + ']')
print('FedAvg = [' + total_accuracy_str + ']')
print(total_loss)

end_time = datetime.datetime.now()

# Human-readable timestamps for the log file.
begin_time_str = begin_time.strftime("%Y-%m-%d %H:%M:%S")
end_time_str = end_time.strftime("%Y-%m-%d %H:%M:%S")

# (The original recomputed selectDataset here with the exact ternary chain
# already evaluated near the top of the script; the dataset flags never change
# in between, so the duplicate computation was redundant and is dropped.)

# Persist the experiment configuration and results. Mode 'w' overwrites any
# previous FedAvg.txt from an earlier run.
with open('FedAvg.txt', 'w') as f:
    if dirichlet:
        f.write(
            f'seed_value: {seed_value}; alpha: {alpha}; Rounds: {Rounds}; {selectDataset}; local n_round: {localEpoch}; \n'
            f'num of clients: {user_num}; num of participating clients: {user_parti_num}; batchsize: {batchSize}; learning rate: {lr}; \n')
    else:
        f.write(
            f'seed_value: {seed_value}; shard: {shard}; Rounds: {Rounds}; {selectDataset}; local n_round: {localEpoch}; \n'
            f'num of clients: {user_num}; num of participating clients: {user_parti_num}; batchsize: {batchSize}; learning rate: {lr}; \n')
    f.write(begin_time_str + ' ~ ' + end_time_str + '\n')
    if WRTT is True:
        # The simulated-environment draws and time trace only exist when the
        # WRTT simulation was enabled.
        clients_computing_str = ', '.join(str(x) for x in clients_computing)
        clients_position_str = ', '.join(str(x) for x in clients_position)
        clients_rates_str = ', '.join(str(x) for x in clients_rates)
        f.write('clients computing = [' + clients_computing_str + ']\n')
        f.write('clients position = [' + clients_position_str + ']\n')
        f.write('clients rates = [' + clients_rates_str + ']\n')
        f.write('time = [' + time_record_str + ']\n')
    f.write('FedAvg = [' + total_accuracy_str + ']\n')

