#============================================================================
# Splitfed learning: ResNet18 on HAM10000
# HAM10000 dataset: Tschandl, P.: The HAM10000 dataset, a large collection of multi-source dermatoscopic images of common pigmented skin lesions (2018), doi:10.7910/DVN/DBW86T

# We have three versions of our implementations
# Version1: without using socket and no DP+PixelDP
# Version2: with using socket but no DP+PixelDP
# Version3: without using socket but with DP+PixelDP

# This program is Version1: Single program simulation 
# ============================================================================
import torch
from torch import nn
from torch.utils.data import DataLoader
from pandas import DataFrame
import random
import numpy as np
import copy

from resnet_module import ResNet18_client_side
from resnet_module import Baseblock
from resnet_module import ResNet18_server_side
from dataloading import dataloadingHAM10000
from dataloading import dataset_iid
from dataloading import DatasetSplit
from dataloading import calculate_accuracy
from dataloading import FedAvg

SEED = 1234
# Seed every RNG source (python, numpy, torch CPU and CUDA) so runs are reproducible.
random.seed(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
if torch.cuda.is_available():
    # force deterministic cuDNN kernels (may be slower, but reproducible)
    torch.backends.cudnn.deterministic = True
    print(torch.cuda.get_device_name(0))
# run on GPU when available, otherwise fall back to CPU
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# To print in color -------test/train of the client side
def prRed(skk): print("\033[91m {}\033[00m" .format(skk)) 
def prGreen(skk): print("\033[92m {}\033[00m" .format(skk)) 


#===================================================================
# Select which splitfed variant this run simulates: exactly one of the
# two flags is True (SFLV1 = per-client server models, SFLV2 = shared one).
SFLV2 = False
SFLV1 = not SFLV2
program = "SFLV1 ResNet18 on HAM10000" if SFLV1 else "SFLV2 ResNet18 on HAM10000"
print(f"---------{program}----------") # this is to identify the program in the slurm outputs files
    


#===================================================================
num_users = 5 # No. of users
epochs = 200 # number of global communication rounds
frac = 1        # participation of clients; if 1 then 100% clients participate in SFL
lr = 0.0001 # learning rate used by both client- and server-side Adam optimizers


print(f"Client-side Model definition")
# Global client-side sub-model; clients train deep copies of it each round.
net_glob_client = ResNet18_client_side()
if torch.cuda.device_count() > 1:
    print("We use",torch.cuda.device_count(), "GPUs")
    net_glob_client = nn.DataParallel(net_glob_client)   # to use the multiple GPUs; later we can change this to CPUs only 

net_glob_client.to(device)
# print(net_glob_client)   

print(f"Server-side Model definition")
# Global server-side sub-model; [2,2,2] is presumably the per-stage block
# counts — TODO confirm against ResNet18_server_side in resnet_module.
net_glob_server = ResNet18_server_side(Baseblock, [2,2,2], 7) #7 is my number of classes
if torch.cuda.device_count() > 1:
    print("We use",torch.cuda.device_count(), "GPUs")
    net_glob_server = nn.DataParallel(net_glob_server)   # to use the multiple GPUs 

net_glob_server.to(device)
# print(net_glob_server)      

#===================================================================================
# For Server Side Loss and Accuracy 
loss_train_collect = []  # per-round average train loss over all users
acc_train_collect = []   # per-round average train accuracy over all users
loss_test_collect = []   # per-round average test loss over all users
acc_test_collect = []    # per-round average test accuracy over all users
batch_acc_train = []     # per-batch train accuracy, reset at the end of each local epoch
batch_loss_train = []    # per-batch train loss, reset at the end of each local epoch
batch_acc_test = []      # per-batch test accuracy, reset at the end of each local epoch
batch_loss_test = []     # per-batch test loss, reset at the end of each local epoch

# shared loss for the server-side forward pass
criterion = nn.CrossEntropyLoss()
count1 = 0  # counts train batches processed for the current client
count2 = 0  # counts test batches processed for the current client



#====================================================================================================
#                                  Server Side Programs
#====================================================================================================
# to print train - test together in each round-- these are made global
acc_avg_all_user_train = 0
loss_avg_all_user_train = 0
loss_train_collect_user = []  # per-user train loss collected within the current round
acc_train_collect_user = []   # per-user train accuracy collected within the current round
loss_test_collect_user = []   # per-user test loss collected within the current round
acc_test_collect_user = []    # per-user test accuracy collected within the current round

if SFLV1:
    w_glob_server = net_glob_server.state_dict()
    w_locals_server = []
    # Initialization of net_model_server and net_server (server-side model)
    # List of server-side models, one slot per client.
    # NOTE(review): every entry aliases the same net_glob_server object;
    # train_server() deep-copies the entry before use, so this is harmless here.
    net_model_server = [net_glob_server for i in range(num_users)]
    net_server = copy.deepcopy(net_model_server[0]).to(device)
    #optimizer_server = torch.optim.Adam(net_server.parameters(), lr = lr)

#client idx collector
idx_collect = []       # ids of clients that finished their local epochs this round
l_epoch_check = False  # set when a client completes its last local epoch
fed_check = False      # set when all num_users clients are done (federation happened)

'''Server-side function associated with Training''' 
def train_server(activation_detach, y, l_epoch_count, l_epoch, idx, len_batch):
    """Run the server-side forward/backward pass for one client train batch.

    Receives the detached client activation (smashed data), completes the
    forward pass through the server-side model, back-propagates the loss and
    returns the activation gradient so the client can finish its own backward
    pass. Also accumulates per-batch metrics and triggers the server-side
    federation once every client has finished its local epochs.

    Args:
        activation_detach: client-side output tensor, detached with
            requires_grad=True so its .grad can be read after backward.
        y: ground-truth labels for the batch.
        l_epoch_count: current local-epoch index of the client.
        l_epoch: total number of local epochs per client.
        idx: client index.
        len_batch: number of batches in the client's train loader.

    Returns:
        Tensor: gradient w.r.t. the activation, to be sent back to the client.
    """
    if SFLV1: # each client has its own server-side model (net_server)
        global net_model_server, criterion, optimizer_server
        global w_locals_server, w_glob_server, net_server
    elif SFLV2: # all clients share a single server-side model (net_glob_server)
        global net_glob_server, criterion
    global device, batch_acc_train, batch_loss_train, l_epoch_check, fed_check
    global loss_train_collect, acc_train_collect, count1, acc_avg_all_user_train, loss_avg_all_user_train, idx_collect
    global loss_train_collect_user, acc_train_collect_user, lr
    
    if SFLV1: # net_server is the server-side model paired with client idx
        net_server = copy.deepcopy(net_model_server[idx]).to(device)
        net_server.train()
        optimizer_server = torch.optim.Adam(net_server.parameters(), lr = lr)
    elif SFLV2: # one shared server-side model serves every client
        net_glob_server.train()
        optimizer_server = torch.optim.Adam(net_glob_server.parameters(), lr = lr)

    
    # train and update
    optimizer_server.zero_grad()
    
    activation_detach = activation_detach.to(device)
    y = y.to(device)
    
    '''Server-side model FP'''
    if SFLV1:
        fx_server = net_server(activation_detach)
    elif SFLV2:
        fx_server = net_glob_server(activation_detach)
    
    # calculate loss
    loss = criterion(fx_server, y)
    # calculate accuracy
    acc = calculate_accuracy(fx_server, y)
    
    '''Server-side model BP'''
    loss.backward()
    # gradient at the cut layer, to be shipped back to the client
    gradient_acti = activation_detach.grad.clone().detach()
    optimizer_server.step()
    
    batch_loss_train.append(loss.item())
    batch_acc_train.append(acc.item())
    
    # SFLV1: store the per-client server-side model updated by this batch.
    # BUGFIX: this assignment was previously unguarded; under SFLV2 neither
    # net_model_server nor net_server exists, so it raised NameError.
    if SFLV1:
        net_model_server[idx] = copy.deepcopy(net_server)
    # SFLV2: server-side model net_glob_server is global so it is updated automatically in each pass to this function

    # count1: to track the completion of the local batch associated with one client
    count1 += 1
    if count1 == len_batch:
        acc_avg_train = sum(batch_acc_train)/len(batch_acc_train) # average over this local epoch's batches
        loss_avg_train = sum(batch_loss_train)/len(batch_loss_train)
        
        batch_acc_train = []
        batch_loss_train = []
        count1 = 0
        
        prRed('Client{} Train => Local Epoch: {} \tAcc: {:.3f} \tLoss: {:.4f}'.format(idx, l_epoch_count, acc_avg_train, loss_avg_train))
        
        if SFLV1:
            # copy the last trained model in the batch
            # w_server holds the weights of client idx's server-side model
            w_server = net_server.state_dict()      
        
        # If one local epoch is completed, after this a new client will come
        if l_epoch_count == l_epoch-1:
            l_epoch_check = True # to evaluate_server function - to check local epoch has completed or not
            if SFLV1:
                # w_locals_server collects the server-side weights of every client
                w_locals_server.append(copy.deepcopy(w_server))
            
            # we store the last accuracy in the last batch of the epoch and it is not the average of all local epochs
            # this is because we work on the last trained model and its accuracy (not earlier cases)
            acc_avg_train_all = acc_avg_train
            loss_avg_train_all = loss_avg_train
                        
            # accumulate accuracy and loss for each new user
            loss_train_collect_user.append(loss_avg_train_all)
            acc_train_collect_user.append(acc_avg_train_all)
            
            # collect the id of each new user                        
            if idx not in idx_collect:
                idx_collect.append(idx) 
        
        '''federation process'''
        if len(idx_collect) == num_users:
            fed_check = True # to evaluate_server function - to check federation has happened
            # Federation process at Server-Side------------------------- 
            # output print and update is done in evaluate_server() for nicer display 
            if SFLV1: # FedAvg-aggregate the per-client server-side models
                '''server-side global model update'''                    
                w_glob_server = FedAvg(w_locals_server)   
                net_glob_server.load_state_dict(w_glob_server)    
                '''distribute server-side global model to all clients'''
                net_model_server = [net_glob_server for i in range(num_users)]
                w_locals_server = []
            idx_collect = []
            acc_avg_all_user_train = sum(acc_train_collect_user)/len(acc_train_collect_user)
            loss_avg_all_user_train = sum(loss_train_collect_user)/len(loss_train_collect_user)
            loss_train_collect.append(loss_avg_all_user_train)
            acc_train_collect.append(acc_avg_all_user_train)
            acc_train_collect_user = []
            loss_train_collect_user = []
    '''send gradients to the client'''               
    return gradient_acti

'''Server-side functions associated with Testing'''
def evaluate_server(activation_detach, y, idx, len_batch, ell):
    """Run the server-side forward pass for one client test batch.

    Computes loss/accuracy on the smashed data under torch.no_grad() and,
    when a local epoch or a federation round has just completed (flags set
    by train_server), aggregates and prints the round statistics.

    Args:
        activation_detach: client-side output tensor (smashed data).
        y: ground-truth labels for the batch.
        idx: client index.
        len_batch: number of batches in the client's test loader.
        ell: current global round number (used only for logging).
    """
    if SFLV1:
        # BUGFIX: dropped the stale name 'check_fed' from this global list;
        # the flag actually used throughout the file is 'fed_check' (below).
        global net_model_server, net_server
        global w_glob_server
    global net_glob_server, criterion, batch_acc_test, batch_loss_test
    global loss_test_collect, acc_test_collect, count2, num_users, acc_avg_train_all, loss_avg_train_all, l_epoch_check, fed_check
    global loss_test_collect_user, acc_test_collect_user, acc_avg_all_user_train, loss_avg_all_user_train
    
    if SFLV1:
        # evaluate with the server-side model paired with client idx
        net = copy.deepcopy(net_model_server[idx]).to(device)
        net.eval()
    elif SFLV2:
        net_glob_server.eval()
  
    with torch.no_grad():
        activation_detach = activation_detach.to(device)
        y = y.to(device) 
        #---------forward prop-------------
        if SFLV1:
            fx_server = net(activation_detach)
        elif SFLV2:
            fx_server = net_glob_server(activation_detach)
        
        # calculate loss
        loss = criterion(fx_server, y)
        # calculate accuracy
        acc = calculate_accuracy(fx_server, y)
        
        batch_loss_test.append(loss.item())
        batch_acc_test.append(acc.item())
        
        # count2: tracks completion of the client's test batches
        count2 += 1
        if count2 == len_batch:
            acc_avg_test = sum(batch_acc_test)/len(batch_acc_test)
            loss_avg_test = sum(batch_loss_test)/len(batch_loss_test)
            
            batch_acc_test = []
            batch_loss_test = []
            count2 = 0
            
            prGreen('Client{} Test =>                   \tAcc: {:.3f} \tLoss: {:.4f}'.format(idx, acc_avg_test, loss_avg_test))
            
            # if a local epoch is completed (flag set by train_server)
            if l_epoch_check:
                l_epoch_check = False
                
                # Store the last accuracy and loss
                acc_avg_test_all = acc_avg_test
                loss_avg_test_all = loss_avg_test
                        
                loss_test_collect_user.append(loss_avg_test_all)
                acc_test_collect_user.append(acc_avg_test_all)
                
            # if federation has happened (flag set by train_server)
            if fed_check:
                fed_check = False
                print("------------------------------------------------")
                print("------ Federation process at Server-Side ------- ")
                print("------------------------------------------------")
                
                acc_avg_all_user = sum(acc_test_collect_user)/len(acc_test_collect_user)
                loss_avg_all_user = sum(loss_test_collect_user)/len(loss_test_collect_user)
            
                loss_test_collect.append(loss_avg_all_user)
                acc_test_collect.append(acc_avg_all_user)
                acc_test_collect_user = []
                loss_test_collect_user= []
                              
                print("====================== SERVER V1==========================")
                print(' Train: Round {:3d}, Avg Accuracy {:.3f} | Avg Loss {:.3f}'.format(ell, acc_avg_all_user_train, loss_avg_all_user_train))
                print(' Test: Round {:3d}, Avg Accuracy {:.3f} | Avg Loss {:.3f}'.format(ell, acc_avg_all_user, loss_avg_all_user))
                print("==========================================================")
         
    return 













#==============================================================================================================
#                                       Clients Side Program
#==============================================================================================================
'''Client-side functions associated with Training and Testing'''
class Client(object):
    """One simulated client holding a shard of the train/test data.

    The client owns the client-side sub-model pass and exchanges smashed
    data / gradients with the server-side functions train_server() and
    evaluate_server().
    """

    def __init__(self, net_client_model, idx, lr, device, dataset_train = None, dataset_test = None, idxs = None, idxs_test = None, local_ep = 1, batch_size = 256):
        """
        Args:
            net_client_model: kept for interface compatibility; the model to
                train/evaluate is passed directly to train()/evaluate().
            idx: this client's index.
            lr: learning rate for the client-side Adam optimizer.
            device: torch device to run on.
            dataset_train/dataset_test: full datasets; the client only sees
                the samples selected by idxs/idxs_test.
            idxs/idxs_test: sample indices assigned to this client.
            local_ep: number of local epochs per round (default 1, as before
                this was hard-coded).
            batch_size: mini-batch size (default 256, as before hard-coded).
        """
        self.idx = idx
        self.device = device
        self.lr = lr
        self.local_ep = local_ep
        self.ldr_train = DataLoader(DatasetSplit(dataset_train, idxs), batch_size = batch_size, shuffle = True)
        self.ldr_test = DataLoader(DatasetSplit(dataset_test, idxs_test), batch_size = batch_size, shuffle = True)

    def train(self, net):
        """Train the client-side model for self.local_ep local epochs.

        Returns:
            dict: the trained client-side state_dict, for federated averaging.
        """
        net.train()
        optimizer_client = torch.optim.Adam(net.parameters(), lr = self.lr)
        # BUGFIX: loop variable renamed from 'iter' (shadowed the builtin).
        for local_epoch in range(self.local_ep):
            len_batch = len(self.ldr_train)
            for batch_idx, (images, labels) in enumerate(self.ldr_train):
                images, labels = images.to(self.device), labels.to(self.device)
                optimizer_client.zero_grad()
                '''Client-side model FP'''
                activation = net(images)
                # detach so the server's graph stops at the cut layer;
                # requires_grad lets us read .grad after the server's backward
                activation_detach = activation.clone().detach().requires_grad_(True)
                '''Sending activations to server and receiving gradients from server'''
                gradient_acti = train_server(activation_detach, labels, local_epoch, self.local_ep, self.idx, len_batch)
                '''Client-side model BP '''
                activation.backward(gradient_acti)
                optimizer_client.step()
        return net.state_dict()

    def evaluate(self, net, ell):
        """Evaluate the client-side model on this client's test shard."""
        net.eval()
        with torch.no_grad():
            len_batch = len(self.ldr_test)
            for batch_idx, (images, labels) in enumerate(self.ldr_test):
                images, labels = images.to(self.device), labels.to(self.device)
                '''Client-side model FP'''
                activation = net(images)
                '''Sending activations to server''' 
                evaluate_server(activation, labels, self.idx, len_batch, ell)
        return









print(f"Data loading")
if SFLV1:
    test_size = 0.2  # 80/20 train/test split for SFLV1
elif SFLV2:
    test_size = 0.1  # 90/10 train/test split for SFLV2
dataset_train, dataset_test = dataloadingHAM10000(test_size)

# partition train/test samples across the num_users clients
# (dataset_iid — presumably an IID split; confirm in dataloading module)
dict_users = dataset_iid(dataset_train, num_users)
dict_users_test = dataset_iid(dataset_test, num_users)


#------------ Training And Testing -----------------
net_glob_client.train()
w_glob_client = net_glob_client.state_dict() # copy weights

# Federation takes place after certain local epochs in train() client-side.
# Each iteration of this loop is one global epoch, also known as a round.
# BUGFIX: loop variable renamed from 'iter', which shadowed the builtin for
# the rest of the module.
for epoch in range(epochs):
    # sample m = frac * num_users participating clients for this round
    m = max(int(frac * num_users), 1)
    idxs_users = np.random.choice(range(num_users), m, replace = False)
    print(f"Epoch = {epoch}/{epochs}, idxs_users = {idxs_users}")
    w_locals_client = []      
    for idx in idxs_users:
        local = Client(net_glob_client, idx, lr, device, dataset_train = dataset_train, dataset_test = dataset_test, idxs = dict_users[idx], idxs_test = dict_users_test[idx])
        '''Training''' 
        # each client trains a fresh copy of the current global client-side model
        w_client = local.train(net = copy.deepcopy(net_glob_client).to(device))
        w_locals_client.append(copy.deepcopy(w_client))        
        '''Testing''' 
        local.evaluate(net = copy.deepcopy(net_glob_client).to(device), ell = epoch)
        
    print("------------------------------------------------------------")
    print("------ Fed Server: Federation process at Client-Side -------")
    print("------------------------------------------------------------")
    '''Federation process at Client-Side'''
    # After serving all clients for its local epochs------------
    w_glob_client = FedAvg(w_locals_client)   
    '''Update client-side global model''' 
    net_glob_client.load_state_dict(w_glob_client)    



print("Training and Evaluation completed!")
'''Save output data to .excel file'''
# One row per completed round: round index plus train/test accuracy
# (table is used later for comparison plots).
round_process = list(range(1, len(acc_train_collect) + 1))
results = {
    'round': round_process,
    'acc_train': acc_train_collect,
    'acc_test': acc_test_collect,
}
df = DataFrame(results)
file_name = program + ".xlsx"
df.to_excel(file_name, sheet_name = "v1_test", index = False)