import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torch import nn 
import numpy as np
import copy
from models.Update import DatasetSplit
from utils.save_result import save_result
from models.aggregation import Aggregation
from models.test import test_img_avg
import wandb
class LocalUpdate_FedAvg(object):
    """Client-side trainer for FedAvg.

    Wraps one client's shard of the training set in a DataLoader and runs
    the local optimization step of federated averaging.
    """

    def __init__(self, args, dataset=None, idxs=None, verbose=False):
        """
        Args:
            args: experiment configuration (lr, local_bs, local_ep, device, ...).
            dataset: full training dataset; restricted to this client via `idxs`.
            idxs: sample indices belonging to this client.
            verbose: if True, print the mean training loss after local training.
        """
        self.args = args
        self.loss_func = nn.CrossEntropyLoss()
        self.selected_clients = []
        # DatasetSplit views `dataset` through this client's indices only.
        self.ldr_train = DataLoader(DatasetSplit(dataset, idxs, args),
                                    batch_size=self.args.local_bs, shuffle=True)
        self.verbose = verbose

    def train(self, round, net):
        """Run `args.local_ep` epochs of local training on `net` in place.

        Args:
            round: current communication round; used for exponential lr decay
                when the optimizer is SGD.
            net: model (already on `args.device`) to train.

        Returns:
            dict: the trained model's state_dict.

        Raises:
            ValueError: if `args.optimizer` is neither 'sgd' nor 'adam'.
        """
        net.train()
        if self.args.optimizer == 'sgd':
            # lr decays exponentially with the communication round.
            optimizer = torch.optim.SGD(net.parameters(),
                                        lr=self.args.lr * (self.args.lr_decay ** round),
                                        momentum=self.args.momentum,
                                        weight_decay=self.args.weight_decay)
        elif self.args.optimizer == 'adam':
            optimizer = torch.optim.Adam(net.parameters(), lr=self.args.lr)
        else:
            # Previously an unknown optimizer fell through and crashed later
            # with UnboundLocalError; fail fast with a clear message instead.
            raise ValueError('unsupported optimizer: {}'.format(self.args.optimizer))

        predict_loss = 0.0
        for _ in range(self.args.local_ep):  # don't shadow builtin `iter`
            for images, labels in self.ldr_train:
                images, labels = images.to(self.args.device), labels.to(self.args.device)
                if self.args.dataset == 'widar':
                    # widar labels need an explicit cast for CrossEntropyLoss.
                    labels = labels.long()
                if images.size(0) <= 1:
                    # Skip degenerate batches (e.g. BatchNorm requires >1 sample).
                    continue
                net.zero_grad()
                log_probs = net(images)['output']
                loss = self.loss_func(log_probs, labels)
                loss.backward()
                optimizer.step()

                predict_loss += loss.item()

        if self.verbose:
            # Guard the denominator so an empty loader cannot divide by zero.
            n_steps = max(self.args.local_ep * len(self.ldr_train), 1)
            print('\nUser predict Loss={:.4f}'.format(predict_loss / n_steps))

        return net.state_dict()

def FedAvg(args, net_glob, dataset_train, dataset_test, dict_users, temp=2):
    """Run the FedAvg training loop for `args.epochs` communication rounds.

    Each round samples `m = max(int(frac * num_users), 1)` clients from a
    sub-population chosen by `temp`, trains each locally, aggregates the
    weights (weighted by client dataset size), and evaluates the global model.

    Args:
        args: experiment configuration.
        net_glob: global model, updated in place each round.
        dataset_train: full training dataset, partitioned by `dict_users`.
        dataset_test: held-out test dataset.
        dict_users: mapping client index -> sample indices for that client.
        temp: which client slice to sample from:
            0 -> all clients;
            1 -> clients after the first `client1_frac` fraction;
            2 -> clients after the first `client1_frac + client2_frac` fraction.

    Raises:
        ValueError: if `temp` is not 0, 1, or 2 (previously this left
            `idxs_users` unbound and crashed with NameError).
    """
    net_glob.train()

    acc = [[]]  # acc[0] collects the global test accuracy per round
    for round_idx in range(args.epochs):  # don't shadow builtin `iter`

        print('*' * 80)
        print('Round {:3d}'.format(round_idx))

        m = max(int(args.frac * args.num_users), 1)
        num_users = int(args.num_users)
        # Lower bound of the eligible client-index range for this `temp`.
        if temp == 0:
            low = 0
        elif temp == 1:
            low = int(args.num_users * args.client1_frac)
        elif temp == 2:
            low = int(args.num_users * (args.client1_frac + args.client2_frac))
        else:
            raise ValueError('temp must be 0, 1 or 2, got {}'.format(temp))
        # Cannot sample more clients (without replacement) than are eligible.
        m = min(m, num_users - low)
        idxs_users = np.random.choice(range(low, num_users), m, replace=False)

        w_locals = []
        lens = []
        for idx in idxs_users:
            local = LocalUpdate_FedAvg(args=args, dataset=dataset_train, idxs=dict_users[idx])
            w = local.train(round=round_idx, net=copy.deepcopy(net_glob).to(args.device))

            w_locals.append(copy.deepcopy(w))
            lens.append(len(dict_users[idx]))  # weight aggregation by shard size

        # Aggregate local weights and load them into the global model.
        w_glob = Aggregation(w_locals, lens)
        net_glob.load_state_dict(w_glob)

        acc[0].append(test(net_glob, dataset_test, args))

    save_result(acc, 'test_acc', args)

def test(net_glob, dataset_test, args):
    """Evaluate the global model on the test set and return accuracy as a float."""
    accuracy, _loss = test_img_avg(net_glob, dataset_test, args)
    print("Testing accuracy: {:.2f}".format(accuracy))
    return accuracy.item()
# import torch
# import torch.nn.functional as F
# from torch.utils.data import DataLoader
# from torch import nn 
# import numpy as np
# import copy
# from models.Update import DatasetSplit
# from utils.save_result import save_result
# from models.aggregation import Aggregation
# from models.aggregation import AggregationNoData
# from models.test import test_img_avg
# import wandb
# class LocalUpdate_FedAvg(object):
#     def __init__(self, args, dataset=None, idxs=None, verbose=False):
#         self.args = args
#         self.loss_func = nn.CrossEntropyLoss()
#         self.selected_clients = []
#         self.ldr_train = DataLoader(DatasetSplit(dataset, idxs), batch_size=self.args.local_bs, shuffle=True)
#         self.verbose = verbose

#     def train(self, round, net):

#         net.train()
#         # train and update
#         if self.args.optimizer == 'sgd':
#             optimizer = torch.optim.SGD(net.parameters(), lr=self.args.lr*(self.args.lr_decay**round),
#                                         momentum=self.args.momentum,weight_decay=self.args.weight_decay)
#         elif self.args.optimizer == 'adam':
#             optimizer = torch.optim.Adam(net.parameters(), lr=self.args.lr)

#         Predict_loss = 0
#         for iter in range(self.args.local_ep):

#             for batch_idx, (images, labels) in enumerate(self.ldr_train):
#                 images, labels = images.to(self.args.device), labels.to(self.args.device)
#                 net.zero_grad()
#                 log_probs = net(images)['output']
#                 loss = self.loss_func(log_probs, labels)
#                 loss.backward()
#                 optimizer.step()

#                 Predict_loss += loss.item()

#         if self.verbose:
#             info = '\nUser predict Loss={:.4f}'.format(Predict_loss / (self.args.local_ep * len(self.ldr_train)))
#             print(info)

#         return net.state_dict()

# def FedAvg(args,net_glob, dataset_train, dataset_test, dict_users):

#     net_glob.train()
#     net_list = []
#     for i in range(10):
#         net_list.append(copy.deepcopy(net_glob))
#     # training
#     acc = []

#     for iter in range(args.epochs):

#         print('*'*80)
#         print('Round {:3d}'.format(iter))

#         w_local1 = []
#         w_local2 = []
#         w_local3 = []
#         w_local4 = []
#         w_locals = []
#         m = max(int(args.frac * args.num_users), 1)
#         idxs_users = np.random.choice(range(args.num_users), m, replace=False)
#         for index,idx in enumerate(idxs_users):
#             local = LocalUpdate_FedAvg(args=args, dataset=dataset_train, idxs=dict_users[idx])
#             w = local.train(round=iter,net=copy.deepcopy(net_list[index]).to(args.device))
#             if index < 3:
#                 w_local1.append(copy.deepcopy(w))
#             elif index < 6:
#                 w_local2.append(copy.deepcopy(w))
#             elif index < 8:
#                 w_local3.append(copy.deepcopy(w))
#             else:
#                 w_local4.append(copy.deepcopy(w))

#         # update global weights
#         w_1  = AggregationNoData(w_local1)
#         w_2  = AggregationNoData(w_local2)
#         w_3  = AggregationNoData(w_local3)
#         w_4  = AggregationNoData(w_local4)
#         for index in range(m):
#             if index < 3:
#                 net_list[index].load_state_dict(w_1)
#             elif index < 6:
#                 net_list[index].load_state_dict(w_2)
#             elif index < 8:
#                 net_list[index].load_state_dict(w_3)
#             else:
#                 net_list[index].load_state_dict(w_4)
#         w_locals = []
#         w_locals.append(w_1)
#         w_locals.append(w_2)
#         w_locals.append(w_3)
#         w_locals.append(w_4)
#         w_glob = AggregationNoData(w_locals)
#         acc_dict = {}
#         # copy weight to net_glob
#         net_glob.load_state_dict(w_glob)
#         acc_item = test(net_glob, dataset_test, args)
#         # print(type(acc_item))
#         acc.append(acc_item)
#         # wandb.log({'accuracy':acc_item})
#         acc_dict['accuracy'] = acc_item

#         for i in range(m):
#             test_a = test(net_list[i],dataset_test,args)
#             acc_dict['accuracy_local{}'.format(i+1)] = test_a
#         for i in range(m):
#             net_list[i].load_state_dict(w_glob)
#         wandb.log(acc_dict)
#     save_result(acc, 'test_acc', args)

# def test(net_glob, dataset_test, args):
    
#     # testing
#     acc_test, loss_test = test_img_avg(net_glob, dataset_test, args)

#     print("Testing accuracy: {:.2f}".format(acc_test))

#     return acc_test.item()