import logging
import os
from copy import deepcopy

import numpy as np
import torch

from client import Client
from data import *
from models import *
from server import Server
from utils import *


class Trainer:
    """Split-learning trainer.

    Each client owns the front ("client-side") part of the network and the
    server owns the back ("server-side") part.  Clients are trained
    sequentially: smashed (intermediate) activations are optionally run
    through a lossy compress/decompress round-trip to simulate a
    bandwidth-limited link, then forwarded to the server model; the loss
    gradient flows back through both halves.
    """

    def __init__(self,
                 n_clients: int = 10,
                 epoch: int = 10,
                 optimizer: str = "SGD",
                 weight_decay: float = 1e-4,
                 momentum: float = 0.9,
                 lr: float = 0.1,
                 scheduler: str = None,
                 step_size: int = 30,
                 gamma: float = 0.1,
                 dataset: str = "CIFAR10",
                 path: str = "../",
                 alpha: float = 100,
                 model: str = "VGG19",
                 batch_size: int = 64,
                 compress_policy: str = None,
                 ratio: float = 0.98,
                 bit: int = 2,
                 client_device: torch.device = torch.device('cpu'),
                 server_device: torch.device = torch.device('cuda'),
                 ) -> None:
        logging.getLogger().setLevel(logging.INFO)
        self.dataset = None
        self.n_clients = n_clients

        # Dataset selection.
        dataset_classes = {
            "CIFAR10": Cifar10,
            "CIFAR100": Cifar100,
            "MNIST": Mnist,
            "IMAGENET-1K": ImageNet,
        }
        if dataset not in dataset_classes:
            logging.error(f"No Dataset: {dataset}")
            return
        self.dataset = dataset_classes[dataset](n_clients, batch_size, alpha, path)
        logging.info(f"Dataset: {dataset}")

        # Model selection.
        if model == "VGG19":
            self.client_side_model = ClientSideVGG19
            self.server_side_model = ServerSideVGG19
        elif model == "RESNET18":
            self.client_side_model = ClientSideResNet18
            self.server_side_model = ServerSideResNet18
        else:
            # Fixed: the original log messages in this branch said "Dataset".
            logging.error(f"No Model: {model}")
            return
        self.model = model
        logging.info(f"Model: {model}")

        # Compression policy.  None means "no compression" and is valid
        # (fixed: the original treated the default None as an unknown policy
        # and returned early, leaving the trainer half-initialized).
        self.compressor = None
        if compress_policy == "sparsification":
            self.compressor = Sparser(ratio)
            compress_policy += str(ratio)
        elif compress_policy == "quantization":
            self.compressor = Quantizer(bit)
            compress_policy += str(bit)
        elif compress_policy == "ssfdme":
            self.compressor = SSFDME(ratio, bit)
            compress_policy += (str(ratio) + str(bit))
        elif compress_policy is not None:
            logging.error(f"No Compress policy: {compress_policy}")
            return
        logging.info(f"Compress policy: {str(self.compressor)}")

        # NOTE(review): the second Client argument is a magic constant (10)
        # — presumably a class or round count; confirm against Client.__init__.
        self.clients = [
            Client(i + 1, 10, self.dataset.trainLoader[i], self.client_side_model().to(client_device), lr, optimizer,
                   weight_decay, momentum, scheduler, step_size, gamma, compress_policy) for i in range(n_clients)]
        Client.validate_loader = self.dataset.validationLoader
        self.server = Server(self.server_side_model().to(server_device), lr, optimizer,
                             weight_decay, momentum, scheduler, step_size, gamma)
        self.epoch = epoch
        self.criterion = torch.nn.CrossEntropyLoss()

        self.client_device = client_device
        self.server_device = server_device
        self.loss_lst = []   # per-batch training losses, across all epochs/clients
        self.acc1_lst = []   # per-client top-1 validation accuracy
        self.acc5_lst = []   # per-client top-5 validation accuracy

    def _compress(self, smashed_data):
        """Simulate the compress/decompress round-trip on the activations.

        `decode` restores the (lossy) data; the tensor passed in is returned
        for call-chaining.  A no-op when no compressor is configured.
        """
        if self.compressor is not None:
            v, m = self.compressor.encode(smashed_data)
            # Quantization transfers smashed_data(quantized data), v(k), m(b)
            # Sparsification transfers v(vector), m(mask)
            # SSFDME transfers v(vector), m(mask)
            self.compressor.decode(smashed_data, v, m)
        return smashed_data

    def train(self, epoch):
        """Run one epoch: train each client sequentially against the shared
        server-side model, then validate and record accuracy."""
        for index, client in enumerate(self.clients):
            self.server.model.train()
            client.model.train()
            # Every client except the very first continues from the weights
            # saved by its predecessor (sequential split learning).
            if epoch != 0 or index != 0:
                client.load_weight()
            # Mirror the server's current learning rate on the client, so the
            # server-side scheduler remains the single source of truth.
            lr = self.server.optimizer.state_dict()['param_groups'][0]['lr']
            tmp = deepcopy(client.optimizer.state_dict())
            tmp['param_groups'][0]['lr'] = lr
            client.optimizer.load_state_dict(tmp)
            loss = None
            for x, y in client.dataloader:
                client.optimizer.zero_grad()
                self.server.optimizer.zero_grad()
                # Fixed: the batch was never moved to the client device,
                # which breaks as soon as client_device is not the CPU.
                x = x.to(self.client_device)
                smashed_data = self._compress(client.model(x))
                smashed_data = smashed_data.to(self.server_device)
                y = y.to(self.server_device)
                output = self.server.model(smashed_data)
                loss = self.criterion(output, y)
                loss.backward()
                self.server.optimizer.step()
                client.optimizer.step()
                self.loss_lst.append(loss.item())
            if loss is not None:  # guard: dataloader may yield no batches
                logging.info(
                    f"Epoch[{epoch + 1}|{self.epoch}]\tClient[{index + 1}|{self.n_clients}]\tLoss: {round(loss.item(), 3)}")
            client.save_weight()
            self.server.model.eval()
            client.model.eval()
            acc = self.validate(index)
            logging.info(
                f"Epoch[{epoch + 1}|{self.epoch}]\tClient[{index + 1}|{self.n_clients}]\t"
                f"Top-1 Accuracy: {round(acc[0], 3)}\tTop-5 Accuracy: {round(acc[1], 3)}")
            self.acc1_lst.append(acc[0])
            self.acc5_lst.append(acc[1])
            # NOTE(review): the scheduler steps once per *client*, not once
            # per epoch — confirm the per-client stepping is intentional.
            if self.server.scheduler is not None:
                self.server.scheduler.step()

    def train_loop(self):
        """Train for `self.epoch` epochs, then persist loss/accuracy curves."""
        for e in range(self.epoch):
            self.train(e)
            print()
        # self.plot()
        # Fixed: ensure the output directory exists before np.save.
        os.makedirs("./res/data", exist_ok=True)
        prefix = f"./res/data/{self.model}_{str(self.dataset)}_{str(self.compressor)}"
        np.save(f"{prefix}_loss.npy", np.array(self.loss_lst))
        np.save(f"{prefix}_acc1.npy", np.array(self.acc1_lst))
        np.save(f"{prefix}_acc5.npy", np.array(self.acc5_lst))

    def validate(self, index):
        """Evaluate client `index`'s front model chained with the server model.

        Returns:
            (top1_accuracy, top5_accuracy) over the shared validation set,
            or (0.0, 0.0) when the loader yields no samples.
        """
        total = 0
        correct1 = 0
        correct5 = 0
        for inputs, target in Client.validate_loader:
            total += len(inputs)
            target = target.to(self.server_device)
            with torch.no_grad():
                # Fixed: move the batch to the client device (was implicitly CPU).
                inputs = inputs.to(self.client_device)
                smashed_data = self._compress(self.clients[index].model(inputs))
                smashed_data = smashed_data.to(self.server_device)
                output = self.server.model(smashed_data)
                correct1 += torch.eq(output.argmax(dim=1), target).sum().float().item()
                # Top-5: compare the 5 best predictions against a broadcastable
                # (batch, 1) column of targets.
                _, top5_predict = output.topk(5)
                correct5 += torch.eq(top5_predict, target.view(-1, 1)).sum().float().item()
        if total == 0:  # guard: empty validation loader
            return 0.0, 0.0
        return correct1 / total, correct5 / total

    def plot(self):
        """Plot accuracy and (subsampled) loss curves and save them as a PDF."""
        import matplotlib.pyplot as plt
        os.makedirs("./res/image", exist_ok=True)  # fixed: create dir before savefig
        fig, ax = plt.subplots(1, 3, figsize=(15, 3), dpi=600)
        ax[0].plot(self.acc1_lst, label="Testing Top-1 Accuracy")
        ax[1].plot(self.acc5_lst, label="Testing Top-5 Accuracy")
        ax[2].plot(self.loss_lst[::10], label="Training loss")
        fig.legend()
        fig.savefig(f"./res/image/{self.model}_{str(self.dataset)}_"
                    f"compress_{str(self.compressor)}.pdf")
        fig.show()
