from trainer.trainer import Trainer, INTERVEL
from models.cdcgan import Generator, Discriminator, IMG_SIZE, NUM_ZV, weights_init, cal_gp
from dataproc.adjust import get_loader
from utils.consts import LOG_PATH, DOC_PATH
import torch
from torchvision import transforms


class Gtrainer(Trainer):
    """Trainer for a conditional DCGAN with a WGAN-GP objective.

    Trains ``Discriminator``/``Generator`` pairs on the loader produced by
    ``get_loader``; only the generator's weights are checkpointed.
    """

    def __init__(self, task_id: str, conf: dict, title_icon: str) -> None:
        super().__init__(task_id, conf, title_icon)
        # Generator checkpoints are written here at each logging interval.
        self.model_path = f"{LOG_PATH}/{self.task_id}.pth"

    def prepare(self):
        """Build the loader, both networks, optimizers and label encodings."""
        self.loader = get_loader(
            name=self.dataset_name, distr=self.distr, batch_size=self.batch, input_size=IMG_SIZE)
        self.num_cls = self.loader.dataset.num_cls
        self.distr = self.loader.dataset.distr
        self.cls_dict = self.loader.dataset.cls_dict
        self.steps = len(self.loader)
        # 1-based step indices at which metrics are logged and the model saved.
        self.intervel = [round(self.steps*(i+1)/INTERVEL)
                         for i in range(INTERVEL)]

        self.netD = Discriminator(self.num_cls).to(self.device)
        self.netG = Generator(self.num_cls).to(self.device)
        self.netD.apply(weights_init)
        self.netG.apply(weights_init)

        # Adam with beta1=0.5, the customary setting for DCGAN-style training.
        self.optimD = torch.optim.Adam(
            self.netD.parameters(), lr=self.lr, betas=(0.5, 0.999))
        self.optimG = torch.optim.Adam(
            self.netG.parameters(), lr=self.lr, betas=(0.5, 0.999))

        # Label one-hots for G --> [num_cls, num_cls, 1, 1]
        # torch.eye gives exactly the identity the original element loop built.
        self.G_1hots = torch.eye(self.num_cls).view(
            self.num_cls, self.num_cls, 1, 1).to(self.device)

        # Label one-hot for D --> [num_cls, num_cls, IMG_SIZE, IMG_SIZE]:
        # class i gets an all-ones plane in channel i, zeros elsewhere.
        label_fills = torch.zeros(
            self.num_cls, self.num_cls, IMG_SIZE, IMG_SIZE)
        for i in range(self.num_cls):
            label_fills[i][i] = 1.0
        self.D_1hots = label_fills.to(self.device)
        # Latest-step metrics; populated on the first call to training_process.
        self.metrics = None

    def t_loader(self):
        """Yield (imgs, labels) batches from the prepared loader."""
        yield from self.loader

    def train(self):
        """Run the full training loop, then render a final sample grid.

        Returns a summary dict describing the data distribution, class
        mapping, checkpoint path and the total number of steps taken.
        """
        super().train()
        res_dict = {
            "distr": self.distr,
            "cls_dict": self.cls_dict,
            "model_path": self.model_path,
            "best_step": self.epoch*self.steps,
        }
        self.gen_imgs()
        return res_dict

    def training_process(self, epc: int, step: int, imgs, labels):
        """One WGAN-GP step: update D on real/fake batches, then update G."""
        # ------ train D ------
        # set labels and imgs
        real_imgs = imgs.to(self.device)

        D_label = self.D_1hots[labels]
        G_label = self.G_1hots[labels]

        # NOTE(review): uniform noise (torch.rand) rather than the more common
        # Gaussian (torch.randn) — presumably matches the generator's training
        # assumption; confirm against models.cdcgan.
        noises = torch.rand(real_imgs.size(0), NUM_ZV, 1, 1).to(self.device)
        # Detach so the D update does not backprop into G.
        fake_imgs = self.netG(noises, G_label).detach()

        # train D with real and fake imgs
        self.optimD.zero_grad()
        output_real = self.netD(real_imgs, D_label)
        output_fake = self.netD(fake_imgs, D_label)

        # (||grad(D(x))||_2 - 1)^2 's mean
        lossD_gp = cal_gp(real_imgs, fake_imgs, D_label,
                          self.netD, self.device)
        # Wasserstein critic loss: maximize D(real) - D(fake), plus GP term.
        lossD_real = -output_real.mean()
        lossD_fake = output_fake.mean()
        lossD = lossD_real + lossD_fake + lossD_gp
        lossD.backward()
        self.optimD.step()

        # ------ train G ------
        # optimG holds exactly netG's parameters, matching optimD usage above.
        self.optimG.zero_grad()
        gen_imgs = self.netG(noises, G_label)
        output = self.netD(gen_imgs, D_label)
        lossG = -output.mean()
        lossG.backward()
        self.optimG.step()

        # update records
        total_step = step+(epc-1)*self.steps

        self.metrics = {
            'LossD': lossD.item(),
            'LossG': lossG.item(),
            'D(x)': output_real.mean().item(),
            'D(G0(z))': output_fake.mean().item(),
            'D(G1(z))': output.mean().item()
        }

        # show msg
        self.send_msg(self.get_msg(epc, step))

        # log records and save model
        if step in self.intervel:
            torch.save(self.netG.state_dict(), self.model_path)
            outcomes = dict(self.metrics)
            outcomes['total_step'] = total_step
            self.logger.log_outcome(outcomes)

        # Render a per-epoch sample grid at the end of each epoch.
        if step == self.steps:
            self.gen_imgs(epc)

    def get_msg(self, epc: int, step: int):
        """Format a zero-padded progress line with the latest metrics."""
        epoch_len, step_len = len(str(self.epoch)), len(str(self.steps))
        msg = f"TRN:[{epc:0>{epoch_len}}/{self.epoch}][{step:0>{step_len}}/{self.steps}]"
        ending = ', '.join([f'{k}={v:.4f}' for k, v in self.metrics.items()])
        return f"{msg}, {ending}"

    @torch.no_grad()  # sampling only — skip building an autograd graph
    def gen_imgs(self, epc=None):
        """Save a sample grid: one row per class, pic_num samples per row.

        Parameters
        ----------
        epc : int | None
            Epoch tag inserted into the filename; None yields the final
            "<task_id>-gen.png".
        """
        pic_num = 10
        rows = []
        for cls in range(self.num_cls):
            noise = torch.rand(pic_num, NUM_ZV, 1, 1).to(self.device)
            # Explicit long dtype: older torch returns float from an int fill
            # value, which cannot be used as an index.
            label = self.G_1hots[torch.full([pic_num, ], cls, dtype=torch.long)]
            outputs = self.netG(noise, label)
            # Lay this class's samples side by side along the width axis.
            rows.append(torch.cat(list(outputs), dim=2))
        # Stack class rows along the height axis.
        grid = torch.cat(rows, dim=1)
        # NOTE(review): assumes netG output is already in ToPILImage's expected
        # value range — confirm against the generator's output activation.
        img = transforms.ToPILImage()(grid)
        tail = '' if epc is None else f"{epc}-"
        img.save(f"{DOC_PATH}/imgs/{self.task_id}-{tail}gen.png")
