import argparse
from weakref import ref

import pytorch_lightning as pl
import torch
import torch.autograd as autograd
from dataset import get_data_loader, transform_data
from evaluate_calosim import EvaluateCaloSim
from pytorch_lightning.loggers import TensorBoardLogger
from torch import nn

# Shared model hyperparameters.
NOISE_DIM = 100  # dimensionality of the latent noise vector fed to the generator
NUM_LABEL = 15  # number of distinct condition labels (embedding vocabulary size)
IMAGE_DIM = 368  # flattened image size produced by the generator / consumed by the critic
EMBBEDDING_DIM = 10  # width of the label embedding (name keeps the original spelling)


class Generator(nn.Module):
    """Conditional generator: maps a (noise, label) pair to a flat image.

    The label is embedded, concatenated with the noise vector, and pushed
    through a small MLP ending in Tanh, yielding a vector of IMAGE_DIM values.
    """

    def __init__(self):
        super().__init__()
        self.label_emb = nn.Embedding(NUM_LABEL, EMBBEDDING_DIM)
        self.model = nn.Sequential(
            nn.Linear(NOISE_DIM + EMBBEDDING_DIM, 256),
            nn.ReLU(True),
            nn.Linear(256, 512),
            nn.ReLU(True),
            nn.Linear(512, IMAGE_DIM),
            nn.Tanh(),
        )

    def forward(self, noise, labels):
        """Return generated images for `noise` conditioned on `labels`."""
        embedded = self.label_emb(labels).view(-1, EMBBEDDING_DIM)
        return self.model(torch.cat((noise, embedded), dim=1))


class Critic(nn.Module):
    """Conditional WGAN critic: scores a (flat image, label) pair.

    The label is embedded and concatenated with the image before a LeakyReLU
    MLP produces a single unbounded score (no Sigmoid — Wasserstein critic).
    """

    def __init__(self):
        super().__init__()
        self.label_emb = nn.Embedding(NUM_LABEL, EMBBEDDING_DIM)
        self.model = nn.Sequential(
            nn.Linear(IMAGE_DIM + EMBBEDDING_DIM, 512),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(512, 256),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(256, 1),
            # nn.Sigmoid()
        )

    def forward(self, img, labels):
        """Return the critic score for `img` conditioned on `labels`."""
        embedded = self.label_emb(labels).view(-1, EMBBEDDING_DIM)
        return self.model(torch.cat((img, embedded), dim=1))


class GAN(pl.LightningModule):
    """Conditional WGAN (optionally WGAN-GP) trained with manual optimization.

    The critic is updated 5 times per generator update. The Lipschitz
    constraint is enforced either by weight clipping
    (``args.penalty == "clip"``) or by a gradient penalty
    (``args.penalty == "gp"``).
    """

    def __init__(self, args, prof):
        super(GAN, self).__init__()
        self.args = args
        self.generator = Generator()
        self.critic = Critic()
        # WGAN interleaves several critic steps with one generator step,
        # which requires Lightning's manual optimization mode.
        self.automatic_optimization = False
        # Fixed noise/labels so the validation images are comparable
        # across epochs.
        self.validation_z = torch.randn(10240, NOISE_DIM)
        self.validation_labels = torch.randint(0, NUM_LABEL, (10240, 1))
        self.prof = prof  # torch.profiler handle, stepped once per batch

    def forward(self, z, labels):
        """Generate images from noise ``z`` conditioned on ``labels``."""
        return self.generator(z, labels)

    def compute_gradient_penalty(self, real_imgs, fake_imgs, labels):
        """Calculates the gradient penalty loss for WGAN GP.

        Returns the batch mean of ``(||grad_x D(x_interp)|| - 1) ** 2`` for
        random interpolations between real and fake samples.
        """
        batch_size = real_imgs.size(0)
        # Flatten real images to match the fake images' (batch, IMAGE_DIM)
        # layout. No requires_grad needed here: gradients are taken w.r.t.
        # the interpolates only.
        real_imgs = real_imgs.view(batch_size, IMAGE_DIM)
        # Per-sample random interpolation weight in [0, 1).
        alpha = torch.rand(batch_size, 1, device=real_imgs.device)
        interpolates = (alpha * real_imgs + (1 - alpha) * fake_imgs).requires_grad_(
            True
        )
        d_interpolates = self.critic(interpolates, labels)
        # d(sum of outputs)/d(interpolates) == per-sample gradients, so use
        # a vector of ones as grad_outputs.
        grad_outputs = torch.ones(
            batch_size, 1, device=real_imgs.device, requires_grad=False
        )
        # create_graph=True makes the penalty itself differentiable w.r.t.
        # the critic parameters for the subsequent backward pass.
        gradients = autograd.grad(
            outputs=d_interpolates,
            inputs=interpolates,
            grad_outputs=grad_outputs,
            create_graph=True,
            retain_graph=True,
            only_inputs=True,
        )[0]
        gradients = gradients.view(batch_size, -1)
        gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean()
        return gradient_penalty

    def training_step(self, batch, batch_idx):
        self.prof.step()

        real_imgs, labels = batch
        batch_size = real_imgs.size(0)
        optimizer_g, optimizer_d = self.optimizers()  # type: ignore

        # --- Critic: k updates per generator update (WGAN "n_critic") ---
        k = 5
        for _ in range(k):
            noise = torch.randn(batch_size, NOISE_DIM, device=self.device)
            fake_imgs = self.generator(noise, labels)

            y_real = self.critic(real_imgs, labels)
            # Detach so the critic loss does not backpropagate through the
            # generator. (The original kept the generator graph alive and
            # needed retain_graph=True on the last iteration; the generator
            # gradients it produced were discarded by optimizer_g.zero_grad()
            # anyway, so this is equivalent and cheaper.)
            y_fake = self.critic(fake_imgs.detach(), labels)

            optimizer_d.zero_grad()
            # Wasserstein critic loss: maximize D(real) - D(fake).
            loss_critic = -torch.mean(y_real) + torch.mean(y_fake)

            if self.args.penalty == "gp":
                gradient_penalty = self.compute_gradient_penalty(
                    real_imgs, fake_imgs.detach(), labels
                )
                loss_critic = loss_critic + self.args.gp_lambda * gradient_penalty

            self.manual_backward(loss_critic)
            optimizer_d.step()

            self.log(
                "critic_loss",
                loss_critic,
                prog_bar=True,
                logger=True,
                on_step=True,
                on_epoch=True,
            )

            if self.args.penalty == "clip":
                # Vanilla WGAN: clamp critic weights to enforce Lipschitz.
                CLAMP = self.args.clip
                for p in self.critic.parameters():
                    p.data.clamp_(-CLAMP, CLAMP)

        # --- Generator: one update against the freshly-updated critic ---
        # fake_imgs (from the last critic iteration) still carries the
        # generator graph, so g_loss can backpropagate into the generator
        # without any retain_graph bookkeeping.
        y_fake_after = self.critic(fake_imgs, labels)
        g_loss = -torch.mean(y_fake_after)
        optimizer_g.zero_grad()
        self.manual_backward(g_loss)
        optimizer_g.step()
        self.log(
            "generator_loss",
            g_loss,
            prog_bar=True,
            logger=True,
            on_step=True,
            on_epoch=True,
        )

        return {"generator_loss": g_loss, "critic_loss": loss_critic}

    def validation_step(self, batch, batch_idx):
        # Only the first validation batch is used as the reference sample.
        if batch_idx != 0:
            return {}
        ref_imgs, labels = batch
        labels = labels.detach().cpu()
        ref_imgs = ref_imgs.detach().cpu()
        # Undo the training-time normalization before evaluation.
        ref_imgs, _ = transform_data(ref_imgs, labels, reverse=True)
        ref_eval = EvaluateCaloSim("1-photons", ref_imgs.numpy())

        # Generate a fixed sample from the frozen validation noise/labels.
        sample_imgs = (
            self.generator(
                self.validation_z.to(self.device),
                self.validation_labels.to(self.device),
            )
            .detach()
            .cpu()
        )
        sample_imgs, _ = transform_data(
            sample_imgs, self.validation_labels, reverse=True
        )
        gen_eval = EvaluateCaloSim("1-photons", sample_imgs.numpy())
        # NOTE(review): tag keeps the original (misspelled) name so existing
        # TensorBoard histories remain continuous.
        self.logger.experiment.add_image(  # type: ignore
            "avarage_shower",
            gen_eval.DrawAverageShower(),
            self.current_epoch,
            dataformats="HWC",
        )
        self.logger.experiment.add_image(  # type: ignore
            "reference_shower",
            ref_eval.DrawAverageShower(),
            self.current_epoch,
            dataformats="HWC",
        )
        return {}

    def configure_optimizers(self):
        # RMSprop (not Adam) per the original WGAN recipe; momentum-based
        # optimizers can destabilize critic training.
        lr = self.args.lr
        opt_g = torch.optim.RMSprop(self.generator.parameters(), lr=lr)  # type: ignore
        opt_d = torch.optim.RMSprop(self.critic.parameters(), lr=lr)  # type: ignore
        return [opt_g, opt_d]


if __name__ == "__main__":
    # Command-line configuration for a single training run.
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", type=str, default="latest", help="Job ID")
    parser.add_argument("--lr", type=float, default=0.002, help="learning rate")
    parser.add_argument("--clip", type=float, default=0.1, help="clip value")
    parser.add_argument("--batch_size", type=int, default=128, help="batch size")
    parser.add_argument(
        "--penalty", type=str, default="clip", help="clip or gp(gradient penalty)"
    )
    parser.add_argument(
        "--gp_lambda", type=float, default=10.0, help="lambda for gradient penalty"
    )
    args = parser.parse_args()

    # Encode the full hyperparameter set in the run name so each TensorBoard
    # run is self-describing.
    logger = TensorBoardLogger(
        name="lightning_logs_CaloSim_WGAN",
        save_dir=".",
        version=f"{args.version}+clip_{args.clip}+lr_{args.lr}+batch_size_{args.batch_size}+penalty_{args.penalty}+gp_lambda_{args.gp_lambda}",
    )

    trainer = pl.Trainer(
        max_epochs=500,
        logger=logger,
        accelerator="auto",
    )

    # Profile a few early steps (1 wait + 1 warmup + 3 active) and write the
    # trace next to the TensorBoard logs.
    prof = torch.profiler.profile(
        schedule=torch.profiler.schedule(wait=1, warmup=1, active=3, repeat=1),
        on_trace_ready=torch.profiler.tensorboard_trace_handler(
            "lightning_logs_CaloSim_WGAN"
        ),
        record_shapes=True,
        profile_memory=True,
        with_stack=True,
    )
    prof.start()
    model = GAN(args, prof)
    # BUG FIX: dataset name was misspelled "1-phontons"; validation_step
    # evaluates the same dataset under the name "1-photons".
    trainer.fit(
        model,
        get_data_loader(
            "1-photons", batch_size=args.batch_size, shuffle=True, num_workers=8
        ),
        get_data_loader("1-photons", batch_size=10240, shuffle=False, num_workers=8),
    )
    prof.stop()
