import pytorch_lightning as pl
import torch
import torch.nn.functional as F
import torchvision
from dataset import dataloaders
from pytorch_lightning.callbacks import EarlyStopping
from pytorch_lightning.loggers import TensorBoardLogger
from torch import nn


class Generator(nn.Module):
    """MLP generator: maps a 100-dim noise vector to a flattened 28x28 image.

    Output values are squashed into [-1, 1] by the final Tanh.
    """

    def __init__(self):
        super().__init__()
        layers = [
            nn.Linear(100, 256),
            nn.ReLU(inplace=True),
            nn.Linear(256, 512),
            nn.ReLU(inplace=True),
            nn.Linear(512, 28 * 28),
            nn.Tanh(),  # pixel range [-1, 1]
        ]
        self.model = nn.Sequential(*layers)

    def forward(self, noise):
        """Generate a (batch, 784) image batch from (batch, 100) noise."""
        return self.model(noise)


class Discriminator(nn.Module):
    """MLP discriminator: scores an image batch with per-image real-probabilities.

    Input of any shape (batch, ...) is flattened to (batch, 784); the final
    Sigmoid yields a probability in (0, 1) per image.
    """

    def __init__(self):
        super().__init__()
        self.model = nn.Sequential(
            nn.Linear(28 * 28, 512),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(512, 256),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(256, 1),
            nn.Sigmoid(),  # probability the image is real
        )

    def forward(self, img):
        """Return a (batch, 1) tensor of real-probabilities for `img`."""
        flat = img.view(img.size(0), -1)
        return self.model(flat)


class GAN(pl.LightningModule):
    """Vanilla GAN on flattened 28x28 images, trained with manual optimization.

    Each training step first updates the discriminator on one real and one
    fake batch, then updates the generator to fool the (just-updated)
    discriminator. Lightning's automatic optimization is disabled because the
    two networks need interleaved, independent optimizer steps.
    """

    def __init__(self):
        super(GAN, self).__init__()
        self.generator = Generator()
        self.discriminator = Discriminator()
        # Fixed noise so the validation image grid is comparable across epochs.
        self.validation_z = torch.randn(10, 100)
        self.automatic_optimization = False  # two interleaved optimizer steps
        # Per-step losses collected over an epoch (stored detached; see training_step).
        self.generator_loss_epoch = []
        self.discriminator_loss_epoch = []

    def forward(self, z, labels=None):
        """Generate images from noise `z` of shape (batch, 100).

        `labels` is accepted for backward compatibility but ignored: the
        generator is unconditional. (Previously `labels` was forwarded to
        Generator.forward, which takes only the noise tensor, so any call
        raised a TypeError.)
        """
        return self.generator(z)

    def training_step(self, batch, batch_idx):
        """One GAN step: D on real, D on fake, then G against the updated D."""
        x, _ = batch
        batch_size = x.size(0)
        optimizer_g, optimizer_d = self.optimizers()  # type: ignore

        # --- Discriminator on real images: push D(x) toward 1 ---
        optimizer_d.zero_grad()
        y_real = self.discriminator(x)
        real_loss = F.binary_cross_entropy(y_real, torch.ones_like(y_real))
        self.manual_backward(real_loss)
        D_real = y_real.mean()
        self.log(
            "D(real)", D_real, prog_bar=True, logger=True, on_step=True, on_epoch=False
        )

        # --- Discriminator on fake images: push D(G(z)) toward 0 ---
        # The generator output is detached so only D receives gradients here.
        noise = torch.randn(batch_size, 100, device=self.device)

        fake_output = self.generator(noise)
        y_fake = self.discriminator(fake_output.detach())
        fake_loss = F.binary_cross_entropy(y_fake, torch.zeros_like(y_fake))
        D_G_z1 = y_fake.mean()
        self.log(
            "D(G(noise))",
            D_G_z1,
            prog_bar=True,
            logger=True,
            on_step=True,
            on_epoch=False,
        )
        self.manual_backward(fake_loss)
        optimizer_d.step()

        d_loss = real_loss + fake_loss
        self.log(
            "discriminator_loss",
            d_loss,
            prog_bar=True,
            logger=True,
            on_step=True,
            on_epoch=True,
        )
        # Detach before storing: keeping the autograd graph of every step's
        # loss alive for a whole epoch leaks memory.
        self.discriminator_loss_epoch.append(d_loss.detach())

        # --- Generator: push D(G(z)) toward 1 (non-saturating BCE objective) ---
        optimizer_g.zero_grad()
        y_fake_after = self.discriminator(fake_output)
        g_loss = F.binary_cross_entropy(y_fake_after, torch.ones_like(y_fake_after))
        self.manual_backward(g_loss)
        optimizer_g.step()

        # D_G_z1 vs D_G_z2 shows how much the discriminator step just changed
        # its opinion of the same fake batch.
        D_G_z2 = y_fake_after.mean()
        self.log(
            "D_new(G(noise))",
            D_G_z2,
            prog_bar=True,
            logger=True,
            on_step=True,
            on_epoch=False,
        )
        self.log(
            "D(G(noise)) - D_new(G(noise))",
            D_G_z1 - D_G_z2,
            prog_bar=True,
            logger=True,
            on_step=True,
            on_epoch=False,
        )
        self.log(
            "generator_loss",
            g_loss,
            prog_bar=True,
            logger=True,
            on_step=True,
            on_epoch=True,
        )
        self.generator_loss_epoch.append(g_loss.detach())
        return {"generator_loss": g_loss, "discriminator_loss": d_loss}

    def validation_step(self, batch, batch_idx):
        """Log a grid of images generated from the fixed validation noise."""
        sample_imgs = self.generator(self.validation_z.to(self.device))
        grid = torchvision.utils.make_grid(sample_imgs.view(-1, 1, 28, 28), nrow=5)
        self.logger.experiment.add_image("generated_images", grid, self.current_epoch)  # type: ignore
        return {}

    def configure_optimizers(self):
        """One Adam optimizer per network; beta1=0.5 as is conventional for GANs."""
        lr = 0.002
        b1 = 0.5
        b2 = 0.999
        opt_g = torch.optim.Adam(self.generator.parameters(), lr=lr, betas=(b1, b2))  # type: ignore
        opt_d = torch.optim.Adam(self.discriminator.parameters(), lr=lr, betas=(b1, b2))  # type: ignore
        return [opt_g, opt_d]


# Training — module-level side effects: building this module starts a fit run.
logger = TensorBoardLogger(name="lightning_logs_GAN", save_dir=".")

early_stop_callback = EarlyStopping(
    monitor="generator_loss_epoch",  # Monitor the generator's epoch-level loss for convergence
    min_delta=0.0005,  # Minimum change to qualify as an improvement
    patience=10,  # Number of epochs with no improvement after which training will be stopped
    verbose=True,
    mode="min",  # The training will stop when the quantity monitored has stopped decreasing
)

trainer = pl.Trainer(
    max_epochs=1000,
    logger=logger,
    accelerator="auto",  # pick GPU/MPS/CPU automatically
    # callbacks=[early_stop_callback]  # NOTE: early stopping is currently disabled
)

model = GAN()
# NOTE(review): the "test_label_8" loader is passed as the validation set — confirm intended.
trainer.fit(model, dataloaders["train_label_8"], dataloaders["test_label_8"])
