import os
from pathlib import Path
from typing import Optional, Dict

import torch
import torch.nn as nn
from einops import rearrange

from accelerate import Accelerator, DistributedType
from torch.utils.data import random_split, DataLoader
from ema_pytorch import EMA
from torchvision.utils import make_grid, save_image

from sgm.data.image_data import ImageDataset
from sgm.data.video_data import VideoDataset, video_tensor_to_gif
from sgm.utils import get_optimizer, generate_random_string


def cycle(dataloader):
    """Yield batches from *dataloader* forever, restarting it each pass."""
    while True:
        yield from dataloader


def accum_log(old_log, new_log):
    """Add every value in *new_log* onto *old_log* in place (missing keys
    start at 0.) and return the mutated *old_log*."""
    for key, value in new_log.items():
        old_log[key] = old_log.get(key, 0.) + value
    return old_log


class VQGANTrainer:
    """Trainer for a VQ-GAN style autoencoder built on HF Accelerate.

    Optionally trains a discriminator alongside the VAE (``use_vgg_and_gan``)
    and maintains an EMA copy of the model on the main process (``use_ema``).
    Checkpoints are written under ``output_dir`` every ``save_steps`` steps.
    """

    def __init__(self, vae, train_dataset, eval_dataset, epochs, batch_size, vq_lr=3e-4, discr_lr=3e-4,
                 vae_max_grad_norm=0.5, discriminator_max_grad_norm=1.0, grad_accum_steps=1, use_vgg_and_gan=False,
                 use_ema=False, ema_update_after_step=0, ema_update_steps=1, save_steps=100, eval_steps=100, log_steps=5,
                 output_dir='./output', apply_grad_penalty_steps=64, accelerate_kwargs=None):
        """Set up model, optimizers, dataloaders and the Accelerator.

        Args:
            vae: autoencoder module; must expose ``discriminator`` when
                ``use_vgg_and_gan`` is True.
            train_dataset / eval_dataset: torch ``Dataset`` objects.
            epochs, batch_size: training schedule.
            vq_lr / discr_lr: learning rates for VAE / discriminator optimizers.
            vae_max_grad_norm / discriminator_max_grad_norm: clip thresholds
                (``None`` disables clipping).
            grad_accum_steps: stored for callers; not applied in ``train`` yet.
            use_vgg_and_gan: enable the adversarial (discriminator) branch.
            use_ema: keep an EMA shadow of the VAE on the main process.
            save_steps / eval_steps / log_steps: step intervals for the
                respective actions.
            output_dir: checkpoint directory (created if missing).
            apply_grad_penalty_steps: interval at which a gradient penalty
                would be applied (the penalized forward call is currently
                disabled in ``train``).
            accelerate_kwargs: extra kwargs forwarded to ``Accelerator``.
        """
        super().__init__()

        if accelerate_kwargs is None:
            accelerate_kwargs = {}

        self.epochs = epochs
        self.steps = 0
        # setup model and parameters
        self.accelerator = Accelerator(**accelerate_kwargs)
        self.vae = vae
        self.use_ema = use_ema
        # EMA copy only lives on the main process; other ranks never touch it.
        if self.is_main and use_ema:
            self.ema_vae = EMA(vae, update_after_step=ema_update_after_step, update_every=ema_update_steps)

        self.num_steps = 0
        self.use_vgg_and_gan = use_vgg_and_gan
        if self.use_vgg_and_gan:
            # Give the discriminator its own optimizer; the VAE optimizer
            # gets every parameter that is NOT part of the discriminator.
            all_parameters = set(vae.parameters())
            discriminator_parameters = set(vae.discriminator.parameters())
            vae_parameters = all_parameters - discriminator_parameters
            self.vae_optim = get_optimizer(vae_parameters, lr=vq_lr)
            self.discriminator_optim = get_optimizer(discriminator_parameters, lr=discr_lr)
        else:
            self.vae_optim = get_optimizer(self.vae.parameters(), lr=vq_lr)

        self.vae_max_grad_norm = vae_max_grad_norm
        self.discriminator_max_grad_norm = discriminator_max_grad_norm
        # NOTE(review): stored but not yet used by train() — confirm intent.
        self.grad_accum_steps = grad_accum_steps

        # create dataloaders
        self.batch_size = batch_size
        self.apply_grad_penalty_steps = apply_grad_penalty_steps

        self.train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
        self.valid_dataloader = DataLoader(eval_dataset, batch_size=batch_size, shuffle=False)

        self.num_train_steps = len(train_dataset) * self.epochs // self.batch_size // self.accelerator.num_processes

        # prepare with accelerator
        # (fixed: previously passed the non-existent `self.vae.discriminator_optim`)
        if self.use_vgg_and_gan:
            (self.vae, self.vae_optim, self.discriminator_optim, self.train_dataloader) \
                = self.accelerator.prepare(
                self.vae, self.vae_optim, self.discriminator_optim, self.train_dataloader)
        else:
            (self.vae, self.vae_optim, self.train_dataloader) \
                = self.accelerator.prepare(
                self.vae, self.vae_optim, self.train_dataloader)

        self.train_iter = cycle(self.train_dataloader)
        self.valid_iter = cycle(self.valid_dataloader)

        self.save_steps = save_steps
        self.eval_steps = eval_steps
        self.log_steps = log_steps
        self.output_dir = Path(output_dir)
        self.output_dir.mkdir(parents=True, exist_ok=True)

    @property
    def is_main(self):
        """True on the global main process."""
        return self.accelerator.is_main_process

    @property
    def is_local_main(self):
        """True on the main process of this node."""
        return self.accelerator.is_local_main_process

    @property
    def is_distributed(self):
        """True when running under any multi-process/distributed launcher."""
        return not (self.accelerator.distributed_type == DistributedType.NO and self.accelerator.num_processes == 1)

    def print(self, msg):
        """Print only once across processes (delegates to Accelerate)."""
        self.accelerator.print(msg)

    def save(self, path):
        """Save model + optimizer state to *path* (local main process only).

        The discriminator optimizer state is included only when the
        adversarial branch is enabled, matching what ``load`` expects.
        """
        if not self.accelerator.is_local_main_process:
            return
        pkg = dict(
            model=self.accelerator.get_state_dict(self.vae),
            vae_optim=self.vae_optim.state_dict(),
        )
        # fixed: key was misspelled 'discr_optimi' and was written even when
        # no discriminator optimizer existed
        if self.use_vgg_and_gan:
            pkg['discr_optim'] = self.discriminator_optim.state_dict()
        torch.save(pkg, path)

    def load(self, path):
        """Restore model + optimizer state previously written by ``save``."""
        path = Path(path)
        assert path.exists()
        pkg = torch.load(path)

        vae = self.accelerator.unwrap_model(self.vae)
        vae.load_state_dict(pkg['model'])

        self.vae_optim.load_state_dict(pkg['vae_optim'])
        # Only present when the checkpoint was saved with the GAN branch on.
        if self.use_vgg_and_gan and 'discr_optim' in pkg:
            self.discriminator_optim.load_state_dict(pkg['discr_optim'])

    def train(self):
        """Run the full training loop; returns the last step's log dict."""
        device = self.accelerator.device

        self.vae.train()

        logs = {}  # returned even if the dataloader yields nothing
        for epoch in range(self.epochs):
            for data in self.train_dataloader:
                data = data.to(device)

                # fixed: recompute per step (was computed once before the loop)
                apply_grad_penalty = not (self.steps % self.apply_grad_penalty_steps)

                logs = {}
                with self.accelerator.autocast():
                    # vae_loss, _ = self.vae(data, apply_grad_penalty=apply_grad_penalty)
                    total_loss, loss_dict, _ = self.vae(data)
                self.accelerator.backward(total_loss)

                total_loss = self.accelerator.gather(total_loss).mean().item()
                logs['total_loss'] = total_loss
                for k, v in loss_dict.items():
                    logs[k] = self.accelerator.gather(v).mean().item()

                if self.vae_max_grad_norm is not None:
                    self.accelerator.clip_grad_norm_(self.vae.parameters(), self.vae_max_grad_norm)

                self.vae_optim.step()
                self.vae_optim.zero_grad()
                self.accelerator.wait_for_everyone()

                if self.use_vgg_and_gan:
                    with self.accelerator.autocast():
                        discr_loss = self.vae(data, return_discriminator_loss=True)
                    self.accelerator.backward(discr_loss)

                    # fixed: .item() was called twice (gather(...).item().item())
                    logs['discr_loss'] = self.accelerator.gather(discr_loss).mean().item()

                    if self.discriminator_max_grad_norm is not None:
                        self.accelerator.clip_grad_norm_(self.vae.discriminator.parameters(), self.discriminator_max_grad_norm)

                    self.discriminator_optim.step()
                    self.discriminator_optim.zero_grad()

                logs_str = " | ".join([f"{k}: {v}" for k, v in logs.items()])
                if self.steps % self.log_steps == 0 and self.steps != 0:
                    self.print(
                        f"[{self.steps}|{self.num_train_steps}] {logs_str}")

                # TODO(review): EMA update and periodic reconstruction/eval
                # sampling were previously present here but disabled
                # (commented out). Re-enable via self.ema_vae.update() and a
                # valid_dataloader pass once the eval path is needed.

                self.accelerator.wait_for_everyone()
                if self.is_main and not (self.steps % self.save_steps) and self.steps != 0:
                    state_dict = self.vae.state_dict()
                    model_path = os.path.join(self.output_dir, f'vae.{self.steps}.pt')
                    torch.save(state_dict, model_path)

                    if self.use_ema:
                        ema_state_dict = self.ema_vae.state_dict()
                        model_path = os.path.join(self.output_dir, f'vae.{self.steps}.ema.pt')
                        torch.save(ema_state_dict, model_path)

                    self.print(f'saving model ...')

                self.steps += 1

        return logs