import logging
import math
import os
from time import time

import torch
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import DistributedType, ProjectConfiguration
from tqdm import tqdm

logger = get_logger(__name__)


class AccelerateTrainer:
    """Example trainer built on HuggingFace Accelerate 😊.

    Drives a standard fit loop (train epoch -> optional validation ->
    checkpoint) with gradient accumulation, step/epoch limits, experiment
    tracking and checkpoint resume support.

    Acknowledgement:
    https://github.com/Lightning-AI/lightning/tree/master/examples/fabric/build_your_own_trainer
    """

    def __init__(self,
                 gradient_accumulation_steps: int = 1,
                 limit_train_batches: float = float("inf"),
                 limit_val_batches: float = float("inf"),
                 checkpoint_dir: str = "./checkpoints",
                 log_with: str = "wandb",
                 project_name: str = "untitled",
                 log_every_n_steps: int = 1,
                 valid_every_n_steps: int = None,
                 valid_at_epoch_end=True,
                 max_steps: int = 1000,
                 max_epochs: int = 10,
                 checkpoint_frequency: int = 100,
                 ):
        """Create the trainer and the underlying ``Accelerator``.

        Args:
            gradient_accumulation_steps: perform an optimizer step every N
                batches (overridden by the DeepSpeed config when DeepSpeed
                is the distributed backend).
            limit_train_batches: cap on train batches per epoch.
            limit_val_batches: cap on validation batches per validation run.
            checkpoint_dir: directory checkpoints are written into.
            log_with: tracker backend forwarded to ``Accelerator``.
            project_name: experiment name; also names the log directory and
                checkpoint files. May be ``None`` to skip project config.
            log_every_n_steps: metric-logging interval in optimizer steps.
            valid_every_n_steps: run validation every N optimizer steps;
                ``None`` disables step-based validation.
            valid_at_epoch_end: also validate at the end of every epoch.
            max_steps: stop after this many optimizer steps (``None`` = no cap).
            max_epochs: stop after this many epochs (``None`` = no cap).
            checkpoint_frequency: save a checkpoint every N optimizer steps.
        """
        # Always record project_name: save_checkpoint() uses it for the
        # checkpoint file name even when no project config is created
        # (previously it was left unset when project_name was None,
        # crashing save_checkpoint with AttributeError).
        self.project_name = project_name
        if project_name is not None:
            self.project_dir = f"accelerate_logs/{project_name}"
            self.project_config = ProjectConfiguration(project_dir=self.project_dir)
        else:
            self.project_dir = None
            self.project_config = None

        self.accelerator = Accelerator(
            gradient_accumulation_steps=gradient_accumulation_steps,
            project_config=self.project_config,
            log_with=log_with,
        )

        # Only rank 0 creates directories; every other process waits.
        if self.accelerator.is_main_process:
            os.makedirs(checkpoint_dir, exist_ok=True)
            if self.project_dir is not None:
                os.makedirs(self.project_dir, exist_ok=True)
        self.accelerator.wait_for_everyone()

        self.completed_step = 0  # global step recorded in a loaded checkpoint
        self.global_step = 0     # optimizer steps actually performed
        self.iter_step = 0       # raw batch iterations (incl. accumulation)
        self.current_epoch = 0
        self.should_stop = False

        # DeepSpeed owns gradient accumulation through its own config;
        # mirror its value so the resume/skip bookkeeping stays in sync.
        if self.accelerator.distributed_type == DistributedType.DEEPSPEED:
            self.gradient_accumulation_steps = self.accelerator.state.deepspeed_plugin.deepspeed_config[
                "gradient_accumulation_steps"]
            logger.info(
                f"Gradient accumulation is replaced by DeepSpeed Config: {self.gradient_accumulation_steps}")
        else:
            self.gradient_accumulation_steps = gradient_accumulation_steps

        self.limit_train_batches = limit_train_batches
        self.limit_val_batches = limit_val_batches
        self.checkpoint_dir = checkpoint_dir
        self.checkpoint_frequency = checkpoint_frequency
        self.log_every_n_steps = log_every_n_steps
        self.valid_every_n_steps = valid_every_n_steps
        self.valid_at_epoch_end = valid_at_epoch_end
        self.model = None
        self.max_epochs = max_epochs
        self.max_steps = max_steps
        self.eval_loss_history = []

    @staticmethod
    def _safe_exp(value):
        """Return exp(value), or inf when the loss is too large to exponentiate."""
        try:
            return math.exp(value)
        except OverflowError:
            return float("inf")

    def fit(self, model, optimizer, lr_scheduler, train_loader, val_loader, ckpt_path=None):
        """Training entry point: prepare objects, optionally resume, loop epochs.

        Args:
            model / optimizer / train_loader / val_loader: passed through
                ``accelerator.prepare`` for distributed execution.
            lr_scheduler: stepped manually once per optimizer step (NOT
                prepared -- see the note below).
            ckpt_path: optional checkpoint directory to resume from.
        """
        model, optimizer, train_loader, val_loader = self.accelerator.prepare(
            model, optimizer, train_loader, val_loader
        )

        if self.accelerator.distributed_type == DistributedType.DEEPSPEED:
            ds_gradient_accumulation_steps = self.accelerator.state.deepspeed_plugin.deepspeed_config[
                "gradient_accumulation_steps"]
            if self.gradient_accumulation_steps != ds_gradient_accumulation_steps:
                logger.info(f"Gradient accumulation is replaced by DeepSpeed Config: {ds_gradient_accumulation_steps}")

        # Why is lr_scheduler not prepare()d?
        # https://github.com/huggingface/accelerate/issues/1382
        # Q: When should we perform learning-rate decay?
        #   A: When optimizer.step() is performed on any gpu.
        #   B: When optimizer.step() is performed on every gpu and gathered.
        # Current state: no answer.
        # If A: the scheduler's total steps should be multiplied by accelerator.num_processes.
        # If B: the scheduler should not be prepared.

        self.model = model

        # Resume from checkpoint before entering the loop so the skip logic
        # in train_loop can fast-forward to `completed_step`.
        if ckpt_path is not None:
            self.load_checkpoint(ckpt_path)

        while not self.should_stop:
            self.train_loop(
                model=model,
                optimizer=optimizer,
                lr_scheduler=lr_scheduler,
                train_loader=train_loader,
                val_loader=val_loader,
                limit_batches=self.limit_train_batches,
            )

            # Validate at epoch end only if training actually advanced past
            # the resumed checkpoint.
            if self.valid_at_epoch_end and self.global_step > self.completed_step:
                self.val_loop(
                    model=model,
                    val_loader=val_loader,
                    limit_batches=self.limit_val_batches,
                )

            self.current_epoch += 1

            if self.max_epochs is not None and self.current_epoch >= self.max_epochs:
                self.should_stop = True

        self.accelerator.end_training()

    def train_loop(self, model, optimizer, lr_scheduler, train_loader, val_loader=None, limit_batches=None):
        """Run a single training epoch with accumulation, logging and checkpoints."""
        model.train()
        if limit_batches is None:
            limit_batches = float("inf")

        iterable = self.progbar_wrapper(
            train_loader,
            total=min(len(train_loader), limit_batches),
            desc=f"Epoch {self.current_epoch}",
        )

        for batch_idx, batch in enumerate(iterable):
            self.iter_step += 1

            # Fast-forward through batches already covered by a resumed
            # checkpoint, keeping the lr schedule in sync.
            if self.global_step < self.completed_step:
                if self.iter_step % self.gradient_accumulation_steps == 0:
                    self.global_step += 1
                    lr_scheduler.step()
                continue

            if self.should_stop or batch_idx >= limit_batches:
                break

            with self.accelerator.accumulate(model):
                # Toy regression objective. NOTE(review): assumes a
                # DDP-wrapped nn.Sequential whose first layer has a bias --
                # confirm before reusing with other model types.
                x, y = batch['X'].view(-1, 1).to(model.module[0].bias.dtype), batch['Y'].to(model.module[0].bias.dtype)
                loss = torch.nn.MSELoss()(model(x), y)

                self.accelerator.backward(loss)
                optimizer.step()
                optimizer.zero_grad()

            # Count a global step only when gradients were actually applied
            # (i.e. at an accumulation boundary and not skipped by the
            # grad scaler).
            if self.accelerator.sync_gradients and not self.accelerator.optimizer_step_was_skipped:
                self.global_step += 1
                if self.accelerator.is_main_process:
                    iterable.set_postfix(global_step=self.global_step)
                lr_scheduler.step()

                # Collective op: keep outside the log gate so every rank
                # participates on every synced step.
                train_loss_value = torch.mean(self.accelerator.gather_for_metrics(loss)).item()

                # Log exactly once every `log_every_n_steps` steps
                # (previously logged unconditionally AND again when gated,
                # duplicating every gated entry).
                if self.global_step % self.log_every_n_steps == 0:
                    log_data = {
                        "train/loss": train_loss_value,
                        "train/learning_rate": optimizer.param_groups[0]["lr"],
                        # ppl = exp(loss); previously the raw loss was logged.
                        "train/ppl": self._safe_exp(train_loss_value),
                        "train/epoch": self.current_epoch,
                        "train/steps": self.global_step,
                    }
                    self.accelerator.log(log_data, step=self.global_step)

                # Checkpoint
                if self.global_step % self.checkpoint_frequency == 0:
                    self.save_checkpoint()

                # Step-based validation; disabled when valid_every_n_steps
                # is None (previously `x % None` raised TypeError).
                if self.valid_every_n_steps is not None and self.global_step % self.valid_every_n_steps == 0:
                    self.val_loop(model=model, val_loader=val_loader, limit_batches=self.limit_val_batches)
                    model.train()

                if self.max_steps is not None and self.global_step >= self.max_steps:
                    self.should_stop = True
                    break

    @torch.no_grad()
    def val_loop(self, model, val_loader, limit_batches=None):
        """Evaluate on ``val_loader`` and log mean loss + perplexity.

        Returns:
            (perplexity, eval_loss) tuple, or None when there is no loader.
        """
        if val_loader is None:
            return
        if limit_batches is None:
            limit_batches = float("inf")

        start_time = time()
        model.eval()
        losses = []
        iterable = self.progbar_wrapper(val_loader, total=min(len(val_loader), limit_batches), desc="Validation")
        for batch_idx, batch in enumerate(iterable):
            # Honor the batch cap (previously only the progress-bar total
            # respected it and the loop ran over the whole loader).
            if batch_idx >= limit_batches:
                break
            # Per-batch size comes from the data itself (the old
            # `self.experiment_args` attribute never existed on this class).
            batch_size = batch['X'].shape[0]
            assert batch_size > 0
            # Forward pass -- same toy objective as train_loop.
            x, y = batch['X'].view(-1, 1).to(model.module[0].bias.dtype), batch['Y'].to(model.module[0].bias.dtype)
            loss = torch.nn.MSELoss()(model(x), y)
            # Repeat so gather() weights every sample equally across ranks.
            losses.append(self.accelerator.gather(loss.repeat(batch_size)))

        if not losses:
            # Nothing evaluated (empty loader or limit_batches == 0).
            return float("inf"), float("inf")

        losses = torch.cat(losses)
        eval_loss = torch.mean(losses).item()
        perplexity = self._safe_exp(eval_loss)
        end_time = time()

        log_data = {
            "val/loss": eval_loss,
            "val/ppl": perplexity,
            "val/time": end_time - start_time,
            "val/epoch": self.current_epoch,
            "val/global_step": self.global_step,
        }

        self.eval_loss_history.append(eval_loss)

        self.accelerator.log(log_data, step=self.global_step)

        return perplexity, eval_loss

    def progbar_wrapper(self, iterable, total: int = None, **kwargs):
        """Wrap `iterable` in tqdm on the main process; pass through elsewhere."""
        if self.accelerator.is_main_process:
            return tqdm(iterable, total=total, ncols=100, **kwargs)
        return iterable

    def save_checkpoint(self):
        """Persist accelerator state plus trainer bookkeeping to disk."""
        name = self.project_name if self.project_name is not None else "untitled"
        ckpt_path = os.path.join(self.checkpoint_dir,
                                 f"{name}_epoch={self.current_epoch}_step={self.global_step}")
        trainer_state = {
            "epoch": self.current_epoch,
            "global_step": self.global_step,
            "eval_loss_history": self.eval_loss_history,
        }
        save_location = self.accelerator.save_state(ckpt_path)
        if self.accelerator.is_main_process:
            # torch.save instead of json: eval_loss_history may hold tensors.
            torch.save(trainer_state, os.path.join(save_location, "trainer_state.pt"))
        logger.info("Save checkpoint to {}".format(save_location))

    def load_checkpoint(self, ckpt_path):
        """Restore accelerator state and trainer bookkeeping from `ckpt_path`."""
        self.accelerator.load_state(ckpt_path)
        state_path = os.path.join(ckpt_path, "trainer_state.pt")
        with self.accelerator.main_process_first():
            # Read bookkeeping only when the file exists (previously the
            # path string itself was indexed when the file was missing).
            if os.path.exists(state_path):
                trainer_state = torch.load(state_path)
                self.completed_step = trainer_state["global_step"]
                self.eval_loss_history = trainer_state.get("eval_loss_history", [])
        self.accelerator.wait_for_everyone()
        logger.info("Load checkpoint from {}".format(ckpt_path))

    def init_tracker(self, tracker_project_name, configs, entity):
        """Initialize the experiment tracker (e.g. wandb) on the accelerator."""
        self.accelerator.init_trackers(
            tracker_project_name,
            config=configs,
            init_kwargs={
                "wandb": {"entity": entity, }
            },
        )
        # Use the module logger for consistency with the rest of the class.
        logger.info("Create Tracker - WanDB.")

    @property
    def local_rank(self) -> int:
        """Process index on this node."""
        return self.accelerator.state.local_process_index

    @property
    def global_rank(self) -> int:
        """Process index across all nodes."""
        return self.accelerator.state.process_index

    @property
    def node_rank(self) -> int:
        """Node index; GROUP_RANK is set by torchrun (0 when absent)."""
        # int() so the annotated return type holds even when the env var
        # is present (os.environ values are strings).
        return int(os.environ.get('GROUP_RANK', 0))

    @property
    def world_size(self) -> int:
        """Total number of processes."""
        return self.accelerator.state.num_processes
