from functools import partial
from typing import Any, Dict, Optional, Tuple
from warnings import warn
from omegaconf import DictConfig, ListConfig

from torch import nn
from torch.distributed import destroy_process_group, init_process_group
from torch.optim import Optimizer
from torch.utils.data import DataLoader, DistributedSampler
from torchtune import config, modules, rlhf, training, utils
from torchtune.data import CROSS_ENTROPY_IGNORE_IDX, padded_collate_dpo
from torchtune.datasets import ConcatDataset
from torchtune.recipe_interfaces import FTRecipeInterface
from torchtune.rlhf.loss import SimPOLoss
from tqdm import tqdm
import sys
import time
import torch
from torchtune.training.activations import apply_selective_activation_checkpointing

# Module-level logger shared by all ranks; rank-zero gating happens at call sites.
log = utils.get_logger("DEBUG")


class FullFinetuneDPORecipeDistributed(FTRecipeInterface):
    """
    Combined recipe for full parameter fine-tuning and DPO training for dense transformer-based LLMs such as Llama2.
    This recipe supports distributed training and can be run on a single node (1 to 8 GPUs).

    Features:
        - FSDP, Activation Checkpointing, Precision, Gradient Accumulation, Checkpointing, Logging
        - Supports both full parameter fine-tuning and DPO training

    Raises:
        ValueError: If ``dtype`` resolves to fp16, which this recipe does not support.
    """

    def __init__(self, cfg: DictConfig) -> None:
        self._device = utils.get_device(device=cfg.device)
        self._dtype = training.get_dtype(cfg.dtype, device=self._device)

        # bf16/fp32 only: full fp16 training is numerically unstable without
        # loss scaling, which this recipe does not implement.
        if self._dtype == torch.float16:
            raise ValueError(
                "full fp16 training is not supported with this recipe. Please use bf16 or fp32 instead."
            )

        _, rank = training.get_world_size_and_rank()
        self._is_rank_zero = rank == 0

        # Logging attributes.
        self._output_dir = cfg.output_dir
        self._log_every_n_steps = cfg.get("log_every_n_steps", 1)
        self._log_peak_memory_stats = cfg.get("log_peak_memory_stats", False)

        self._enable_activation_checkpointing = cfg.enable_activation_checkpointing

        # Training progress state (updated during train(), restored on resume).
        self.seed = training.set_seed(seed=cfg.seed)
        self.epochs_run = 0
        self.total_epochs = cfg.epochs
        self.max_steps_per_epoch = cfg.max_steps_per_epoch
        self.global_step = 0
        self._resume_from_checkpoint = cfg.resume_from_checkpoint
        self._gradient_accumulation_steps = cfg.gradient_accumulation_steps

        # Optional profiler hook. train() previously referenced self._profiler
        # without it ever being set, which raised AttributeError; default to
        # None and guard every use.
        self._profiler: Optional[Any] = None

    def load_checkpoint(self, cfg_checkpointer: DictConfig) -> Dict[str, Any]:
        """Instantiate the checkpointer and load the (model/optimizer/recipe) state.

        When resuming, also restores recipe progress (epochs run, seed).

        Returns:
            Dict[str, Any]: the loaded checkpoint state dict.
        """
        self._checkpointer = config.instantiate(
            cfg_checkpointer,
            resume_from_checkpoint=self._resume_from_checkpoint,
        )
        checkpoint_dict = self._checkpointer.load_checkpoint()

        if self._resume_from_checkpoint:
            self._update_recipe_state(checkpoint_dict)
        return checkpoint_dict

    def _update_recipe_state(self, ckpt_dict: Dict[str, Any]) -> None:
        """Restore training progress from a checkpoint produced by ``save_checkpoint``."""
        try:
            self.epochs_run = ckpt_dict[training.EPOCHS_KEY]
        except KeyError as e:
            # Chain the cause so the original missing-key traceback is preserved.
            raise KeyError(f"Missing key in checkpoint: {e}") from e

        # Warn (rather than fail) on a seed mismatch and prefer the checkpoint's
        # seed so the resumed run stays consistent with the original one.
        if training.SEED_KEY in ckpt_dict and self.seed != ckpt_dict[training.SEED_KEY]:
            warn(
                "Config value for seed does not match the checkpoint value, "
                f"using the checkpoint value: {ckpt_dict[training.SEED_KEY]}"
            )
            self.seed = ckpt_dict[training.SEED_KEY]

    def setup(self, cfg: DictConfig) -> None:
        """Set up model, tokenizer, optimizer, loss, data, and LR scheduler.

        Order matters: the checkpoint must be loaded before the model (weights),
        the model before the optimizer (parameters), and the tokenizer before
        the dataloader (pad id used by the collate fn).
        """
        if self._is_rank_zero:
            log.info("Setting up the recipe...")

        checkpoint_dict = self.load_checkpoint(cfg_checkpointer=cfg.checkpointer)

        self._model = self._setup_model(
            cfg_model=cfg.model,
            enable_activation_checkpointing=cfg.enable_activation_checkpointing,
            fsdp_cpu_offload=cfg.get("fsdp_cpu_offload", False),
            reshard_after_forward=cfg.get("fsdp_reshard_after_forward", True),
            model_state_dict=checkpoint_dict[training.MODEL_KEY],
        )
        self._tokenizer = config.instantiate(cfg.tokenizer)

        self._optimizer = self._setup_optimizer(
            cfg_optimizer=cfg.optimizer,
            opt_state_dict=(
                checkpoint_dict[training.OPT_KEY]
                if self._resume_from_checkpoint
                else None
            ),
        )

        self._loss_fn = config.instantiate(cfg.loss)
        if self._is_rank_zero:
            log.info("Loss function initialized.")

        self._sampler, self._dataloader = self._setup_data(
            cfg_dataset=cfg.dataset,
            shuffle=cfg.shuffle,
            batch_size=cfg.batch_size,
        )

        # One "step" is one optimizer update, i.e. gradient_accumulation_steps batches.
        self._steps_per_epoch = (
            len(self._dataloader) // self._gradient_accumulation_steps
        )
        if (
            self.max_steps_per_epoch is not None
            and self.max_steps_per_epoch < self._steps_per_epoch
        ):
            self._steps_per_epoch = self.max_steps_per_epoch
        self.global_step = self.epochs_run * self._steps_per_epoch

        # last_epoch lets the scheduler pick up at the right point on resume.
        self._lr_scheduler = self._setup_lr_scheduler(
            cfg_lr_scheduler=cfg.lr_scheduler,
            num_training_steps=self.total_epochs * self._steps_per_epoch,
            last_epoch=self.global_step - 1,
        )

    def _setup_model(
        self,
        cfg_model: DictConfig,
        enable_activation_checkpointing: bool,
        fsdp_cpu_offload: bool,
        reshard_after_forward: bool,
        model_state_dict: Dict[str, Any],
    ) -> nn.Module:
        """Instantiate the model on meta device, shard it with FSDP, and load weights.

        Args:
            cfg_model: config describing the model to instantiate.
            enable_activation_checkpointing: recompute activations in backward to save memory.
            fsdp_cpu_offload: offload sharded params/grads to CPU.
            reshard_after_forward: free full params after forward (FSDP).
            model_state_dict: full (unsharded) state dict to load.
        """
        if self._is_rank_zero:
            log.info("Initializing model...")

        # Meta-device init avoids materializing full weights before sharding.
        with training.set_default_dtype(self._dtype), torch.device("meta"):
            model = config.instantiate(cfg_model)

        if enable_activation_checkpointing:
            # Wrap each transformer layer for activation checkpointing.
            # NOTE(review): the previous call passed only ``model`` to
            # apply_selective_activation_checkpointing, which also requires
            # ac_mode/ac_option arguments; use the standard full-AC helper.
            training.set_activation_checkpointing(
                model, auto_wrap_policy={modules.TransformerSelfAttentionLayer}
            )

        training.shard_model(
            model=model,
            # An empty condition list left every per-layer module unsharded;
            # shard transformer layers per torchtune's standard condition.
            shard_conditions=[training.get_shard_conditions],
            cpu_offload=fsdp_cpu_offload,
            reshard_after_forward=reshard_after_forward,
        )

        with training.set_default_dtype(self._dtype), self._device:
            training.load_from_full_model_state_dict(
                model,
                model_state_dict,
                self._device,
                self._is_rank_zero,
                cpu_offload=fsdp_cpu_offload,
            )

        # Every parameter must have been materialized off the meta device.
        training.validate_no_params_on_meta_device(model)
        if self._is_rank_zero:
            log.info("Model initialized.")

        # Keep ranks in lockstep before training touches the model.
        torch.distributed.barrier()

        return model

    def _setup_optimizer(
        self, cfg_optimizer: DictConfig, opt_state_dict: Optional[Dict[str, Any]] = None
    ) -> Optimizer:
        """Instantiate the optimizer, optionally restoring checkpointed state."""
        optimizer = config.instantiate(cfg_optimizer, self._model.parameters())
        if opt_state_dict:
            # The checkpoint holds a *full* (unsharded) optimizer state dict; it
            # must be re-sharded onto this rank's FSDP parameters rather than
            # loaded directly via optimizer.load_state_dict.
            training.load_from_full_optimizer_state_dict(
                optimizer,
                opt_state_dict,
                self._device,
            )

        if self._is_rank_zero:
            log.info("Optimizer initialized.")
        return optimizer

    def _setup_lr_scheduler(
        self,
        cfg_lr_scheduler: DictConfig,
        num_training_steps: int,
        last_epoch: int,
    ) -> "torch.optim.lr_scheduler.LRScheduler":
        """Instantiate the LR scheduler.

        Args:
            cfg_lr_scheduler: config describing the scheduler.
            num_training_steps: total optimizer steps across all epochs.
            last_epoch: index of the last completed step (-1 for a fresh run).
        """
        lr_scheduler = config.instantiate(
            cfg_lr_scheduler,
            self._optimizer,
            num_training_steps=num_training_steps,
            last_epoch=last_epoch,
        )
        if self._is_rank_zero:
            log.info("Learning rate scheduler initialized.")
        return lr_scheduler

    def _setup_data(
        self,
        cfg_dataset: DictConfig,
        shuffle: bool,
        batch_size: int,
    ) -> Tuple[DistributedSampler, DataLoader]:
        """Build the (distributed) sampler and dataloader for DPO batches.

        A ListConfig dataset value is treated as multiple datasets to concatenate.
        """
        world_size, rank = training.get_world_size_and_rank()

        if isinstance(cfg_dataset, ListConfig):
            ds = ConcatDataset([config.instantiate(d) for d in cfg_dataset])
        else:
            ds = config.instantiate(cfg_dataset)

        sampler = DistributedSampler(
            ds, num_replicas=world_size, rank=rank, shuffle=shuffle, seed=0
        )

        dataloader = DataLoader(
            dataset=ds,
            batch_size=batch_size,
            sampler=sampler,
            # drop_last avoids a ragged final batch across ranks.
            drop_last=True,
            collate_fn=partial(
                padded_collate_dpo,
                padding_idx=self._tokenizer.pad_id,
                ignore_idx=CROSS_ENTROPY_IGNORE_IDX,
            ),
        )

        if self._is_rank_zero:
            log.info("DataLoader initialized.")

        return sampler, dataloader

    def save_checkpoint(
        self,
        epoch: int,
    ) -> None:
        """Gather full (unsharded) state and save it via the checkpointer.

        Intermediate (mid-training) checkpoints additionally carry optimizer and
        recipe-progress state so ``resume_from_checkpoint`` can restore it;
        previously only model weights were saved, so resuming always failed on
        the missing EPOCHS_KEY.
        """
        checkpoint_dict = {}

        # Anything before the final epoch is an intermediate checkpoint.
        intermediate_checkpoint = epoch + 1 < self.total_epochs

        # All ranks participate in the gather; only rank zero gets the full dict.
        cpu_state_dict = training.get_full_model_state_dict(
            self._model,
            self._is_rank_zero,
            device=self._device,
        )
        opt_state_dict = (
            training.get_full_optimizer_state_dict(
                self._optimizer,
                self._is_rank_zero,
                device=self._device,
            )
            if intermediate_checkpoint
            else None
        )

        if self._is_rank_zero:
            checkpoint_dict[training.MODEL_KEY] = cpu_state_dict
            if intermediate_checkpoint:
                # Recipe state consumed by _update_recipe_state on resume.
                checkpoint_dict.update(
                    {
                        training.OPT_KEY: opt_state_dict,
                        training.SEED_KEY: self.seed,
                        training.EPOCHS_KEY: self.epochs_run,
                        training.TOTAL_EPOCHS_KEY: self.total_epochs,
                        training.MAX_STEPS_KEY: self.max_steps_per_epoch,
                    }
                )
            self._checkpointer.save_checkpoint(
                checkpoint_dict,
                epoch=epoch,
                intermediate_checkpoint=intermediate_checkpoint,
            )

    def concatenated_forward(
        self, model: nn.Module, batch: Tuple[torch.Tensor, torch.Tensor]
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
        """Run one forward pass over chosen+rejected sequences stacked in a batch.

        The batch stacks chosen examples in the first half and rejected examples
        in the second half along dim 0 so a single forward serves both.

        Returns:
            (chosen_log_probs, rejected_log_probs, chosen_logits, rejected_logits)
        """
        concatenated_input_ids, concatenated_labels = batch
        concatenated_input_ids = concatenated_input_ids.to(self._device)
        concatenated_labels = concatenated_labels.to(self._device)

        # First half of the batch is "chosen", second half "rejected".
        len_chosen = concatenated_input_ids.shape[0] // 2

        all_logits = model(concatenated_input_ids)

        # SimPO uses length-normalized (average) log-probs; DPO uses sums.
        all_log_probs = rlhf.get_batch_log_probs(
            all_logits,
            concatenated_labels,
            return_average_logprobs=isinstance(self._loss_fn, SimPOLoss),
        )

        chosen_log_probs = all_log_probs[:len_chosen]
        rejected_log_probs = all_log_probs[len_chosen:]

        chosen_logits = all_logits[:len_chosen]
        rejected_logits = all_logits[len_chosen:]

        return (chosen_log_probs, rejected_log_probs, chosen_logits, rejected_logits)

    def train(self) -> None:
        """Run the DPO training loop over the remaining epochs.

        Handles gradient accumulation, optimizer/LR-scheduler stepping,
        rank-zero logging, and end-of-epoch checkpointing.
        """
        training.cleanup_before_training()

        # Start from clean gradients in case setup left any behind.
        self._optimizer.zero_grad()

        if self._profiler is not None:
            self._profiler.start()
        for curr_epoch in range(self.epochs_run, self.total_epochs):
            # Re-seed the sampler so the shuffle order differs across epochs;
            # without this every epoch sees the same batch order.
            self._sampler.set_epoch(curr_epoch)

            # Per-epoch running stats (the log line divides by in-epoch step count).
            running_loss = 0.0
            num_tokens = 0

            for step, batch in enumerate(self._dataloader):
                # Enforce the per-epoch step cap computed in setup().
                if (
                    self.max_steps_per_epoch is not None
                    and (step // self._gradient_accumulation_steps)
                    == self.max_steps_per_epoch
                ):
                    break

                self._model.train()
                (
                    chosen_log_probs,
                    rejected_log_probs,
                    chosen_logits,
                    rejected_logits,
                ) = self.concatenated_forward(self._model, batch)

                loss = self._loss_fn(
                    chosen_log_probs, rejected_log_probs, chosen_logits, rejected_logits
                )

                running_loss += loss.item()
                num_tokens += batch[0].numel()

                # Scale so accumulated gradients average (not sum) over the window.
                (loss / self._gradient_accumulation_steps).backward()

                if (step + 1) % self._gradient_accumulation_steps == 0:
                    self._optimizer.step()
                    self._optimizer.zero_grad(set_to_none=True)
                    # Scheduler advances once per optimizer step, after it.
                    self._lr_scheduler.step()
                    self.global_step += 1

                if self._is_rank_zero and (step + 1) % self._log_every_n_steps == 0:
                    log.info(
                        f"Epoch {curr_epoch + 1}/{self.total_epochs}, Step {step + 1}/{len(self._dataloader)}, Loss: {running_loss / (step + 1)}, Tokens: {num_tokens}"
                    )

            # Mark the epoch complete *before* saving so a resumed run starts
            # at the next epoch.
            self.epochs_run += 1
            self.save_checkpoint(curr_epoch)

        if self._profiler is not None:
            self._profiler.stop()

    def cleanup(self) -> None:
        """Tear down the distributed process group at the end of the run."""
        if self._is_rank_zero:
            log.info("Cleaning up...")
        destroy_process_group()


@config.parse
def recipe_main(cfg: DictConfig) -> None:
    """Entry point: validate the environment, start distributed, and run the recipe."""
    # This recipe only makes sense under torchrun/distributed launch.
    if not training.is_distributed():
        raise ValueError("Distributed training is required for this recipe.")
    if cfg.get("fsdp_cpu_offload", False):
        raise ValueError("FSDP CPU offload is not supported for this recipe.")

    # gloo backs CPU-only runs; nccl backs GPU runs.
    backend = "gloo" if cfg.device == "cpu" else "nccl"
    init_process_group(backend=backend)

    config.log_config(recipe_name="FullFinetuneDPORecipeDistributed", cfg=cfg)

    recipe = FullFinetuneDPORecipeDistributed(cfg=cfg)
    recipe.setup(cfg=cfg)
    recipe.train()
    recipe.cleanup()


# Script entry point; recipe_main returns None, so the process exits with code 0
# on success (sys.exit(None) == exit status 0).
if __name__ == "__main__":
    sys.exit(recipe_main())