"""
Trainer

Author: Xiaoyang Wu (xiaoyang.wu.cs@gmail.com)
Please cite our work if the code is helpful to you.
"""

import os
import sys
import weakref
import torch
import torch.nn as nn
import torch.utils.data
from functools import partial

# from .train import TrainerBase

if sys.version_info >= (3, 10):
    from collections.abc import Iterator
else:
    from collections import Iterator
from tensorboardX import SummaryWriter

from .defaults import create_ddp_model, worker_init_fn
from .hooks import HookBase, build_hooks
import pointcept.utils.comm as comm
from pointcept.datasets import build_dataset, point_collate_fn, collate_fn
from pointcept.models import build_model
from pointcept.utils.logger import get_root_logger
from pointcept.utils.optimizer import build_optimizer
from pointcept.utils.scheduler import build_scheduler
from pointcept.utils.events import EventStorage, ExceptionWriter
from pointcept.utils.registry import Registry

TRAINERS = Registry("trainers")

class TrainerBase:
    """Minimal hook-driven training-loop skeleton.

    Subclasses implement :meth:`run_step` and may override any of the
    ``before_*`` / ``after_*`` callbacks; the base implementations simply
    dispatch to every registered :class:`HookBase` in registration order.
    """

    def __init__(self) -> None:
        self.hooks = []  # registered HookBase instances, in call order
        self.epoch = 0
        self.start_epoch = 0
        self.max_epoch = 0
        self.max_iter = 0
        # Scratch dict shared with hooks (iteration index, current batch,
        # model outputs, ...).
        self.comm_info = dict()
        self.data_iterator: Iterator = enumerate([])
        self.storage: EventStorage
        self.writer: SummaryWriter

    def register_hooks(self, hooks) -> None:
        """Build hooks from config and attach them to this trainer."""
        hooks = build_hooks(hooks)
        for h in hooks:
            assert isinstance(h, HookBase)
            # To avoid circular reference, hooks and trainer cannot own each other.
            # This normally does not matter, but will cause memory leak if the
            # involved objects contain __del__:
            # See http://engineering.hearsaysocial.com/2013/06/16/circular-references-in-python/
            h.trainer = weakref.proxy(self)
        self.hooks.extend(hooks)

    def train(self):
        """Run the full loop: epochs -> steps, with hook callbacks around each."""
        with EventStorage() as self.storage:
            # => before train
            self.before_train()
            for self.epoch in range(self.start_epoch, self.max_epoch):
                # => before epoch
                self.before_epoch()
                # => run_epoch
                for (
                        self.comm_info["iter"],
                        self.comm_info["input_dict"],
                ) in self.data_iterator:
                    # => before_step
                    self.before_step()
                    # => run_step
                    self.run_step()
                    # => after_step
                    self.after_step()
                # => after epoch
                self.after_epoch()
            # => after train
            self.after_train()

    def before_train(self):
        for h in self.hooks:
            h.before_train()

    def before_epoch(self):
        for h in self.hooks:
            h.before_epoch()

    def before_step(self):
        for h in self.hooks:
            h.before_step()

    def run_step(self, model=None, optimizer=None, scaler=None, scheduler=None):
        """
        Perform a single training step.

        All arguments default to ``None`` so that the base :meth:`train` loop
        (which calls ``run_step()`` with no arguments) reaches the intended
        ``NotImplementedError`` instead of failing with a ``TypeError`` about
        missing positional arguments.

        Args:
            model (nn.Module or None): The model to train.
            optimizer (Optimizer or None): The optimizer to use.
            scaler (GradScaler or None): The gradient scaler for mixed precision training.
            scheduler (Scheduler or None): The learning rate scheduler (optional).
        """
        raise NotImplementedError

    def after_step(self):
        for h in self.hooks:
            h.after_step()

    def after_epoch(self):
        for h in self.hooks:
            h.after_epoch()
        self.storage.reset_histories()

    def after_train(self):
        # Sync GPU before running train hooks
        comm.synchronize()
        for h in self.hooks:
            h.after_train()
        if comm.is_main_process():
            # Subclasses are not obliged to set ``self.writer`` (some keep a
            # list of writers instead), so guard against it being absent.
            writer = getattr(self, "writer", None)
            if writer is not None:
                writer.close()

@TRAINERS.register_module("DefaultTrainer")
class Trainer(TrainerBase):
    """Default trainer that interleaves training of several configured models.

    Takes a list of configs; for each one it builds a model, dataloaders,
    optimizer, scheduler and AMP scaler. Within every epoch, each model runs
    one full pass over its own train loader before the next model's turn.
    """

    def __init__(self, cfgs):
        super().__init__()
        self.epoch = 0
        self.start_epoch = 0
        # All configs are expected to share the same epoch budget; the first
        # config is authoritative for it and for logging paths.
        self.max_epoch = cfgs[0].eval_epoch
        self.best_metric_values = [-torch.inf] * len(cfgs)
        self.logger = get_root_logger(
            log_file=os.path.join(cfgs[0].save_path, "train.log"),
            file_mode="a" if cfgs[0].resume else "w",
        )
        self.cfgs = cfgs
        self.models = []
        self.train_loaders = []
        self.val_loaders = []
        self.optimizers = []
        self.schedulers = []
        self.scalers = []
        self.writers = []

        for i, cfg in enumerate(self.cfgs):
            self.logger.info(f"=> Loading config {i} ...")
            self.logger.info(f"Save path: {cfg.save_path}")
            self.logger.info(f"Config:\n{cfg.pretty_text}")
            self.logger.info("=> Building model ...")
            model = self.build_model(cfg)
            self.models.append(model)
            self.logger.info("=> Building writer ...")
            writer = self.build_writer(cfg)
            self.writers.append(writer)
            self.logger.info("=> Building train dataset & dataloader ...")
            train_loader = self.build_train_loader(cfg)
            self.train_loaders.append(train_loader)
            self.logger.info("=> Building val dataset & dataloader ...")
            val_loader = self.build_val_loader(cfg)
            self.val_loaders.append(val_loader)
            self.logger.info("=> Building optimize, scheduler, scaler(amp) ...")
            optimizer = self.build_optimizer(cfg, model)
            self.optimizers.append(optimizer)
            # NOTE: build_scheduler relies on the train loader for this cfg
            # having been appended just above.
            scheduler = self.build_scheduler(cfg, optimizer)
            self.schedulers.append(scheduler)
            scaler = self.build_scaler(cfg)
            self.scalers.append(scaler)

    def train(self):
        """Run the interleaved multi-model training loop."""
        with EventStorage() as self.storage, ExceptionWriter():
            # => before train
            self.before_train()
            self.logger.info(">>>>>>>>>>>>>>>> Start Training >>>>>>>>>>>>>>>>")
            for self.epoch in range(self.start_epoch, self.max_epoch):
                for i, (model, train_loader, optimizer, scheduler, scaler) in enumerate(zip(
                        self.models, self.train_loaders, self.optimizers, self.schedulers, self.scalers)):
                    # => before epoch
                    if comm.get_world_size() > 1:
                        # Reshuffle the DistributedSampler for this epoch.
                        train_loader.sampler.set_epoch(self.epoch)
                    model.train()
                    data_iterator = enumerate(train_loader)
                    self.before_epoch()
                    # => run_epoch
                    for (
                            self.comm_info["iter"],
                            self.comm_info["input_dict"],
                    ) in data_iterator:
                        # => before_step
                        self.before_step()
                        # => run_step
                        self.run_step(model, optimizer, scaler, scheduler)
                        # => after_step
                        self.after_step()
                    # => after epoch; pass the model index so per-config
                    # epoch-end options apply (the bare call used to raise
                    # TypeError against after_epoch's required parameters).
                    self.after_epoch(i)
            # => after train
            self.after_train()

    def run_step(self, model, optimizer, scaler, scheduler=None):
        """Run one forward/backward/optimizer step for ``model``.

        Args:
            model (nn.Module): one of ``self.models``.
            optimizer: the optimizer paired with ``model``.
            scaler (GradScaler or None): AMP scaler; required when
                ``enable_amp`` is set for this model's config.
            scheduler: per-step LR scheduler; skipped when ``None``.
        """
        # Resolve this model's config once; the original re-scanned
        # self.models with .index() three times per step.
        cfg = self.cfgs[self.models.index(model)]
        input_dict = self.comm_info["input_dict"]
        for key in input_dict.keys():
            if isinstance(input_dict[key], torch.Tensor):
                input_dict[key] = input_dict[key].cuda(non_blocking=True)
        with torch.cuda.amp.autocast(enabled=cfg.enable_amp):
            output_dict = model(input_dict)
            loss = output_dict["loss"]
        optimizer.zero_grad()
        if cfg.enable_amp:
            scaler.scale(loss).backward()
            scaler.step(optimizer)
            scaler.update()
        else:
            loss.backward()
            optimizer.step()
        if cfg.empty_cache:
            torch.cuda.empty_cache()
        self.comm_info["model_output_dict"] = output_dict
        if scheduler is not None:
            scheduler.step()

    def after_epoch(self, model_index=0, scheduler=None):
        """Dispatch epoch-end hooks and apply per-config cache cleanup.

        Args:
            model_index (int): index into ``self.cfgs`` of the model whose
                epoch just finished. Defaults to 0 so parameterless calls
                (e.g. from the base-class loop) still work.
            scheduler: unused; kept for backward compatibility with callers
                that passed it positionally.
        """
        for h in self.hooks:
            h.after_epoch()
        self.storage.reset_histories()
        if self.cfgs[model_index].empty_cache_per_epoch:
            torch.cuda.empty_cache()

    def after_train(self):
        # Sync GPU before running train hooks
        comm.synchronize()
        for h in self.hooks:
            h.after_train()
        if comm.is_main_process():
            # This trainer keeps one writer per model in ``self.writers``;
            # the base class only knows about a single ``self.writer`` (never
            # set here), so close them all explicitly.
            for writer in self.writers:
                if writer is not None:
                    writer.close()

    def build_model(self, cfg):
        """Build one model, optionally convert to SyncBN, and wrap in DDP."""
        model = build_model(cfg.model)
        if cfg.sync_bn:
            model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
        n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
        # The model is appended to self.models only after this method returns,
        # so its index is the current list length (the former .index() lookup
        # raised ValueError here).
        self.logger.info(f"Model {len(self.models)}: Num params: {n_parameters}")
        model = create_ddp_model(
            model.cuda(),
            broadcast_buffers=False,
            find_unused_parameters=cfg.find_unused_parameters,
        )
        return model

    def build_writer(self, cfg):
        """Create a TensorBoard writer on the main process (None elsewhere)."""
        writer = SummaryWriter(cfg.save_path) if comm.is_main_process() else None
        self.logger.info(f"Tensorboard writer logging dir: {cfg.save_path}")
        return writer

    def build_train_loader(self, cfg):
        """Build the (optionally distributed) training dataloader for ``cfg``."""
        train_data = build_dataset(cfg.data.train)
        # Use the logger rather than bare debug print()s.
        self.logger.info(f"Train dataset: {train_data}")
        if comm.get_world_size() > 1:
            train_sampler = torch.utils.data.distributed.DistributedSampler(train_data)
        else:
            train_sampler = None

        # Seed each worker deterministically when a seed is configured.
        init_fn = (
            partial(
                worker_init_fn,
                num_workers=cfg.num_worker_per_gpu,
                rank=comm.get_rank(),
                seed=cfg.seed,
            )
            if cfg.seed is not None
            else None
        )

        train_loader = torch.utils.data.DataLoader(
            train_data,
            batch_size=cfg.batch_size_per_gpu,
            shuffle=(train_sampler is None),
            num_workers=cfg.num_worker_per_gpu,
            sampler=train_sampler,
            collate_fn=partial(point_collate_fn, mix_prob=cfg.mix_prob),
            pin_memory=True,
            worker_init_fn=init_fn,
            drop_last=True,
            persistent_workers=True,
        )
        return train_loader

    def build_val_loader(self, cfg):
        """Build the validation dataloader, or None when evaluation is off."""
        val_loader = None
        if cfg.evaluate:
            val_data = build_dataset(cfg.data.val)
            if comm.get_world_size() > 1:
                val_sampler = torch.utils.data.distributed.DistributedSampler(val_data)
            else:
                val_sampler = None
            val_loader = torch.utils.data.DataLoader(
                val_data,
                batch_size=cfg.batch_size_val_per_gpu,
                shuffle=False,
                num_workers=cfg.num_worker_per_gpu,
                pin_memory=True,
                sampler=val_sampler,
                collate_fn=collate_fn,
            )
        return val_loader

    def build_optimizer(self, cfg, model):
        """Build the optimizer for ``model`` from ``cfg``."""
        return build_optimizer(cfg.optimizer, model, cfg.param_dicts)

    def build_scheduler(self, cfg, optimizer):
        """Build the LR scheduler; total steps = steps/epoch * eval_epoch.

        Must be called after ``build_train_loader`` for the same ``cfg`` —
        __init__ appends that loader just before calling this.
        """
        # The former asserts checked singular attribute names ("optimizer",
        # "train_loader") that never exist on this class and always failed.
        assert len(self.train_loaders) > 0, "build_train_loader must run first"
        # Size the schedule from this config's own loader, not always the
        # first one ([-1] is the loader appended for the current cfg).
        cfg.scheduler.total_steps = len(self.train_loaders[-1]) * cfg.eval_epoch
        return build_scheduler(cfg.scheduler, optimizer)

    def build_scaler(self, cfg):
        """Return a GradScaler when AMP is enabled for ``cfg``, else None."""
        scaler = torch.cuda.amp.GradScaler() if cfg.enable_amp else None
        return scaler
