import os
import cProfile
import pstats
import logging

import torch

from torch.nn.parallel.distributed import DistributedDataParallel

from mmdet3d.registry import HOOKS

from mmengine.hooks import Hook
from mmengine.hooks.hook import DATA_BATCH
from mmengine.runner import Runner
from mmengine.utils import mkdir_or_exist

# from ..sparse_super_token_encoder import SparseSuperTokenEncoder

try:
    from fvcore.nn import FlopCountAnalysis, parameter_count_table
except ImportError:  # was a bare `except:`, which also hid unrelated errors
    # fvcore is optional. Define stubs so the rest of this module still
    # imports and ParamCountHook degrades gracefully instead of crashing.
    FlopCountAnalysis = None  # previously left undefined -> latent NameError

    def parameter_count_table(model, max_depth):
        """Stub used when fvcore is unavailable; returns an explanatory message."""
        return "FLOPs and params not available. Please install fvcore"


@HOOKS.register_module()
class DebugSaveHookSSTE(Hook):
    """Periodically dumps debug info from ``model.middle_encoder`` to disk.

    After ``warmup`` iterations, every ``interval`` iterations the hook
    enables debug mode on the middle encoder before the train step, then
    collects its debug info afterwards and saves it (together with the data
    batch and the step outputs) to ``<work_dir>/<timestamp>_iter_<i>_debuginfo.pth``.
    Only rank 0 participates.

    Args:
        warmup (int): Iterations to skip before the first dump.
        interval (int): Dump every this many iterations (after warmup).
        exit_at (int): Abort training (via KeyboardInterrupt) after this many
            dumps; values <= 0 disable the early exit.
        remove_prev (bool): If True, delete the previous dump before writing
            a new one so only the latest file is kept.
    """

    def __init__(self, warmup=100, interval=100, exit_at=-1, remove_prev=False):
        self.warmup = warmup
        self.interval = interval
        self.exit_at = exit_at
        self.remove_prev = remove_prev
        self.save_cnt = 0       # number of dumps written so far
        self.last_save = None   # path of the most recent dump, if any

    def set_debug(self, model, value: bool):
        """Toggle debug mode on the model's middle encoder."""
        # assert isinstance(model.middle_encoder, SparseSuperTokenEncoder)
        model.middle_encoder.set_debug(value)

    def get_debug_info(self, model):
        """Fetch collected debug state from the middle encoder (may be None)."""
        # assert isinstance(model.middle_encoder, SparseSuperTokenEncoder)
        return model.middle_encoder.get_debug_info()

    def get_raw_model(self, runner):
        """Unwrap a DDP-wrapped model so ``middle_encoder`` resolves directly."""
        model = runner.model
        if isinstance(model, DistributedDataParallel):
            model = model.module
        return model

    def before_train_iter(
        self, runner: Runner,
        batch_idx: int,
        data_batch: DATA_BATCH = None,
    ) -> None:
        if runner.rank == 0:
            model = self.get_raw_model(runner)
            # Enable debug collection only on iterations that will be dumped.
            is_dump_iter = runner.iter > self.warmup and runner.iter % self.interval == 0
            self.set_debug(model, is_dump_iter)

    def after_train_iter(
        self, runner: Runner,
        batch_idx: int,
        data_batch: DATA_BATCH = None,
        outputs: dict = None
    ) -> None:
        if runner.rank == 0 and runner.iter > self.warmup and runner.iter % self.interval == 0:
            self.save_cnt += 1
            # Bug fix: the previous dump was removed unconditionally, ignoring
            # the ``remove_prev`` flag (which was stored but never used). Also
            # tolerate the file having been deleted externally.
            if self.remove_prev and self.last_save is not None and os.path.exists(self.last_save):
                os.remove(self.last_save)
            dstpath = os.path.join(runner._work_dir, f'{runner.timestamp}_iter_{runner.iter}_debuginfo.pth')
            model = self.get_raw_model(runner)
            states = self.get_debug_info(model)
            if states is not None:
                self.last_save = dstpath
                states["data"] = data_batch
                states["outputs"] = outputs
                torch.save(states, dstpath)
                if self.exit_at > 0 and self.save_cnt >= self.exit_at:
                    # KeyboardInterrupt lets the runner shut down cleanly.
                    raise KeyboardInterrupt("exit")

@HOOKS.register_module()
class ProfilerHook(Hook):
    """Profiles windows of training iterations with :mod:`cProfile`.

    Every ``interval`` iterations (after ``warmup``) a profiler is started;
    it runs for ``accumulation`` iterations and the cumulative-time stats are
    then printed to stdout or written to ``log_to``.

    Args:
        interval (int): Start a profiling window every this many iterations.
        accumulation (int): Number of iterations each window covers.
        warmup (int): Iterations to skip before the first window.
        log_to (str | None): Optional file path for the stats report; None
            prints to stdout instead.
    """

    def __init__(self, interval=100, accumulation=10, warmup=100, log_to=None):
        self.interval = interval
        self.accumulation = accumulation
        self.warmup = warmup
        self.log_to = log_to
        self.profile = None     # active cProfile.Profile, if a window is open
        self.start_iter = 0     # iteration at which the current window began

    def before_train_iter(self, runner, batch_idx: int, data_batch: DATA_BATCH = None) -> None:
        # Bug fix: don't open a new window while one is still accumulating
        # (possible when interval <= accumulation) — the old code silently
        # replaced the enabled profiler, losing its stats and leaving it on.
        if self.profile is None and runner.iter > self.warmup and runner.iter % self.interval == 0:
            self.profile = cProfile.Profile()
            self.profile.enable()
            self.start_iter = runner.iter

    def after_train_iter(self, runner, batch_idx: int, data_batch: DATA_BATCH = None, outputs: dict = None) -> None:
        if self.profile is not None and self.start_iter + self.accumulation == runner.iter:
            self.profile.disable()
            if self.log_to is None:
                pstats.Stats(self.profile).sort_stats("cumtime").print_stats()
            else:
                # Guard: a bare filename has an empty dirname, and creating
                # '' as a directory raises; only mkdir when there is one.
                log_dir = os.path.dirname(self.log_to)
                if log_dir:
                    mkdir_or_exist(log_dir)
                with open(self.log_to, "w") as f:
                    pstats.Stats(self.profile, stream=f).sort_stats("cumtime").print_stats()
            self.profile = None
            self.start_iter = 0


@HOOKS.register_module()
class ParamCountHook(Hook):
    """Logs a parameter-count table before training starts (rank 0 only).

    The table produced by :func:`parameter_count_table` is sent to the runner
    logger and also written to ``<work_dir>/<timestamp>/params.txt``.

    Args:
        max_depth (int): Module-hierarchy depth shown in the table.
    """

    def __init__(self, max_depth=3) -> None:
        super().__init__()
        self.max_depth = max_depth

    def before_train(self, runner: Runner) -> None:
        if runner.rank != 0:
            return
        model = runner.model
        if isinstance(model, DistributedDataParallel):
            # Unwrap DDP so the table reflects the raw module hierarchy.
            model = model.module
        logger = runner.logger
        params_info = parameter_count_table(model, self.max_depth)
        logger.log(logging.INFO, f"\n{params_info}")
        # Bug fix: the timestamped sub-directory may not exist yet, which made
        # the open() below fail with FileNotFoundError on a fresh work dir.
        dst_dir = os.path.join(runner._work_dir, runner.timestamp)
        mkdir_or_exist(dst_dir)
        dstpath = os.path.join(dst_dir, 'params.txt')
        with open(dstpath, "w") as f:
            f.write(params_info)