from datetime import datetime
from pathlib import Path

import torch

from .runner import Runner
from ..model.model import NNModel
from ..utils.config import Configer
from ..utils.model_dict import ModelDict
from ..worker import Trainer, Validator
from ..log import TBLogger, TXTLogger


class DevRunner(Runner):
    """
    Development runner: on top of the base ``Runner`` it manages a
    trainer/validator workflow and records each run (config snapshot,
    logs, per-epoch checkpoints) under a timestamped directory.
    """

    # Timestamp format used to name each run directory under ``runs/``.
    DATE_FORMAT = "%Y%m%d-%H%M%S"

    def __init__(
        self,
        model: NNModel,
        configer: Configer,
        resource_dir: str = None,
        gpu_id: int = None,
        trainer: Trainer = None,
        validator: Validator = None,
    ) -> None:
        super().__init__(model, configer, resource_dir, gpu_id)
        self._init_context()
        self._init_configs()
        self._model_preparation()

        self.sub_loggers = self._init_loggers()
        # Workers are optional; each one attached gets a back-reference to us.
        self.trainer = trainer
        if self.trainer is not None:
            self.trainer.set_runner(self)
        self.validator = validator
        if self.validator is not None:
            self.validator.set_runner(self)

    def run(self) -> None:
        """Execute the configured workflow.

        With a trainer attached: run the training loop, checkpointing every
        epoch and validating every ``interval_eval`` epochs. With only a
        validator attached: run a single evaluation pass.
        """
        # Train mode
        if self.trainer is not None:
            # NOTE(review): assumes trainer.work() advances epoch_current;
            # otherwise this loop never terminates -- confirm in Trainer.
            while self.epoch_current < self.epoch_total:
                self.trainer.work()
                self._save_epoch()
                if self.validator is not None and self.epoch_current % self.interval_eval == 0:
                    self.validator.work()
        # Eval mode
        elif self.validator is not None:
            self.validator.work()
        return

    def _init_context(self) -> None:
        """Create the per-run directory tree and snapshot the configuration.

        Layout: ``<resource_dir>/runs/<timestamp>/{checkpoint,log}``.
        """
        runs_root = Path(self.resource_dir) / "runs"
        self.time_stamp = datetime.now().strftime(self.DATE_FORMAT)
        self.round_path = dict()
        self.round_path["parent"] = runs_root
        self.round_path["root"] = runs_root / self.time_stamp
        self.round_path["checkpoint"] = self.round_path["root"] / "checkpoint"
        self.round_path["log"] = self.round_path["root"] / "log"
        for path in self.round_path.values():
            path.mkdir(parents=True, exist_ok=True)
        self.configer.snapshot(self.round_path["root"])
        return

    def _init_configs(self) -> None:
        """Derive runtime parameters from the configuration."""
        self.epoch_total = self.cfg.hyper.epochs
        # -1 means "not started yet"; may be overwritten by a resumed checkpoint.
        self.epoch_current = -1
        self.batch_current = -1
        self.enable_amp = self.cfg.flow.use_amp
        # Non-positive values mean "effectively never / unlimited".
        self.interval_eval = self.__finite_positive(self.cfg.flow.interval_eval)
        self.save_limit = self.__finite_positive(self.cfg.flow.save_limit)
        return

    def _model_preparation(self):
        """Apply weight-related config: resume takes priority over pretrain;
        layer freezing is applied independently afterwards."""
        if "resume" in self.cfg.weight:
            self._load_model_resume(self.cfg.weight.resume)
        elif "pretrain" in self.cfg.weight:
            self._load_model_pretrain(self.cfg.weight.pretrain, self.cfg.weight.excluded)
        if "freezed" in self.cfg.weight:
            self._freeze_model_layers(self.cfg.weight.freezed)
        return

    def _load_model_resume(self, resume):
        """Restore full training state from a previous run.

        ``resume`` is either a ``.pth`` file or a run directory, both
        relative to ``runs/``; for a directory the newest checkpoint
        (highest numeric stem) is used. Silently returns when nothing
        usable is found.
        """
        if len(resume) == 0:
            return
        resume_path = self.round_path["parent"] / resume
        if resume_path.is_file():
            pth_path = str(resume_path)
        elif resume_path.is_dir():
            cps = sorted(
                (resume_path / "checkpoint").glob("*.pth"),
                key=lambda x: int(x.stem),
                reverse=True,
            )
            # FIX: guard against a run directory with no checkpoints yet;
            # the original indexed cps[0] unconditionally (IndexError).
            if not cps:
                return
            pth_path = str(cps[0])
        else:
            return
        print(f"Load checkpoint: {pth_path}")
        state_dict = torch.load(pth_path, map_location="cpu")
        self.model.load(state_dict)
        self.epoch_current = state_dict["epoch"]
        self.batch_current = state_dict["batch"]
        return

    def _load_model_pretrain(self, pretrain, excluded):
        """Load network weights from a standalone ``.pth`` file.

        Entries matching ``excluded`` are skipped by the model's own
        ``load_network_weights``. Silently returns for missing or
        non-``.pth`` paths.
        """
        pretrain_path = Path(pretrain)
        # FIX: the original tested ``is_file`` (the bound method, always
        # truthy) instead of calling it, so missing files were never rejected.
        if not pretrain_path.is_file():
            return
        if pretrain_path.suffix != ".pth":
            return
        print(f"Load pretrain: {pretrain}")
        state_dict = torch.load(pretrain, map_location="cpu")
        # Full checkpoints wrap the weights under a "network" key.
        if "network" in state_dict:
            state_dict = state_dict["network"]
        self.model.load_network_weights(state_dict, excluded)
        return

    def _freeze_model_layers(self, tag_names):
        """Freeze the parameter groups named in ``tag_names`` (no-op if empty)."""
        if len(tag_names) == 0:
            return
        self.model.freeze_network_weights(tag_names)
        return

    def _init_loggers(self):
        """Build the TensorBoard and plain-text loggers for this run."""
        return [
            TBLogger(self.round_path["log"], self.cfg.log.step.tb),
            TXTLogger(self.round_path["log"], self.cfg.log.step.txt),
        ]

    def _save_epoch(self) -> None:
        """Write ``<epoch>.pth`` into the run's checkpoint directory and prune old ones."""
        state_dict = self.model.state_dict()
        state_dict["epoch"] = self.epoch_current
        state_dict["batch"] = self.batch_current
        save_path = self.round_path["checkpoint"] / f"{self.epoch_current}.pth"
        torch.save(state_dict, str(save_path))
        self._limited_checkpoints()
        return

    def _limited_checkpoints(self) -> None:
        """Keep only the newest ``save_limit`` checkpoints, deleting the rest."""
        cps = list(self.round_path["checkpoint"].glob("*.pth"))
        if len(cps) > self.save_limit:
            cps = sorted(cps, key=lambda x: int(x.stem), reverse=True)
            outdated = cps[self.save_limit :]
            for x in outdated:
                x.unlink()
        return

    def __finite_positive(self, x):
        """Map non-positive config values to a large sentinel (``LARGE_INT`` from the base class)."""
        return x if x > 0 else self.LARGE_INT
