"""Copyright(c) 2023 lyuwenyu. All Rights Reserved.
"""

import torch 
import torch.nn as nn 

from datetime import datetime
from pathlib import Path 
from typing import Dict
import atexit

from ..misc import dist_utils
from ..core import BaseConfig


def to(m: nn.Module, device: str):
    """Move *m* onto *device*; a ``None`` module is passed through unchanged."""
    return None if m is None else m.to(device)


class BaseSolver(object):
    """Base train/eval solver wiring a :class:`BaseConfig` into runnable state.

    Subclasses implement :meth:`fit` and :meth:`val`; this base class owns
    device placement, checkpoint save/load, and lazy component setup.
    """

    def __init__(self, cfg: BaseConfig) -> None:
        self.cfg = cfg

    def _setup(self):
        """Initialize shared components, instantiating only what is needed.

        Sets up device, model (wrapped for distributed training), criterion,
        postprocessor, EMA, scaler, output directory and summary writer.
        """
        cfg = self.cfg
        if cfg.device:
            device = torch.device(cfg.device)
        else:
            # Fall back to GPU when available, otherwise CPU.
            device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

        self.model = cfg.model

        # NOTE (lyuwenyu): the tuning checkpoint must be loaded before the EMA
        # instance is built, so EMA starts from the tuned weights.
        if self.cfg.tuning:
            print(f'tuning checkpoint from {self.cfg.tuning}')
            self.load_tuning_state(self.cfg.tuning)

        # Move the model to the target device and wrap it for distributed use.
        self.model = dist_utils.warp_model(
            self.model.to(device),
            sync_bn=cfg.sync_bn,
            find_unused_parameters=cfg.find_unused_parameters)

        self.criterion = to(cfg.criterion, device)
        self.postprocessor = to(cfg.postprocessor, device)

        self.ema = to(cfg.ema, device)
        self.scaler = cfg.scaler

        self.device = device
        self.last_epoch = self.cfg.last_epoch

        self.output_dir = Path(cfg.output_dir)
        self.output_dir.mkdir(parents=True, exist_ok=True)
        self.writer = cfg.writer

        if self.writer:
            # Ensure the writer is flushed/closed on interpreter exit.
            atexit.register(self.writer.close)
            if dist_utils.is_main_process():
                self.writer.add_text('config', repr(cfg), 0)

    def cleanup(self):
        """Schedule the summary writer to be closed at interpreter exit.

        NOTE(review): this registers ``close`` with ``atexit`` instead of
        closing immediately, mirroring ``_setup``; confirm a second
        registration (close called twice) is intended/harmless.
        """
        if self.writer:
            atexit.register(self.writer.close)

    def train(self):
        """Prepare all components needed for training (loaders, optimizer, ...)."""
        self._setup()
        self.optimizer = self.cfg.optimizer
        self.lr_scheduler = self.cfg.lr_scheduler
        self.lr_warmup_scheduler = self.cfg.lr_warmup_scheduler

        # Wrap loaders for distributed training, preserving configured shuffling.
        self.train_dataloader = dist_utils.warp_loader(
            self.cfg.train_dataloader, shuffle=self.cfg.train_dataloader.shuffle)
        self.val_dataloader = dist_utils.warp_loader(
            self.cfg.val_dataloader, shuffle=self.cfg.val_dataloader.shuffle)

        self.evaluator = self.cfg.evaluator

        # NOTE instantiating order: resume must come after the components above
        # exist, since it restores their state dicts by attribute name.
        if self.cfg.resume:
            print(f'Resume checkpoint from {self.cfg.resume}')
            self.load_resume_state(self.cfg.resume)

    def eval(self):
        """Prepare the components needed for evaluation only."""
        self._setup()

        self.val_dataloader = dist_utils.warp_loader(
            self.cfg.val_dataloader, shuffle=self.cfg.val_dataloader.shuffle)

        self.evaluator = self.cfg.evaluator

        # NOTE instantiating order: resume after components exist (see train()).
        if self.cfg.resume:
            print(f'Resume checkpoint from {self.cfg.resume}')
            self.load_resume_state(self.cfg.resume)

    def to(self, device):
        """Move every attribute that supports ``.to`` onto *device*.

        The result is assigned back so that objects whose ``.to`` returns a
        new object (e.g. tensors) are actually moved; ``nn.Module.to``
        returns the module itself, so modules are unaffected by the rebind.
        """
        for k, v in self.__dict__.items():
            if hasattr(v, 'to'):
                self.__dict__[k] = v.to(device)

    def state_dict(self):
        """Build a checkpoint dict from every attribute exposing ``state_dict``.

        Used for both train and eval checkpoints.
        """
        state = {}
        state['date'] = datetime.now().isoformat()

        # TODO for resume
        state['last_epoch'] = self.last_epoch

        for k, v in self.__dict__.items():
            if hasattr(v, 'state_dict'):
                # Unwrap DDP-style wrappers so keys are stable across setups.
                v = dist_utils.de_parallel(v)
                state[k] = v.state_dict()

        return state

    def load_state_dict(self, state):
        """Restore attributes in place from a dict produced by :meth:`state_dict`.

        Attributes missing from *state* are reported and left untouched.
        """
        # TODO
        if 'last_epoch' in state:
            self.last_epoch = state['last_epoch']
            print('Load last_epoch')

        for k, v in self.__dict__.items():
            if not hasattr(v, 'load_state_dict'):
                continue
            if k in state:
                v = dist_utils.de_parallel(v)
                v.load_state_dict(state[k])
                print(f'Load {k}.state_dict')
            else:
                print(f'Not load {k}.state_dict')

    @staticmethod
    def _load_checkpoint(path: str):
        """Load a checkpoint dict from a URL or local file, mapped to CPU.

        ``map_location='cpu'`` avoids inflating cuda:0 memory on load.
        """
        if path.startswith('http'):
            return torch.hub.load_state_dict_from_url(path, map_location='cpu')
        # NOTE(review): torch.load unpickles arbitrary objects — only load
        # checkpoints from trusted sources (consider ``weights_only=True``).
        return torch.load(path, map_location='cpu')

    def load_resume_state(self, path: str):
        """Resume full solver state (model, optimizer, ...) from *path*."""
        self.load_state_dict(self._load_checkpoint(path))

    def load_tuning_state(self, path: str):
        """Load only model weights for fine-tuning, skipping missing or
        shape-mismatched keys.
        """
        state = self._load_checkpoint(path)

        # Unwrap any distributed wrapper before touching the model weights.
        module = dist_utils.de_parallel(self.model)

        # TODO hard code: prefer EMA weights when the checkpoint carries them.
        if 'ema' in state:
            params = state['ema']['module']
        else:
            params = state['model']
        stat, infos = self._matched_state(module.state_dict(), params)

        # strict=False: keys dropped by _matched_state stay at current values.
        module.load_state_dict(stat, strict=False)
        print(f'Load model.state_dict, {infos}')

    @staticmethod
    def _matched_state(state: Dict[str, torch.Tensor], params: Dict[str, torch.Tensor]):
        """Intersect *params* with *state*, keeping keys whose shapes match.

        Returns ``(matched, {'missed': [...], 'unmatched': [...]})`` where
        ``missed`` lists keys of *state* absent from *params* and
        ``unmatched`` lists keys present in both but with different shapes.
        """
        missed_list = []
        unmatched_list = []
        matched_state = {}
        for k, v in state.items():
            if k not in params:
                missed_list.append(k)
            elif v.shape == params[k].shape:
                matched_state[k] = params[k]
            else:
                unmatched_list.append(k)

        return matched_state, {'missed': missed_list, 'unmatched': unmatched_list}

    def fit(self):
        """Train the model; must be implemented by subclasses."""
        raise NotImplementedError('')

    def val(self):
        """Evaluate the model; must be implemented by subclasses."""
        raise NotImplementedError('')
