# -*- coding: utf-8 -*-
# @Time : 2022/12/28 18:12
# @Author : Zdh
import logging
import math
import datetime
import visdom
import os
import re

from ..utils import get_logger
from ..utils import get_class, save_model
from ..utils.train_tools import load_model_parameter, set_optimizer_lr, load_optimizer_parameter, load_scheduler_parameter
from tqdm import tqdm
import torch
import torch.nn as nn
from SimpleDL.utils import AverageValueMeter
from ..model.losses import MixLoss

class BasicTrainer(object):
    """Basic training-loop driver built from a nested ``opt`` config dict.

    Responsibilities (all driven by ``opt``):
      * build model / criterion / optimizer / dataset / dataloader,
      * optionally load pretrained weights or resume from a checkpoint,
      * run the epoch/iteration loop with optional linear LR warm-up,
        LR scheduling, periodic checkpoint saving and visdom loss curves.
    """

    def __init__(self, opt):
        super(BasicTrainer, self).__init__()
        self.opt = opt
        self.train_opt = opt.get("train", None)
        self.main_logger = get_logger(opt["global"]["logger_name"])  # main logger
        # ******* mandatory initialisation ******* #
        device_ids, self.device = self.get_device()
        self.model = get_class(opt=opt["model"])  # build the model
        # Load pretrained weights only when not resuming from a checkpoint
        # (self.retrain() restores its own weights later).
        if opt["model"].get("pretrain", None) and self.train_opt.get("retrain", None) is None:
            pretrain_path = opt["model"].get("pretrain")
            net_key = opt["model"].get("pretrain_key", "net")
            strict = opt["model"].get("strict", None)
            strict = True if strict is None else strict
            self.model, load_set_msg = load_model_parameter(self.model, pretrain_path, net_key, strict)
            # BUGFIX: typo "form" -> "from" in the log message.
            self.main_logger.info(f"成功加载所有参数 from {pretrain_path}")

        self.criterion = MixLoss(opt.get("losses", None))
        self.optimizer = get_class(opt=opt["optimizer"], params=self.model.parameters())  # build optimizer
        self.dataset = get_class(opt=opt["dataset"])  # build dataset

        # Multi-scale training: a custom collate_fn groups each batch by scale.
        self.multi_scale = False
        collate_fn = None
        if opt["train"].get("multi_scale") is not None:
            collate_fn = get_class(opt=opt["train"]["multi_scale"])
            self.multi_scale = True

        self.dataloader = get_class(opt=opt["dataloader"], dataset=self.dataset, collate_fn=collate_fn)

        # Optional LR scheduler.
        if opt.get("scheduler", None) is None:
            self.scheduler = None
        else:
            self.scheduler = get_class(opt=opt["scheduler"], optimizer=self.optimizer)

        self.scheduler_step = self.opt["train"].get("scheduler_step", None)  # e.g. ["epoch", 1] or ["iter", 500]
        self.visdomConfig = self.opt.get("visdomConfig", None)               # visdom plotting (optional)
        if self.visdomConfig is not None:
            # Default to the experiment name; append a timestamp if that env already exists.
            self.visdom_name = self.visdomConfig.get("name", self.opt["global"]["name"])
            if self.visdom_name in visdom.Visdom().get_env_list():
                current_datetime = datetime.datetime.now()
                self.visdom_name += str(current_datetime.strftime("_%Y%m%d-%H%M%S"))
            self.visdom_visdom_step = self.visdomConfig.get("visdom_step", None)
            self.vis_bar = visdom.Visdom(env=self.visdom_name)
            if not self.vis_bar.check_connection():
                self.visdomConfig = None
                # BUGFIX: Logger.warn is a deprecated alias of Logger.warning.
                self.main_logger.warning("未能连接到visdom服务，请确保服务开启。本次训练跳过visdom显示")
            else:
                self.main_logger.info(f"创建visdom成功，使用visdom名[{self.visdom_name}]")

        self.use_scheduler = False
        if self.scheduler_step and self.scheduler:
            if not isinstance(self.scheduler_step, list) or not isinstance(self.scheduler_step[0], str):
                err_msg = "scheduler_step must be like [\"epoch\", 1] or [\"iter\", 500], so there are no scheduler be used"
                self.main_logger.warning(err_msg)
                self.use_scheduler = False
            else:
                info_msg = f"scheduler every {self.scheduler_step[1]} {self.scheduler_step[0]} step once"
                self.main_logger.info(info_msg)
                self.use_scheduler = True

        # Training-loop parameters.
        self.max_epoch = self.train_opt.get("max_epoch")
        self.img_key = opt["model"]["input_name"]  # key of the input tensor inside a batch dict

        # Linear warm-up: ramp lr from warmup_init_lr to lr over warm_up iterations.
        self.warm_up_iter = opt["train"].get("warm_up", None)
        self.warm_init_lr = opt["train"].get("warmup_init_lr", None)
        self.init_lr = opt["train"]["lr"]
        self.warmup_flag = False
        if self.warm_up_iter and self.warm_init_lr and self.warm_up_iter > 0:
            self.warmup_flag = True
            # BUGFIX: log the iteration count (warm_up_iter), not warm_init_lr.
            self.main_logger.info(f"use warm up, from {self.warm_init_lr} to {self.init_lr} at {self.warm_up_iter} iter")
            self.warm_up_step = (self.init_lr - self.warm_init_lr) / self.warm_up_iter
        self.start_epoch = 0
        # Resume from a checkpoint when configured.
        self.retrain()
        # Multi-GPU training.
        if device_ids is not None:
            self.model = nn.DataParallel(self.model, device_ids)
        self.model.to(self.device)
        # Non-config members.
        self.metric = AverageValueMeter(ddof=1)
        self.iter_now = 0

    def train(self):
        """Run the training loop from ``start_epoch`` to ``max_epoch``."""
        iter_count = 0
        self.model.train()
        for epoch in range(self.start_epoch, self.max_epoch):
            tqdm_bar = tqdm(enumerate(self.dataloader), total=len(self.dataloader), desc=f"epoch:{epoch}")
            self.metric.reset()
            for ii, data in tqdm_bar:
                # Forward pass and loss computation.
                loss_all = None
                if self.multi_scale:
                    # `data` maps scale -> batch dict; accumulate the loss over scales.
                    for key, val in data.items():
                        img = val[self.img_key].to(self.device)
                        out = self.model(img)
                        loss_all_step, loss_recode = self.criterion(out, val)
                        # TODO remove: temporary re-weighting of the 768 scale.
                        # NOTE(review): assumes `key` supports `768 in key`
                        # (e.g. a tuple of ints); this raises TypeError for str keys — confirm.
                        if 768 in key:
                            loss_all_step = loss_all_step * 2
                        loss_all = loss_all + loss_all_step if loss_all is not None else loss_all_step
                else:
                    img = data[self.img_key].to(self.device)           # input batch
                    out = self.model(img)                              # forward pass
                    loss_all, loss_recode = self.criterion(out, data)  # loss

                self._liner_warm_up()        # linear warm-up (no-op once finished)
                self.optimizer.zero_grad()   # clear gradients
                loss_all.backward()          # backward pass
                self.optimizer.step()        # update parameters

                # Per-iteration bookkeeping.
                # BUGFIX: store a plain float so the meter does not keep the
                # autograd graph (and GPU memory) alive across iterations.
                self.metric.add(loss_all.item())
                if self.use_scheduler and self.scheduler_step[0] == "iter" and (iter_count + 1) % self.scheduler_step[1] == 0:
                    self.scheduler.step()    # TODO: pass call args when schedulers need them
                self.visdom_show("iter", iter_count)

                self.iter_now += 1
                # BUGFIX: iter_count was never incremented, so iter-based
                # scheduling and the visdom iter axis were stuck at 0.
                iter_count += 1
                self.tqdm_postfix_show(tqdm_bar)  # refresh the tqdm postfix info

            self._save_model(epoch)
            if self.use_scheduler and self.scheduler_step[0] == "epoch" and (epoch + 1) % self.scheduler_step[1] == 0:
                # NOTE(review): passing the mean loss only suits metric-driven
                # schedulers (e.g. ReduceLROnPlateau) — needs a more flexible hook.
                self.scheduler.step(float(self.metric.mean()))
            self.visdom_show("epoch", epoch)

    def tqdm_postfix_show(self, tqdm_bar, **kwargs):
        """Attach current loss / lr / iteration info to the tqdm progress bar.

        Extra ``kwargs`` are merged into the postfix verbatim.
        """
        postfix_dict = {
            "loss_now": f"{float(self.metric.val):.5f}",
            "loss_mean": f"{self.metric.mean():.5f}",
        }
        for key, val in kwargs.items():
            postfix_dict[key] = val
        lr_now = self.optimizer.param_groups[0]["lr"]
        postfix_dict["lr"] = f"{lr_now:.6f}"
        postfix_dict["iter"] = self.iter_now
        # NOTE(review): set_postfix_str renders the dict via str(); the typed
        # API would be tqdm_bar.set_postfix(**postfix_dict) — kept as-is.
        tqdm_bar.set_postfix_str(postfix_dict)

    def visdom_show(self, step_str, step_count):
        """Append the mean loss to the visdom "loss" curve.

        Plots only when visdom is enabled, ``visdom_step`` is configured for
        this granularity ("iter"/"epoch"), and the step interval is reached.
        """
        if self.visdomConfig is None:
            return
        if self.visdom_visdom_step is None or self.visdom_visdom_step[0] != step_str:
            return
        # The granularity match is guaranteed by the guard above; only the
        # interval needs checking here.
        if (step_count + 1) % self.visdom_visdom_step[1] == 0:
            self.vis_bar.line(
                X=[step_count],
                Y=[float(self.metric.mean())],
                win="loss",
                update="append",
                name="Loss",
                opts={
                    'showlegend': True,      # show the legend
                    'title': "损失曲线",
                    'xlabel': step_str,      # x-axis label
                    'ylabel': "Loss value",  # y-axis label
                }
            )

    def get_device(self):
        """Resolve the training device(s) from ``opt["train"]["device"]``.

        Accepted config values: a list/tuple of GPU ids, the string "cpu",
        a single int GPU id, or missing/None (auto-select all visible GPUs,
        falling back to CPU).

        Returns:
            tuple: ``(device_ids, device)`` where ``device_ids`` is a list
            for DataParallel or None for single-device training, and
            ``device`` is a device string such as "cpu" or "cuda:0".
        """
        device_ids = []
        msg_info = "used device information:\r\n"
        device_msg = self.opt["train"].get("device", None)

        # BUGFIX: the original test was `A or B and C` (`and` binds tighter),
        # so an empty list entered this branch and crashed on device_ids[0];
        # non-str values also reached `.lower()` below, making the int branch
        # unreachable. Explicit type checks fix both.
        if isinstance(device_msg, (list, tuple)) and len(device_msg) > 0:  # explicit GPU list
            device_ids = list(device_msg)
            device = f"cuda:{device_ids[0]}"
            for device_id in device_msg:
                cuda_msg = torch.cuda.get_device_properties(device_id)
                msg_info += f"\tuse  cuda:{device_id}-【{cuda_msg.name}】 with {cuda_msg.total_memory / (1 << 20):.0f}MiB\r\n"
            if len(device_ids) == 1:  # a single GPU needs no DataParallel wrapper
                device_ids = None
        elif isinstance(device_msg, str) and device_msg.lower() == "cpu":  # explicit CPU
            device_ids = None
            device = "cpu"
            msg_info += "CPU only"
        elif isinstance(device_msg, int):  # single GPU id
            device_ids = None
            device = f"cuda:{device_msg}"
        else:  # not specified (or empty list) -> auto-select
            msg_info = "未指定驱动设备，自适应选择下列设备进行训练：\r\n"
            if torch.cuda.is_available() and torch.cuda.device_count() > 0:
                for device_id in range(torch.cuda.device_count()):
                    device_ids.append(device_id)
                    cuda_msg = torch.cuda.get_device_properties(device_id)
                    msg_info += f"\tuse  cuda:{device_id}-【{cuda_msg.name}】 with {cuda_msg.total_memory / (1 << 20):.0f}MiB\r\n"
                device = f"cuda:{device_ids[0]}"
                if len(device_ids) == 1:
                    device_ids = None
            else:
                device_ids = None
                device = "cpu"
                msg_info += "CPU only"
        self.main_logger.warning(msg_info.strip())
        return device_ids, device

    def _save_model(self, epoch):
        """Save a checkpoint every ``save_interval_epoch`` epochs."""
        save_opt = self.opt["save"]
        if (epoch + 1) % save_opt["save_interval_epoch"] == 0:
            save_model(epoch, self.model, self.optimizer, self.scheduler, save_opt)

    def _liner_warm_up(self):
        """Linearly ramp lr from ``warm_init_lr`` to ``init_lr`` over ``warm_up_iter`` iterations."""
        if self.warmup_flag:
            # BUGFIX: >= instead of == so warm-up always terminates even if
            # iter_now ever jumps past warm_up_iter.
            if self.iter_now >= self.warm_up_iter:
                lr_now = self.init_lr
                self.warmup_flag = False
            else:
                lr_now = self.warm_init_lr + self.iter_now * self.warm_up_step
            set_optimizer_lr(self.optimizer, lr_now)

    def retrain(self):
        """Resume training from the checkpoint in ``opt["train"]["retrain"]``.

        Parses "<name>_epoch<N>.pth" from the file name to recover the start
        epoch, then restores model / optimizer / scheduler state for each
        corresponding key present in the checkpoint. Warm-up is disabled when
        resuming.
        """
        retrain_model_path = self.train_opt.get("retrain", None)
        if retrain_model_path is None:
            return "retrain not be loaded"

        match = re.search(r"(.+)_epoch(\d+)\.pth", os.path.basename(retrain_model_path))
        global_name = self.opt["global"]["name"]
        if match:
            model_name = match.group(1)
            epoch_now = int(match.group(2))
            self.main_logger.info(f"{model_name} retrain from {epoch_now}")
            if model_name != global_name:
                self.main_logger.warning(f"{model_name} and opt global name is not match, {model_name} will  saved to {global_name} dir")
                self.opt["save"]["save_name"] = model_name
            self.start_epoch = epoch_now + 1
        else:
            # BUGFIX: the Logger object was called directly (TypeError at
            # runtime); use .warning() instead.
            self.main_logger.warning(f"{retrain_model_path} not match model name and epoch, and it will retrain with {global_name} name and save to {global_name} from epoch{0}")
            self.start_epoch = 0

        # Load on CPU just to inspect the available keys; the helpers below
        # perform the real (device-aware) loading.
        state_dict = torch.load(retrain_model_path, map_location="cpu")
        if "net" in state_dict.keys():
            load_model_parameter(self.model, retrain_model_path)
            self.main_logger.info(f"load model from {retrain_model_path} for retrain")
        else:
            self.main_logger.warning(f"retrain model is not load case \"net\" key not in {retrain_model_path}")

        if "optimizer" in state_dict.keys():
            load_optimizer_parameter(self.optimizer, retrain_model_path)
            for state in self.optimizer.state.values():  # move optimizer state to the target device
                for k, v in state.items():
                    if isinstance(v, torch.Tensor):
                        state[k] = v.to(self.device)
            self.main_logger.info(f"load optimizer from {retrain_model_path} for retrain")
        else:
            self.main_logger.warning(f"retrain optimizer is not load case \"optimizer\" key not in {retrain_model_path}")

        if "scheduler" in state_dict.keys():
            load_scheduler_parameter(self.scheduler, retrain_model_path)
            self.main_logger.info(f"load scheduler from {retrain_model_path} for retrain")
        else:
            self.main_logger.warning(f"scheduler model is not load case \"scheduler\" key not in {retrain_model_path}")

        if self.warmup_flag:
            self.warmup_flag = False
            self.main_logger.warning(f"warmup not used case retrain")

    @staticmethod
    def clear_root_logger():
        """Strip all handlers from the root logger.

        Avoids duplicated output caused by the pytorch_quant root logger.
        """
        root_logger = logging.getLogger()
        while root_logger.handlers:
            root_logger.removeHandler(root_logger.handlers[0])






