"""
author: LSH9832
reference: https://github.com/Megvii-BaseDetection/YOLOX
"""

import datetime
import os
import shutil
import time
import json
from loguru import logger

import torch
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.tensorboard import SummaryWriter

from yolox.data import DataPrefetcher
from yolox.utils import (
    MeterBuffer,
    ModelEMA,
    all_reduce_norm,
    get_local_rank,
    get_model_info,
    get_rank,
    get_world_size,
    gpu_mem_usage,
    is_parallel,
    load_ckpt,
    occupy_mem,
    save_checkpoint,
    setup_logger,
    synchronize
)


class Trainer:
    """Training-loop driver for a YOLOX-style experiment.

    Besides running the train/eval loop, it writes small machine-readable
    progress files (pid / now_epoch / per-epoch loss JSON / eval JSON) under
    ``output_dir/train_name`` so an external process (presumably a monitoring
    front-end) can follow the run.
    """

    start_epoch = 0         # first epoch index to run (set by `resume_train`)
    epoch = 0               # current 0-based epoch index
    now_epoch_dir = None    # directory holding the current epoch's progress files
    this_epoch_msg = None   # loss-name -> list of values logged this epoch

    def __init__(self, exp, msg_data):
        # init only defines basic attrs; heavier objects (model, optimizer,
        # dataloader, ...) are built in `before_train`.
        self.exp = exp

        self.msg_data = msg_data

        # training related attr
        self.save_each_epoch = bool(self.msg_data.get('save_each_epoch'))
        self.max_epoch = self.msg_data['epochs']
        self.amp_training = self.msg_data["fp16"]
        self.scaler = torch.cuda.amp.GradScaler(enabled=self.amp_training)
        self.is_distributed = get_world_size() > 1
        self.rank = get_rank()
        self.local_rank = get_local_rank()
        self.device = "cuda:{}".format(self.local_rank)
        self.use_model_ema = exp.ema

        # data/dataloader related attr
        self.data_type = torch.float16 if self.amp_training else torch.float32
        self.input_size = exp.input_size
        self.best_ap = 0

        # metric record
        self.meter = MeterBuffer(window_size=exp.print_interval)
        self.file_name = os.path.join(self.msg_data['output_dir'], self.msg_data['train_name'])

        if self.rank == 0:
            os.makedirs(self.file_name, exist_ok=True)

        setup_logger(
            self.file_name,
            distributed_rank=self.rank,
            filename="train_log.txt",
            mode="a",
        )

    @staticmethod
    def _write_text(path, text, append=False):
        """Write (or append) ``text`` plus a newline to ``path``.

        Replaces the previous ``os.popen('echo ...')`` calls: routing JSON
        through a shell stripped the double quotes (producing invalid JSON)
        and was a shell-injection hazard.
        """
        with open(path, 'a' if append else 'w') as f:
            f.write(text + '\n')

    @staticmethod
    def _safe_mean(values):
        """Arithmetic mean of ``values``; 0.0 for an empty sequence.

        Guards the per-epoch loss averaging against a ZeroDivisionError when
        an epoch logged no values (e.g. fewer iters than print_interval).
        """
        return sum(values) / len(values) if values else 0.0

    def train(self):
        """Entry point: record our PID, then run the full epoch loop."""
        # record the PID so an external controller can find/stop this run
        self._write_text(os.path.join(self.file_name, 'pid'), str(os.getpid()), append=True)

        os.makedirs(os.path.join(self.file_name, 'epochs'), exist_ok=True)

        self.before_train()
        try:
            self.train_in_epoch()
        finally:
            # `after_train` must run whether training finished or raised
            self.after_train()

    def train_in_epoch(self):
        """Run every remaining epoch; `self.epoch` is the loop variable."""
        for self.epoch in range(self.start_epoch, self.max_epoch):
            self.before_epoch()
            self.train_in_iter()
            self.after_epoch()

    def train_in_iter(self):
        """Run one epoch's iterations; `self.iter` is the loop variable."""
        for self.iter in range(self.max_iter):
            self.before_iter()
            self.train_one_iter()
            self.after_iter()

    def train_one_iter(self):
        """One optimization step: fetch batch, forward, backward, EMA, LR."""
        iter_start_time = time.time()

        inps, targets = self.prefetcher.next()
        inps = inps.to(self.data_type)
        targets = targets.to(self.data_type)
        targets.requires_grad = False
        inps, targets = self.exp.preprocess(inps, targets, self.input_size)
        data_end_time = time.time()

        # forward under autocast when fp16 training is enabled
        with torch.cuda.amp.autocast(enabled=self.amp_training):
            outputs = self.model(inps, targets)

        loss = outputs["total_loss"]

        self.optimizer.zero_grad()
        self.scaler.scale(loss).backward()
        self.scaler.step(self.optimizer)
        self.scaler.update()

        if self.use_model_ema:
            self.ema_model.update(self.model)

        # scheduler gives the lr for the *next* global iteration
        lr = self.lr_scheduler.update_lr(self.progress_in_iter + 1)
        for param_group in self.optimizer.param_groups:
            param_group["lr"] = lr

        iter_end_time = time.time()
        self.meter.update(
            iter_time=iter_end_time - iter_start_time,
            data_time=data_end_time - iter_start_time,
            lr=lr,
            **outputs,
        )

    def before_train(self):
        """Build model, optimizer, dataloader, LR scheduler and evaluator."""
        logger.info("exp value:\n{}".format(self.exp))
        logger.info("data value:\n{}".format(self.msg_data))

        # model related init
        torch.cuda.set_device(self.local_rank)
        model = self.exp.get_model()
        logger.info(
            "Model Summary: {}".format(get_model_info(model, self.exp.test_size))
        )
        model.to(self.device)

        # solver related init
        self.optimizer = self.exp.get_optimizer(self.msg_data['batch_size'])

        # value of epoch will be set in `resume_train`
        model, train_head_only = self.resume_train(model)

        # data related init
        self.no_aug = self.start_epoch >= self.max_epoch - self.exp.no_aug_epochs
        self.train_loader = self.exp.get_data_loader(
            batch_size=self.msg_data['batch_size'],
            is_distributed=self.is_distributed,
            no_aug=self.no_aug,
            cache_img=False,
        )
        logger.info("init prefetcher, this might take one minute or less...")
        self.prefetcher = DataPrefetcher(self.train_loader)
        # max_iter means iters per epoch
        self.max_iter = len(self.train_loader)

        self.lr_scheduler = self.exp.get_lr_scheduler(
            self.exp.basic_lr_per_img * self.msg_data['batch_size'], self.max_iter
        )
        if self.msg_data.get('occupy'):
            occupy_mem(self.local_rank)

        if self.is_distributed:
            model = DDP(model, device_ids=[self.local_rank], broadcast_buffers=False, find_unused_parameters=True)

        if self.use_model_ema:
            self.ema_model = ModelEMA(model, 0.9998)
            self.ema_model.updates = self.max_iter * self.start_epoch

        self.model = model
        # Freeze-backbone mode when only a backbone checkpoint was loaded.
        # NOTE(review): skipped under DDP because `model` is then wrapped and
        # `model.backbone` is not reachable directly -- confirm intended.
        if train_head_only and not self.is_distributed:
            self.model.backbone.eval()
            self.model.head.train()
        else:
            self.model.train()

        self.evaluator = self.exp.get_evaluator(
            batch_size=self.msg_data['batch_size'], is_distributed=self.is_distributed
        )
        # Tensorboard logger (rank 0 only)
        if self.rank == 0:
            self.tblogger = SummaryWriter(self.file_name)

        logger.info("Training start...")
        logger.info("\n{}".format(model))

    def after_train(self):
        logger.info(
            "Training of experiment is done and the best AP is {:.2f}".format(self.best_ap * 100)
        )

    def before_epoch(self):
        """Create this epoch's progress directory and reset loss history."""
        logger.info("---> start train epoch{}".format(self.epoch + 1))

        # publish the current (1-based) epoch number for the monitoring UI
        if self.rank == 0:
            self._write_text(os.path.join(self.file_name, 'now_epoch'), '%d' % (self.epoch + 1))
        self.now_epoch_dir = os.path.join(self.file_name, 'epochs', ('%d' % (self.epoch + 1)).zfill(4))
        if os.path.exists(self.now_epoch_dir):
            shutil.rmtree(self.now_epoch_dir)
        os.makedirs(self.now_epoch_dir, exist_ok=True)
        # per-epoch loss history: filled in `after_iter`, averaged in `after_epoch`
        self.this_epoch_msg = {
            "total_loss": [],
            "iou_loss": [],
            "conf_loss": [],
            "cls_loss": [],
            "l1_loss": []
        }

        # final no_aug_epochs: disable mosaic and enable the extra L1 loss
        if self.epoch + 1 == self.max_epoch - self.exp.no_aug_epochs or self.no_aug:
            logger.info("--->No mosaic aug now!")
            self.train_loader.close_mosaic()
            logger.info("--->Add additional L1 loss now!")
            if self.is_distributed:
                self.model.module.head.use_l1 = True
            else:
                self.model.head.use_l1 = True
            self.exp.eval_interval = 1
            if not self.no_aug:
                self.save_ckpt(ckpt_name="last_mosaic_epoch")

    def after_epoch(self):
        """Save the latest checkpoint, persist loss averages, maybe evaluate."""
        self.save_ckpt(ckpt_name="latest")

        # persist the average of each logged loss for this epoch; `_safe_mean`
        # guards against losses that were never reported (empty lists)
        self.this_epoch_msg = {
            name: self._safe_mean(values)
            for name, values in self.this_epoch_msg.items()
        }
        if self.rank == 0:
            self._write_text(
                os.path.join(self.now_epoch_dir, 'final'), json.dumps(self.this_epoch_msg)
            )

        # run validation every `eval_interval` epochs
        if (self.epoch + 1) % self.exp.eval_interval == 0:
            all_reduce_norm(self.model)
            self.evaluate_and_save_model()

    def before_iter(self):
        pass

    def after_iter(self):
        """
        `after_iter` contains two parts of logic:
            * log information
            * reset setting of resize
        """
        # log needed information
        if (self.iter + 1) % self.exp.print_interval == 0:
            # TODO check ETA logic
            left_iters = self.max_iter * self.max_epoch - (self.progress_in_iter + 1)
            eta_seconds = self.meter["iter_time"].global_avg * left_iters
            eta_str = "ETA: {}".format(datetime.timedelta(seconds=int(eta_seconds)))

            progress_str = "epoch: {}/{}, iter: {}/{}".format(
                self.epoch + 1, self.max_epoch, self.iter + 1, self.max_iter
            )
            loss_meter = self.meter.get_filtered_meter("loss")
            loss_str = ", ".join(
                ["{}: {:.1f}".format(k, v.latest) for k, v in loss_meter.items()]
            )

            time_meter = self.meter.get_filtered_meter("time")
            time_str = ", ".join(
                ["{}: {:.3f}s".format(k, v.avg) for k, v in time_meter.items()]
            )

            logger.info(
                "{}, mem: {:.0f}Mb, {}, {}, lr: {:.3e}".format(
                    progress_str,
                    gpu_mem_usage(),
                    time_str,
                    loss_str,
                    self.meter["lr"].latest,
                )
                + (", size: {:d}, {}".format(self.input_size[0], eta_str))
            )

            # machine-readable progress record for the monitoring front-end
            all_msg = {
                'now_epoch': self.epoch + 1,
                'total_epoch': self.max_epoch,
                'now_iter': self.iter + 1,
                'total_iter': self.max_iter,
                'memory_use': int(gpu_mem_usage()),
                'lr': '%.3e' % self.meter["lr"].latest,
            }

            for k, v in loss_meter.items():
                all_msg[k] = float('%.2f' % v.latest)

            # accumulate per-epoch loss history; skip losses missing from this
            # interval (avoids a KeyError e.g. before l1 loss is enabled)
            for loss_name in self.this_epoch_msg:
                if loss_name in all_msg:
                    self.this_epoch_msg[loss_name].append(all_msg[loss_name])

            if self.rank == 0:
                line = json.dumps(all_msg)
                self._write_text(os.path.join(self.now_epoch_dir, 'total'), line, append=True)
                self._write_text(os.path.join(self.now_epoch_dir, 'latest'), line)

            self.meter.clear_meters()

        # random resizing
        if (self.progress_in_iter + 1) % 10 == 0:
            self.input_size = self.exp.random_resize(
                self.train_loader, self.epoch, self.rank, self.is_distributed
            )

    @property
    def progress_in_iter(self):
        """Global 0-based iteration index across all epochs."""
        return self.epoch * self.max_iter + self.iter

    def resume_train(self, model):
        """Optionally load pretrained weights into ``model``.

        Returns ``(model, train_head_only)`` where ``train_head_only`` is True
        when only a backbone checkpoint was available (head trains from scratch).
        """
        self.start_epoch = 0
        train_head_only = False
        if self.msg_data['use_pretrained_weight']:
            logger.info("using pretrained weight")

            ckpt_file = self.msg_data['pretrained_weight_file']
            ckpt = torch.load(ckpt_file, map_location=self.device)

            # resume the model/optimizer state dict
            if "model" in ckpt:
                model.load_state_dict(ckpt["model"])
            else:
                # split checkpoint: backbone and/or head stored separately
                if "backbone" in ckpt:
                    model.backbone.load_state_dict(ckpt["backbone"])
                    train_head_only = True
                if "head" in ckpt:
                    try:
                        model.head.load_state_dict(ckpt["head"])
                    except Exception:
                        # best-effort: a class-count mismatch is expected when
                        # fine-tuning on a new dataset (was a bare `except:`)
                        logger.info("head not match, skip loading head weight")

            if "optimizer" in ckpt:
                self.optimizer.load_state_dict(ckpt["optimizer"])

            # resume the training state variables
            self.start_epoch = self.msg_data.get('start_epoch', 0)
            logger.info(
                "loaded checkpoint '{}' (epoch {})".format(
                    ckpt_file, self.start_epoch
                )
            )
        return model, train_head_only

    def evaluate_and_save_model(self):
        """Evaluate on the validation set, log AP and refresh best weights."""
        if self.use_model_ema:
            evalmodel = self.ema_model.ema
        else:
            evalmodel = self.model
            if is_parallel(evalmodel):
                evalmodel = evalmodel.module

        ap50_95, ap50, summary = self.exp.eval(
            evalmodel, self.evaluator, self.is_distributed
        )
        eval_msg = {
            "ap50_95": float(ap50_95),
            "ap50": float(ap50),
            "now_epoch": self.epoch + 1,
        }
        if self.rank == 0:
            self._write_text(
                os.path.join(self.file_name, 'eval'), json.dumps(eval_msg), append=True
            )
        self.model.train()
        if self.rank == 0:
            self.tblogger.add_scalar("val/COCOAP50", ap50, self.epoch + 1)
            self.tblogger.add_scalar("val/COCOAP50_95", ap50_95, self.epoch + 1)
            logger.info("\n" + summary)
        synchronize()
        if ap50_95 > self.best_ap:
            # this evaluation is the new best: update the best weight files
            self.save_ckpt("best", True)
        self.best_ap = max(self.best_ap, ap50_95)

    def save_ckpt(self, ckpt_name, update_best_ckpt=False):
        """Save full and split (backbone/head) checkpoints. Rank-0 only."""
        if self.rank != 0:
            return
        save_model = self.ema_model.ema if self.use_model_ema else self.model
        logger.info("Save weights to {}".format(self.file_name))
        ckpt_state = {
            "start_epoch": self.epoch + 1,
            "model": save_model.state_dict(),
            "optimizer": self.optimizer.state_dict(),
        }
        # separate backbone/head states so either part can be reused alone
        split_ckpt = {
            "backbone": {"backbone": save_model.backbone.state_dict()},
            "head": {"head": save_model.head.state_dict()},
        }
        epoch_tag = (self.epoch + 1) if self.save_each_epoch else None

        save_checkpoint(
            state=ckpt_state,
            is_best=update_best_ckpt,
            save_dir=self.file_name,
            model_name=ckpt_name,
            epoch=epoch_tag,
        )
        save_checkpoint(
            state=split_ckpt,
            is_best=update_best_ckpt,
            save_dir=self.file_name,
            model_name=ckpt_name,
            epoch=epoch_tag,
            is_devided=True,  # (sic) keyword spelling defined by save_checkpoint
        )
