'''
Modified training components: optimizer and learning-rate scheduler.
Adapted for multimodal radar input; the COCO evaluation is left unchanged.
'''

import logging
import utils.gpu as gpu
from model.build_model import Build_Model
from model.loss.yolo_loss import YoloV4Loss
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
import utils.datasets as data
import time
import random
import argparse
from ptflops import get_model_complexity_info
from utils.flops_counter import get_model_complexity_info_multimodal
from calflops import calculate_flops
from eval.evaluator import *
from utils.tools import *
from tensorboardX import SummaryWriter
import config.risfnet_config as cfg
from utils.log import Logger
from torch.cuda.amp import autocast as autocast
from eval_coco import *
from eval.cocoapi_evaluator import COCOAPIEvaluator

def detection_collate(batch):
    """Collate a list of (image, target) samples into a batch.

    Images are stacked into a single tensor along a new batch dimension;
    targets are kept as a plain Python list (they may vary in length).
    """
    images = [sample[0] for sample in batch]
    targets = [sample[1] for sample in batch]
    return torch.stack(images, 0), targets


class Trainer(object):
    def __init__(self, weight_path=None,
                 ckpt_path=None,
                 resume=False,
                 gpu_id=0,
                 accumulate=1):
        init_seeds(0)
        self.device = gpu.select_device(gpu_id)
        self.start_epoch = 0
        self.best_mAP = 0.0
        self.accumulate = accumulate
        self.weight_path = weight_path
        self.ckpt_path = ckpt_path
        self.multi_scale_train = cfg.TRAIN["MULTI_SCALE_TRAIN"]
        self.showatt = cfg.TRAIN["showatt"]
        if self.multi_scale_train:
            print("Using multi scales training")
        else:
            print("train img size is {}".format(cfg.TRAIN["TRAIN_IMG_SIZE"]))
        self.train_dataset = data.Build_Dataset(
            anno_file_type="train", img_size=cfg.TRAIN["TRAIN_IMG_SIZE"]
        )
        self.epochs = (
            cfg.TRAIN["EPOCHS"]
        )
        self.eval_epoch = (
            cfg.TRAIN["EVAL_EPOCH"] # 30 # eval start, <=epochs
        )
        self.train_dataloader = DataLoader(
            self.train_dataset,
            batch_size=cfg.TRAIN["BATCH_SIZE"],
            num_workers=cfg.TRAIN["NUMBER_WORKERS"],
            shuffle=True,
            pin_memory=True,
        )

        self.risfnet = Build_Model(weight_path=weight_path, resume=resume, showatt=self.showatt).to(
            self.device
        )

        print("Parameter Count: %d" % sum(p.numel() for p in self.risfnet.parameters() if p.requires_grad))

        if cfg.TRAIN["DATA_TYPE"] == 'FloW':
            # 使用ptflops库计算复杂度
            prepare_input = lambda _: {
                "x":  torch.FloatTensor(1, 3, 416, 416).to('cuda'),
                "y":  torch.FloatTensor(1, 3*cfg.TRAIN["RADAR_FRAME"], 416, 416).to('cuda')
            }
            macs, params = get_model_complexity_info(
                self.risfnet,
                input_res=(3, 416, 416),
                input_constructor=prepare_input,
                print_per_layer_stat=False
            )
            print(f'ptflops: {{ macs: {macs}, params: {params} }}')

            # 使用utils.flops_counter模块计算复杂度
            flops, params = get_model_complexity_info_multimodal(
                self.risfnet,
                input_res=((3, 416, 416), (3*cfg.TRAIN["RADAR_FRAME"], 416, 416)),
                print_per_layer_stat=False
            )
            print(f"counter: {{ flops: {flops}, params: {params} }}")

            # 使用calflops库计算复杂度
            flops, macs, params = calculate_flops(
                self.risfnet,
                args=[torch.FloatTensor(1, 3, 416, 416), torch.FloatTensor(1, 3*cfg.TRAIN["RADAR_FRAME"], 416, 416)],
                print_results=False, # Calculate Flops Results
                print_detailed=False # Detailed Calculated FLOPs Results
            )
            print(f"calflops: {{ flops: {flops}, macs: {macs}, params: {params} }}")

        self.optimizer = optim.Adam(
            self.risfnet.parameters(),
            lr=cfg.TRAIN["LR_INIT"],
            betas=(0.9, 0.999),
            weight_decay=cfg.TRAIN["WEIGHT_DECAY"],
        )

        self.criterion = YoloV4Loss(
            anchors=cfg.MODEL["ANCHORS"],
            strides=cfg.MODEL["STRIDES"],
            iou_threshold_loss=cfg.TRAIN["IOU_THRESHOLD_LOSS"],
        )

        self.scheduler = optim.lr_scheduler.StepLR(
            self.optimizer,
            step_size=1,
            gamma=0.9,
        )
        if resume:
            self.__load_resume_weights(weight_path)

    def __load_resume_weights(self, weight_path):
        last_weight = os.path.join(os.path.split(weight_path)[0], "last.pt")
        chkpt = torch.load(last_weight, map_location=self.device)
        self.risfnet.load_state_dict(chkpt["model"])

        self.start_epoch = chkpt["epoch"] + 1
        if chkpt["optimizer"] is not None:
            self.optimizer.load_state_dict(chkpt["optimizer"])
            self.best_mAP = chkpt["best_mAP"]
        del chkpt

    def __save_model_weights(self, epoch, mAP):
        if mAP > self.best_mAP:
            self.best_mAP = mAP
        best_weight = os.path.join(self.ckpt_path, "best.pt")
        last_weight = os.path.join(self.ckpt_path, "last.pt")
        chkpt = {
            "epoch": epoch,
            "best_mAP": self.best_mAP,
            "model": self.risfnet.state_dict(),
            "optimizer": self.optimizer.state_dict(),
        }
        torch.save(chkpt, last_weight)

        if self.best_mAP == mAP:
            torch.save(chkpt["model"], best_weight)

        if epoch > 0 and epoch % 10 == 0:
            torch.save(
                chkpt,
                os.path.join(
                    self.ckpt_path,
                    "backup_epoch%g.pt" % epoch,
                ),
            )
        del chkpt

    def train(self):
        global writer
        logger.info(
            "Training start, img size is: {:d}, batchsize is: {:d}, work number is {:d}".format(
                cfg.TRAIN["TRAIN_IMG_SIZE"],
                cfg.TRAIN["BATCH_SIZE"],
                cfg.TRAIN["NUMBER_WORKERS"],
            )
        )
        # logger.info(self.risfnet) # @@print net
        logger.info(
            "Train datasets number is : {}".format(len(self.train_dataset))
        )

        def is_valid_number(x):
            return not (math.isnan(x) or math.isinf(x) or x > 1e4)

        logger.info("        =======  start  training   ======     ")
        for epoch in range(self.start_epoch, self.epochs):
            start = time.time()
            self.risfnet.train()

            mloss = torch.zeros(4)
            logger.info("===Epoch:[{}/{}]===".format(epoch, self.epochs))
            for i, (
                imgs,
                radars,
                label_sbbox,
                label_mbbox,
                label_lbbox,
                sbboxes,
                mbboxes,
                lbboxes,
            ) in enumerate(self.train_dataloader):
                imgs = imgs.to(self.device)
                radars = radars.to(self.device)
                label_sbbox = label_sbbox.to(self.device)
                label_mbbox = label_mbbox.to(self.device)
                label_lbbox = label_lbbox.to(self.device)
                sbboxes = sbboxes.to(self.device)
                mbboxes = mbboxes.to(self.device)
                lbboxes = lbboxes.to(self.device)

                with autocast():
                    p, p_d = self.risfnet(imgs, radars)

                    loss, loss_ciou, loss_conf, loss_cls = self.criterion(
                        p,
                        p_d,
                        label_sbbox,
                        label_mbbox,
                        label_lbbox,
                        sbboxes,
                        mbboxes,
                        lbboxes,
                    )

                if is_valid_number(loss.item()):
                    loss.backward()

                # Accumulate gradient for x batches before optimizing
                if i % self.accumulate == 0:
                    self.optimizer.step()
                    self.optimizer.zero_grad()

                # Update scheduler every epoch
                if i == len(self.train_dataloader) - 1:
                    self.scheduler.step()

                # Update running mean of tracked metrics
                loss_items = torch.tensor(
                    [loss_ciou, loss_conf, loss_cls, loss]
                )
                mloss = (mloss * i + loss_items) / (i + 1)

                # Print batch results
                if i % 10 == 0:

                    logger.info(
                        "  === Epoch:[{:3}/{}], step:[{:3}/{}], img_size:[{:3}], total_loss:{:.4f}|loss_ciou:{:.4f}|loss_conf:{:.4f}|loss_cls:{:.4f}|lr:{:.8f}".format(
                            epoch, self.epochs, i, len(self.train_dataloader) - 1, self.train_dataset.img_size,
                            mloss[3], mloss[0], mloss[1], mloss[2], self.optimizer.param_groups[0]["lr"],
                        )
                    )
                    writer.add_scalar("loss_ciou", mloss[0], len(self.train_dataloader) * epoch + i)
                    writer.add_scalar("loss_conf", mloss[1], len(self.train_dataloader) * epoch + i)
                    writer.add_scalar("loss_cls", mloss[2], len(self.train_dataloader) * epoch + i)
                    writer.add_scalar("train_loss", mloss[3], len(self.train_dataloader) * epoch + i)

                # multi-sclae training (320-608 pixels) every 10 batches
                if self.multi_scale_train and (i + 1) % 10 == 0:
                    self.train_dataset.img_size = (random.choice(range(10, 20)) * 32)

            if (
                cfg.TRAIN["DATA_TYPE"] == "FloW"
                or cfg.TRAIN["DATA_TYPE"] == "Customer"
            ):
                mAP = 0.0
                if epoch >= self.eval_epoch:
                    logger.info(
                        "===== Validate =====".format(epoch, self.epochs)
                    )
                    logger.info("val img size is {}".format(cfg.VAL["TEST_IMG_SIZE"]))
                    with torch.no_grad():
                        APs, inference_time = Evaluator(
                            self.risfnet, showatt=self.showatt
                        ).APs_voc(iou_thresh=0.5) # MAP_IOU_THRESH=0.5 while training
                        for i in APs:
                            logger.info("{} --> mAP@0.5:{}".format(i, APs[i]))
                            mAP += APs[i]
                        mAP = mAP / self.train_dataset.num_classes
                        logger.info("mAP@0.5:{}".format(mAP))
                        logger.info(
                            "inference time: {:.2f} ms".format(inference_time)
                        )
                        writer.add_scalar("mAP@0.5", mAP, epoch)
                        self.__save_model_weights(epoch, mAP)
                        logger.info("save weights done")
                    logger.info("  ===test mAP@0.5:{:.3f}".format(mAP))
            elif epoch >= 0 and cfg.TRAIN["DATA_TYPE"] == "COCO": # @@not used for radar input
                evaluator = COCOAPIEvaluator(
                    model_type="RISFNet",
                    data_dir=cfg.DATA_PATH,
                    img_size=cfg.VAL["TEST_IMG_SIZE"],
                    confthre=0.08,
                    nmsthre=cfg.VAL["NMS_THRESH"],
                )
                ap50_95, ap50 = evaluator.evaluate(self.risfnet)
                logger.info("ap50_95:{}|ap50:{}".format(ap50_95, ap50))
                writer.add_scalar("val/COCOAP50", ap50, epoch)
                writer.add_scalar("val/COCOAP50_95", ap50_95, epoch)
                self.__save_model_weights(epoch, ap50)
                print("save weights done")
            end = time.time()
            logger.info("  ===cost time: {:.4f} s".format(end - start))
        logger.info("=====Training Finished.   best_test_mAP@0.5:{:.3f}%====".format(self.best_mAP))


if __name__ == "__main__":
    # Expose both GPUs; gpu.select_device inside Trainer picks the actual
    # training device from --gpu_id.
    os.environ['CUDA_VISIBLE_DEVICES'] = '0, 1'

    parser = argparse.ArgumentParser()
    parser.add_argument("--weight_path", type=str, default="weight/yolov4.weights", help="weight file path")
    parser.add_argument("--log_path", type=str, default="output/"+cfg.TRAIN["DATA_TYPE"]+"/log/", help="log path")
    parser.add_argument("--ckpt_path", type=str, default="output/"+cfg.TRAIN["DATA_TYPE"]+"/", help="ckpt path")
    parser.add_argument("--resume", action="store_true", default=False, help="resume training flag")
    # Fixed typo in the help text ("whither" -> "whether").
    parser.add_argument("--gpu_id", type=int, default=0, help="whether to use GPU(0) or CPU(-1)")
    parser.add_argument("--accumulate", type=int, default=1, help="batches to accumulate before optimizing")

    opt = parser.parse_args()
    # Input sizes are mostly fixed (multi-scale only changes every 10
    # batches), so cudnn autotuning is worthwhile.
    torch.backends.cudnn.benchmark = True

    # Module-level globals consumed by Trainer.train().
    writer = SummaryWriter(logdir=opt.log_path + "/event")
    logger = Logger(
        log_file_name=opt.log_path + "/log.txt",
        log_level=logging.DEBUG,
        logger_name="RISFNet",
    ).get_log()

    Trainer(
        weight_path=opt.weight_path,
        ckpt_path=opt.ckpt_path,
        resume=opt.resume,
        gpu_id=opt.gpu_id,
        accumulate=opt.accumulate,
    ).train()
