import utils.gpu as gpu
from model.build_model import Build_Model
from utils.tools import *
from eval.evaluator import Evaluator
import argparse
import time
import logging
import config.risfnet_config as cfg
from utils.visualize import *
from utils.torch_utils import *
from utils.log import Logger


class Evaluation(object):
    """Run validation (VOC-style mAP) or per-image detection with a trained model.

    Builds the model on the selected device, loads weights, and exposes two
    entry points: ``val()`` (mAP over the configured test set) and
    ``detection()`` (draw and save predicted boxes for a directory or a
    ``.txt`` list of images).
    """

    def __init__(
        self,
        gpu_id=0,
        weight_path=None,
        visiual=None,
        eval=False,     # NOTE(review): unused (and shadows the builtin); kept for caller compatibility
        mode=None
    ):
        # Class count / names follow the configured dataset type.
        # self.__num_class = cfg.VOC_DATA["NUM"] # original code
        self.__num_class = cfg.FloW_DATA["NUM"] if cfg.TRAIN["DATA_TYPE"] == "FloW" else cfg.Customer_DATA["NUM"] # @@add Customer data check
        self.__conf_threshold = cfg.VAL["CONF_THRESH"]  # confidence threshold for kept detections
        self.__nms_threshold = cfg.VAL["NMS_THRESH"]    # IoU threshold for NMS
        self.__device = gpu.select_device(gpu_id)       # gpu_id >= 0 -> CUDA device, -1 -> CPU
        self.__showatt = cfg.TRAIN["showatt"]           # whether the model exposes attention maps

        self.__visiual = visiual    # image dir, .txt list of image paths, or None
        self.__mode = mode          # "val" or "det"; forwarded to Evaluator.get_bbox
        # self.__classes = cfg.VOC_DATA["CLASSES"] # original code
        self.__classes = cfg.FloW_DATA["CLASSES"] if cfg.TRAIN["DATA_TYPE"] == "FloW" else cfg.Customer_DATA["CLASSES"] # @@add Customer data check

        self.__model = Build_Model(showatt=self.__showatt).to(self.__device)

        self.__load_model_weights(weight_path)

        self.__evalter = Evaluator(self.__model, showatt=self.__showatt)

    def __load_model_weights(self, weight_path):
        """Load weights from ``weight_path``; accepts either a training
        checkpoint dict (with a "model" key) or a bare state_dict."""
        print("loading weight file from : {}".format(weight_path))

        weight = os.path.join(weight_path)
        chkpt = torch.load(weight, map_location=self.__device)
        # self.__model.load_state_dict(chkpt["model"]) # original code
        if "model" in chkpt: self.__model.load_state_dict(chkpt["model"]) # @@check key in dict
        else: self.__model.load_state_dict(chkpt) # @@load model directly
        print("loading weight file is done")
        del chkpt  # free checkpoint memory immediately

    def val(self):
        """Compute per-class AP and mean AP at cfg.VAL["MAP_IOU_THRESH"]."""
        global logger
        logger.info("***********Start Evaluation****************")
        start = time.time()
        mAP = 0
        with torch.no_grad():
            # NOTE(review): builds a fresh Evaluator with showatt=False rather
            # than reusing self.__evalter (which honors cfg showatt) — looks
            # intentional for eval speed, but confirm.
            APs, inference_time = Evaluator(
                self.__model, showatt=False
            ).APs_voc(iou_thresh=cfg.VAL["MAP_IOU_THRESH"])
            for i in APs:
                logger.info("{} --> mAP@{}:{}".format(i, cfg.VAL["MAP_IOU_THRESH"], APs[i]))
                mAP += APs[i]
            # Averaged over the configured class count, not len(APs).
            mAP = mAP / self.__num_class
            logger.info("mAP@{}:{}".format(cfg.VAL["MAP_IOU_THRESH"], mAP))
            logger.info("inference time: {:.2f} ms".format(inference_time))
        end = time.time()
        logger.info("  ===val cost time: {:.4f} s".format(end - start))

    def detection(self):
        """Run inference on every image in ``self.__visiual`` and save
        visualizations under ./detection_result/."""
        global logger
        if self.__visiual:
            # imgs = os.listdir(self.__visiual) # original code

            # @@check visiual path, txt or dir
            if ".txt" in self.__visiual:
                # Each line is expected to be "img_path [annotations...]";
                # only the first whitespace-separated token is used.
                with open(self.__visiual, 'r') as f:
                    lines = [x.strip() for x in f.readlines()]
                    lines = [x.split(' ') for x in lines]
                imgs = [lin[0] for lin in lines]
                # Rebase to the txt file's directory so the join below resolves
                # relative entries; absolute entries stay absolute because
                # os.path.join discards the first argument in that case.
                self.__visiual = os.path.dirname(self.__visiual)
            else: imgs = os.listdir(self.__visiual)

            logger.info("***********Start Detection****************")
            for v in imgs:
                os.makedirs(os.path.join("./detection_result", os.path.dirname(v)), exist_ok=True) # @@make ./detection_result/xx
                path = os.path.join(self.__visiual, v)
                logger.info("val images : {}".format(path))

                img = cv2.imread(path)
                assert img is not None

                # @@add radar: companion radar image derived from the RGB path
                radar_path = cfg.get_radar_path(path)
                radar = cv2.imread(radar_path)
                assert radar is not None

                bboxes_prd = self.__evalter.get_bbox(img, radar, v, mode=self.__mode)
                if bboxes_prd.shape[0] != 0:
                    # Prediction layout per row: [x1, y1, x2, y2, score, class_id]
                    boxes = bboxes_prd[..., :4]
                    class_inds = bboxes_prd[..., 5].astype(np.int32)
                    scores = bboxes_prd[..., 4]

                    visualize_boxes(
                        image=img,
                        boxes=boxes,
                        labels=class_inds,
                        probs=scores,
                        class_labels=self.__classes,
                    )
                    path = os.path.join(
                        cfg.PROJECT_PATH, "detection_result/{}".format(v)
                    )

                    cv2.imwrite(path, img)
                    logger.info("saved images : {}".format(path))


if __name__ == "__main__":
    # Expose both GPUs. The device list must be comma-separated with NO
    # spaces: CUDA stops parsing at an unparsable entry, so the previous
    # value '0, 1' effectively hid GPU 1.
    os.environ['CUDA_VISIBLE_DEVICES'] = '0,1'

    parser = argparse.ArgumentParser()
    parser.add_argument("--weight_path", type=str, default="weight/best.pt", help="weight file path")
    parser.add_argument("--log_val_path", type=str, default="log_val", help="val log file path")
    parser.add_argument("--gpu_id", type=int, default=-1, help="whether to use GPU(0) or CPU(-1)")
    # os.path.join inserts the separator that the old concatenation
    # (cfg.DATA_PATH + "test.txt") silently dropped when DATA_PATH had no
    # trailing slash, which produced paths like ".../datatest.txt".
    parser.add_argument("--visiual", type=str, default=os.path.join(cfg.DATA_PATH, "test.txt"), help="det data path or list or None") # img dir or name list only for detection
    parser.add_argument("--mode", type=str, default="val", help="val or det")

    opt = parser.parse_args()

    # makedirs(exist_ok=True) tolerates an already-existing directory and
    # nested paths, unlike the racy exists() + mkdir pair it replaces.
    os.makedirs(opt.log_val_path, exist_ok=True)
    logger = Logger(
        log_file_name=opt.log_val_path + "/log_val.txt",
        log_level=logging.DEBUG,
        logger_name="RISFNet",
    ).get_log()

    # Build the evaluation harness once and dispatch on the requested mode
    # (the two branches previously duplicated the constructor call).
    evaluation = Evaluation(
        gpu_id=opt.gpu_id,
        weight_path=opt.weight_path,
        visiual=opt.visiual,
        mode=opt.mode,
    )
    if opt.mode == "val":
        evaluation.val()
    else:
        evaluation.detection()
