#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import os
import logging
import json

from detectron2 import model_zoo
from detectron2.engine import launch
from detectron2.data import DatasetCatalog, MetadataCatalog, build_detection_test_loader
from detectron2.config import get_cfg
from detectron2.modeling import build_model
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.evaluation import COCOEvaluator, inference_on_dataset

from .dataset_utils import get_det_dicts, get_icg_dicts, get_oku_dicts, get_vis_dicts
from .retinanet_ptl import RetinaNetPTL
from .detector_utils import PTLTrainer, PTLDatasetMapper
from pathlib import Path


def add_det_config(cfg, opt):
    """Attach PTL-specific input-size keys to a detectron2 config.

    Adds four custom INPUT entries (not part of the stock detectron2
    schema): min/max training sizes for UAV-captured images and for
    synthetic images. The min-size options are coerced to tuples, as
    detectron2 expects for MIN_SIZE_* settings.
    """
    # UAV-based imagery training resolutions
    cfg.INPUT.MIN_SIZE_TRAIN_UAV = tuple(opt.min_size_train_uav_det)
    cfg.INPUT.MAX_SIZE_TRAIN_UAV = opt.max_size_train_uav_det
    # Synthetic imagery training resolutions
    cfg.INPUT.MIN_SIZE_TRAIN_SYN = tuple(opt.min_size_train_syn_det)
    cfg.INPUT.MAX_SIZE_TRAIN_SYN = opt.max_size_train_syn_det
    
    
def train_main(opt):
    """Configure and run one detector-training round for the current PTL iteration.

    Registers the "det_train" dataset for this iteration, builds a
    RetinaNetPTL config on top of the stock COCO RetinaNet baseline,
    optionally seeds weights (pretrained or previous-iteration model for
    progressive training), and trains via PTLTrainer.

    Returns whatever PTLTrainer.train() returns.
    """
    # Dataset for this PTL iteration: ./datasets/<exp_id>/det/<cur_ptl_iter>
    det_dataset_root = os.path.join("./datasets", opt.exp_id, "det")
    cur_det_dataset = os.path.join(det_dataset_root, opt.cur_ptl_iter)

    has_suffix = opt.suffix != ""

    # Output directory mirrors the dataset layout, with an optional suffix leaf.
    out_parts = ["./models", opt.exp_id, "det", opt.cur_ptl_iter]
    if has_suffix:
        out_parts.append(opt.suffix)
    output_path = os.path.join(*out_parts)

    # A suffixed run loads its own "dicts_<suffix>.json"; the plain run uses
    # the loader's default dict file.
    if has_suffix:
        dict_loader = lambda: get_det_dicts(dataset_path=cur_det_dataset,
                                            name="dicts_" + opt.suffix + ".json")
    else:
        dict_loader = lambda: get_det_dicts(dataset_path=cur_det_dataset)
    DatasetCatalog.register("det_train", dict_loader)
    MetadataCatalog.get("det_train").thing_classes = ['person']

    # Start from the stock RetinaNet baseline, then swap in the PTL variant.
    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file("COCO-Detection/retinanet_R_50_FPN_1x.yaml"))
    cfg.MODEL.META_ARCHITECTURE = "RetinaNetPTL"

    if opt.pretrained_weights_det != "":
        cfg.MODEL.WEIGHTS = opt.pretrained_weights_det

    if opt.freeze_backbone_det:
        # FREEZE_AT = 5 freezes through the last backbone stage.
        cfg.MODEL.BACKBONE.FREEZE_AT = 5

    # Progressive training: warm-start from the previous iteration's final model.
    if opt.progressive:
        numeric_tokens = [tok for tok in opt.cur_ptl_iter.split("_") if tok.isdigit()]
        prev_iter = "iter_" + str(int(numeric_tokens[0]) - 1)
        weight_parts = ["./models", opt.exp_id, "det", prev_iter]
        if has_suffix:
            weight_parts.append(opt.suffix)
        weight_parts.append("model_final.pth")
        cfg.MODEL.WEIGHTS = os.path.join(*weight_parts)

    cfg.DATASETS.TRAIN = ("det_train",)
    cfg.DATASETS.TEST = ()

    cfg.OUTPUT_DIR = output_path
    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)

    cfg.DATALOADER.NUM_WORKERS = 4

    # Solver schedule from the option namespace.
    cfg.SOLVER.IMS_PER_BATCH = opt.batch_size_det
    cfg.SOLVER.BASE_LR = opt.base_lr_det
    cfg.SOLVER.STEPS = (opt.step_det,)
    cfg.SOLVER.MAX_ITER = opt.n_iter_det
    cfg.SOLVER.CHECKPOINT_PERIOD = opt.checkpoint_period

    # Single-class ('person') head with a reduced conv tower.
    cfg.MODEL.RETINANET.NUM_CLASSES = 1
    cfg.MODEL.RETINANET.NUM_CONVS = 2

    cfg.INPUT.MIN_SIZE_TRAIN = tuple(opt.min_size_train_det)
    cfg.INPUT.MAX_SIZE_TRAIN = opt.max_size_train_det

    cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS = opt.filter_empty_annotations_det
    # When true, synthetic and real data end up in separate batches.
    cfg.DATALOADER.ASPECT_RATIO_GROUPING = opt.aspect_ratio_grouping_det

    # PTL-specific input-size keys (UAV vs. synthetic).
    add_det_config(cfg, opt)

    trainer = PTLTrainer(cfg)
    trainer.resume_or_load(resume=False)
    return trainer.train()
    
    
def train_det(opt):
    """Run detector training across GPUs/machines via detectron2's launcher.

    Delegates to `launch`, which spawns `train_main(opt)` on `opt.n_gpus`
    processes per machine using the distributed settings carried in `opt`.
    """
    launch(
        train_main,
        opt.n_gpus,
        args=(opt,),
        machine_rank=opt.machine_rank,
        num_machines=opt.num_machines,
        dist_url=opt.dist_url,
    )
    
    
def test_det(opt, test_dataset_type, test_dataset_root, model_id="final"):
    """Evaluate a trained PTL RetinaNet detector on a test dataset.

    Args:
        opt: options namespace; fields read here: exp_id, cur_ptl_iter,
            suffix, min_size_test_det, max_size_test_det, plus the fields
            consumed by add_det_config.
        test_dataset_type: dataset selector — "icg", "oku", or anything
            else (treated as the VisDrone-style loader). Also used as the
            DatasetCatalog registration key and results sub-directory name.
        test_dataset_root: root directory of the test dataset on disk.
        model_id: checkpoint identifier; "model_<model_id>.pth" is loaded.
            Defaults to "final" (the last checkpoint), preserving the
            previous hard-coded behavior.

    Returns:
        The results dict produced by inference_on_dataset (COCO bbox
        metrics); also serialized to <output_path>/results.json.
    """
    logging.basicConfig(level=logging.INFO)

    # Locate the checkpoint written by train_main for this PTL iteration.
    model_root = os.path.join("./models", opt.exp_id, "det")
    if opt.suffix == "":
        model_weight = os.path.join(model_root, opt.cur_ptl_iter, "model_" + model_id + ".pth")
    else:
        model_weight = os.path.join(model_root, opt.cur_ptl_iter, opt.suffix, "model_" + model_id + ".pth")

    # Mirror the training-time architecture settings (meta-architecture,
    # class count, conv tower depth) so the checkpoint loads cleanly.
    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file("COCO-Detection/retinanet_R_50_FPN_1x.yaml"))
    cfg.MODEL.META_ARCHITECTURE = "RetinaNetPTL"
    cfg.MODEL.RETINANET.NUM_CLASSES = 1
    cfg.MODEL.RETINANET.NUM_CONVS = 2
    cfg.INPUT.MIN_SIZE_TEST = opt.min_size_test_det
    cfg.INPUT.MAX_SIZE_TEST = opt.max_size_test_det

    # PTL-specific input-size keys (UAV vs. synthetic).
    add_det_config(cfg, opt)

    model = build_model(cfg)
    DetectionCheckpointer(model).load(model_weight)
    model.eval()

    # Results directory, parallel to the model directory layout.
    if opt.suffix == "":
        output_path = os.path.join("./results", opt.exp_id, "det", opt.cur_ptl_iter, test_dataset_type)
    else:
        output_path = os.path.join("./results", opt.exp_id, "det", opt.cur_ptl_iter, opt.suffix, test_dataset_type)
    Path(output_path).mkdir(parents=True, exist_ok=True)

    # Register the test dataset at most once per process; DatasetCatalog
    # rejects duplicate keys, hence the membership check.
    if test_dataset_type not in DatasetCatalog.list():
        if test_dataset_type == "icg":
            DatasetCatalog.register(test_dataset_type, lambda: get_icg_dicts(dataset_root=test_dataset_root, mode="Test"))
        elif test_dataset_type == "oku":
            DatasetCatalog.register(test_dataset_type, lambda: get_oku_dicts(dataset_root=test_dataset_root, mode="Test-Set",
                                                                  sampling_rate=10, scaling_ratio=3, ignore_occ=False, ignore_lost=True))
        else:
            DatasetCatalog.register(test_dataset_type, lambda: get_vis_dicts(dataset_root=test_dataset_root, mode="test-dev"))
        # All datasets share the single 'person' class.
        MetadataCatalog.get(test_dataset_type).thing_classes = ['person']

    evaluator = COCOEvaluator(test_dataset_type, ('bbox',), distributed=False, output_dir=output_path)
    data_loader = build_detection_test_loader(cfg, test_dataset_type, mapper=PTLDatasetMapper(cfg, is_train=False))
    results = inference_on_dataset(model, data_loader, evaluator)

    # Persist metrics alongside the evaluator's own COCO-format output.
    with open(os.path.join(output_path, "results.json"), "w") as f:
        json.dump(results, f)

    return results