"""
This file is the main script for repeatability evaluation.
Please use another script for sAP and mAPH_simple.
"""
import shutup
shutup.please()
import sys
import os
current_dir = os.path.dirname(__file__)
project_root = os.path.abspath(os.path.join(current_dir, '..'))
sys.path.append(project_root)

import torch
import copy
from dataset.dataset_util import get_dataset
from evaluation.line_detect_eval import  evaluate_repeatability_v2
import yaml
import numpy as np
import pandas
from os.path import join as osj

# from third_party.lcnn import lcnn
from model.line_detector_val import (
    LineDetectorOurs2,
)

###############################################################
## ToDo: This import path must be consistent with the one in ##
## third_party/lcnn/lcnn/models/multitask_learner.py         ##
## Otherwise, the object won't be updated                    ##
###############################################################

# Some torch configuration.
# Free any cached GPU memory left over from a previous process/run so the
# evaluation starts with as much headroom as possible.
torch.cuda.empty_cache()
# Let cuDNN auto-tune convolution algorithms; helps throughput when input
# sizes are fixed across the evaluation loop.
torch.backends.cudnn.benchmark = True

if __name__ == "__main__":
    # ---------------- Configuration paths ----------------
    dataset_cfg_path = "./config/wireframe_official_gt_config.yaml"
    # dataset_cfg_path = "./config/yorkurban_dataset_config.yaml"

    model_cfg_path = "./experiments/sold2_synth/model_cfg.yaml"
    checkpoint_path = './experiments/sold2_synth/checkpoint-epoch029-end.tar'

    # Pixel distance tolerances used by the repeatability metric.
    dist_tolerance_lst = [5]

    # ---------------- Datasets ----------------
    with open(dataset_cfg_path, "r") as f:
        dataset_cfg = yaml.safe_load(f)

    # Grayscale dataset (the one actually fed to the evaluation below).
    dataset, _ = get_dataset("test", dataset_cfg)

    # Color dataset.
    # NOTE(review): dataset_color is constructed but never used afterwards;
    # kept for parity with the original script — confirm it can be dropped.
    dataset_cfg_color = copy.copy(dataset_cfg)
    dataset_cfg_color["gray_scale"] = False
    dataset_color, _ = get_dataset("test", dataset_cfg_color)

    # ---------------- Model ----------------
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Fetch the epoch tag from the checkpoint filename,
    # e.g. ".../checkpoint-epoch029-end.tar" -> "029".
    epoch_name = checkpoint_path.split("epoch")[-1].split("-end")[0]
    print(f"[Debug] {epoch_name}")

    with open(model_cfg_path, "r") as f:
        model_cfg = yaml.safe_load(f)

    # Line detector hyper-parameters. "detect_thresh" and "inlier_thresh"
    # may each be a single float or a list of candidate values; every
    # (detect, inlier) combination is evaluated as a grid.
    line_detector_cfg = {
        "detect_thresh": [0.25, 0.3, 0.35, 0.4, 0.45, 0.475, 0.5, 0.525, 0.55, 0.575, 0.6, 0.625, 0.65, 0.7, 0.75, 0.8],
        "num_samples": 64,
        "sampling_method": "local_max",
        "inlier_thresh": [0.25, 0.3, 0.4, 0.5, 0.6, 0.7, 0.725, 0.75, 0.775, 0.8, 0.825, 0.85, 0.875, 0.9, 0.925, 0.95, 0.975, 0.99],
        "use_long_line_nms": False,
        "use_candidate_suppression": True,
        "nms_dist_tolerance": 3.,
        "use_heatmap_refinement": True,
        "heatmap_refine_cfg": {
            "mode": "local",
            "ratio": 0.2,
            "valid_thresh": 0.5,
            "num_blocks": 20,
            "overlap_ratio": 0.5
        }
    }

    # Initialize the line detection method.
    line_detector = LineDetectorOurs2(
        model_cfg, checkpoint_path, device,
        line_detector_cfg, use_short_line_nms=False
    )

    # ---------------- Evaluation ----------------
    print("[Debug] Dist_tolerance=", dist_tolerance_lst)
    rep_results, loc_results, time_results, num_segments = evaluate_repeatability_v2(
        dataset, line_detector,
        dist_tolerance_lst,
        distance_metric="sAP",
        # distance_metric="orthogonal_distance",
        split="viewpoint",
        erode_border=False,
        erode_border_margin=2
    )

    # ---------------- Per-combination report ----------------
    # Normalize both thresholds to lists so one nested loop covers the
    # scalar and grid cases alike (replaces four duplicated print branches).
    detect_thresh = line_detector.line_detector.detect_thresh
    if isinstance(detect_thresh, float):
        detect_thresh = [detect_thresh]
    inlier_thresh = line_detector.line_detector.inlier_thresh
    if isinstance(inlier_thresh, float):
        inlier_thresh = [inlier_thresh]
    num_detect_thresh = line_detector.line_detector.num_detect_thresh
    num_inlier_thresh = line_detector.line_detector.num_inlier_thresh

    # rep_results / loc_results map each tolerance to an array indexed as
    # [detect_idx, inlier_idx, image]; num_segments is indexed the same way.
    for detect_thresh_idx in range(num_detect_thresh):
        for inlier_thresh_idx in range(num_inlier_thresh):
            print("detect_thresh: ", detect_thresh[detect_thresh_idx])
            print("inlier_thresh: ", inlier_thresh[inlier_thresh_idx])
            print("Rep: ",
                  [np.mean(rep_results[_][detect_thresh_idx, inlier_thresh_idx, :]) for _ in dist_tolerance_lst])
            print("Loc: ",
                  [np.mean(loc_results[_][detect_thresh_idx, inlier_thresh_idx, :]) for _ in dist_tolerance_lst])
            print("Average number of segments: %f segs/image" % (
                np.mean(num_segments[detect_thresh_idx, inlier_thresh_idx, :])))
            print("-------------------------------------------------------")

    # Display runtime
    print("\t average runtime: %f s/image" % (np.mean(time_results)))
    # Display average line segments
    print("\t average number of segments: %f segs/image" % (np.mean(num_segments)))

    # ========== Converting results to Excel =============
    num_dist_tolerance = len(dist_tolerance_lst)
    num_rows = num_detect_thresh * num_inlier_thresh
    # Columns: detect_thresh | inlier_thresh | lambda_radius |
    #          one metric column per tolerance | average #segments.
    num_columns = num_dist_tolerance + 4

    # Build one row per (detect, inlier) combination; rep and loc tables
    # share every column except the per-tolerance metric values.
    rep_outputs = np.zeros([num_rows, num_columns], dtype=np.float32)
    loc_outputs = np.zeros([num_rows, num_columns], dtype=np.float32)
    for row_idx in range(num_rows):
        detect_idx = row_idx // num_inlier_thresh
        inlier_idx = row_idx % num_inlier_thresh
        rep = np.array([np.mean(rep_results[_][detect_idx, inlier_idx, :]) for _ in dist_tolerance_lst])
        loc = np.array([np.mean(loc_results[_][detect_idx, inlier_idx, :]) for _ in dist_tolerance_lst])

        row_temp = np.zeros([num_columns], dtype=np.float32)
        row_temp[0] = detect_thresh[detect_idx]
        row_temp[1] = inlier_thresh[inlier_idx]
        row_temp[2] = line_detector.line_detector.lambda_radius
        row_temp[3:num_dist_tolerance + 3] = rep
        row_temp[-1] = np.mean(num_segments[detect_idx, inlier_idx, :])

        # Record repeatability results.
        rep_outputs[row_idx, :] = row_temp.copy()
        # Record localization results (same row, different metric columns).
        row_temp[3:num_dist_tolerance + 3] = loc
        loc_outputs[row_idx, :] = row_temp.copy()

    # Write to Excel files.
    output_folder = osj("./experiments", "evaluation_results")
    os.makedirs(output_folder, exist_ok=True)

    rep_outputs = pandas.DataFrame(rep_outputs)
    rep_output_path = osj(output_folder, f"normal_eval_results_rep_epoch-{epoch_name}.xlsx")
    rep_outputs.to_excel(
        excel_writer=rep_output_path,
        float_format="%.06f"
    )
    print(f"[Info] Finished writing to {rep_output_path}")

    loc_outputs = pandas.DataFrame(loc_outputs)
    loc_output_path = osj(output_folder, f"normal_eval_results_loc_epoch-{epoch_name}.xlsx")
    loc_outputs.to_excel(
        excel_writer=loc_output_path,
        float_format="%.06f"
    )
    print(f"[Info] Finished writing to {loc_output_path}")