"""
This file contains the full line detectors
Takes raw images as inputs and perform
    (1) junction heatmap prediction
    (2) line detection algorithm
"""
import torch
import numpy as np

from model.model_util import get_model
from model.loss import get_loss_and_weights
from model.line_detection_val import SimpleLineDetector_LCNN2
from train import convert_junc_predictions
from torch.nn.functional import softmax
from evaluation.eval_utils import line_map_to_segments
from postprocess.convert_homography_results_v2 import line_NMS_v2

import time


# Our full line detector (version 2): including compatibility with torch and local-max methods.
class LineDetectorOurs2(object):
    """Full line detector: junction heatmap prediction + line detection.

    Wraps a CNN backbone (junction + heatmap heads) and a line detection
    module, exposing a single ``__call__`` that maps a raw image tensor to
    line segments.
    """

    def __init__(self,
                 model_cfg,
                 ckpt_path,
                 device,
                 line_detector_cfg,
                 junc_detect_thresh=None,
                 use_short_line_nms=False
                 ):
        """
        model_cfg: config for CNN model
        ckpt_path: path to the weights
        device: torch device the model and inputs are moved to
        line_detector_cfg: configuration file for the line detection module
        junc_detect_thresh: optional junction detection threshold; when None
            a hard-coded default of 1/65 is used (see note below).
        use_short_line_nms: Apply the line NMS that'll suppress short overlapping segments.
        """
        # Get loss weights if dynamic weighting
        _, loss_weights = get_loss_and_weights(model_cfg, device)
        self.device = device

        # Initialize the cnn backbone
        self.model = get_model(model_cfg, loss_weights)
        checkpoint = torch.load(ckpt_path, map_location=self.device)

        # Strip the 'module.' prefix that torch.nn.DataParallel adds to
        # checkpoints saved from multi-GPU training.
        checkpoint = self._strip_module_prefix(checkpoint)

        self.model = self.restore_weights(self.model, checkpoint["model_state_dict"])
        self.model = self.model.to(self.device)
        self.model = self.model.eval()

        self.grid_size = model_cfg["grid_size"]

        if junc_detect_thresh is not None:
            self.junc_detect_thresh = junc_detect_thresh
        else:
            # NOTE(review): the original code read model_cfg["detection_thresh"]
            # and then immediately overwrote it with a hard-coded 1/65. The
            # final value is preserved here; confirm whether the config value
            # was meant to be used instead.
            self.junc_detect_thresh = 1 / 65

        # Initialize the line detector
        self.line_detector_cfg = line_detector_cfg
        self.line_detector = SimpleLineDetector_LCNN2(**line_detector_cfg)

        self.use_line_nms = use_short_line_nms

        # Print some debug messages
        for key, val in line_detector_cfg.items():
            print(f"[Debug] {key}: {val}")

    @staticmethod
    def _strip_module_prefix(checkpoint):
        """Return a copy of ``checkpoint`` with 'module.' removed from keys.

        Handles the top level and one nested mapping level (e.g. the
        "model_state_dict" entry), covering both plain dicts and
        OrderedDicts (an OrderedDict is a dict subclass). Building a new
        dict instead of editing in place fixes the original bug of
        mutating the dict while iterating over it, and the original
        copy-paste bug that wrote the outer value into nested entries.
        """
        cleaned = {}
        for key, value in checkpoint.items():
            if isinstance(value, dict):
                # Preserve the concrete mapping type (dict / OrderedDict).
                value = type(value)(
                    (k.replace('module.', ''), v) for k, v in value.items()
                )
            cleaned[key.replace('module.', '')] = value
        return cleaned

    def __call__(self, input_image, valid_mask=None, mask_board=None, return_heatmap=False, profile=False):
        """Detect line segments in ``input_image``.

        input_image: 4D torch tensor (batch of images); only batch index 0
            of the heatmap is used downstream.
        valid_mask: optional array multiplied into the junction NMS map to
            restrict detections to a region.
        mask_board: currently unused (kept for interface compatibility).
        return_heatmap: if True, include the post-detection heatmap in the
            output dict under "heatmap".
        profile: if True, include the wall-clock detection time under "time".

        Returns a dict with at least "line_segments".
        Raises ValueError if input_image is not a 4D torch tensor.
        """
        # Now we restrict input_image to 4D torch tensor
        if (not len(input_image.shape) == 4) or (not isinstance(input_image, torch.Tensor)):
            raise ValueError("[Error] the input image should be a 4D torch tensor")

        # Move the input to corresponding device
        input_image = input_image.to(self.device)

        # Forward pass of the CNN backbone (no gradients needed at inference)
        start_time = time.time()
        with torch.no_grad():
            net_outputs = self.model(input_image)
        junc_np = convert_junc_predictions(net_outputs["junctions"],
                                           self.grid_size, self.junc_detect_thresh, 300)
        # Junction (row, col) coordinates from the NMS'ed prediction map,
        # optionally restricted by the valid mask.
        if valid_mask is None:
            junctions = np.where(junc_np["junc_pred_nms"].squeeze())
        else:
            junctions = np.where(junc_np["junc_pred_nms"].squeeze() * valid_mask)
        junctions = np.concatenate([junctions[0][..., None], junctions[1][..., None]], axis=-1)

        # Reduce the heatmap head to a single-channel probability map:
        # softmax over the 2-channel head, otherwise sigmoid on one channel.
        if net_outputs["heatmap"].shape[1] == 2:
            heatmap = softmax(net_outputs["heatmap"], dim=1)[:, 1:, :, :].cpu().numpy().transpose(0, 2, 3, 1)
        else:
            heatmap = torch.sigmoid(net_outputs["heatmap"]).cpu().numpy().transpose(0, 2, 3, 1)
        heatmap = heatmap[0, :, :, 0]

        # Run the line detector.
        line_map, junctions, heatmap = self.line_detector.detect_torch(
            junctions,
            heatmap,
            recover_junction=False,
            device=self.device
        )
        heatmap = heatmap.cpu().numpy()
        if isinstance(line_map, torch.Tensor):
            line_map = line_map.cpu().numpy()
        if isinstance(junctions, torch.Tensor):
            junctions = junctions.cpu().numpy()

        # A >2D line map stacks results over multiple detect_thresh and
        # inlier_thresh values; extract segments per threshold pair.
        if len(line_map.shape) > 2:
            num_detect_thresh = line_map.shape[0]
            num_inlier_thresh = line_map.shape[1]
            line_segments = []
            for detect_idx in range(num_detect_thresh):
                line_segments_inlier = []
                for inlier_idx in range(num_inlier_thresh):
                    line_map_tmp = line_map[detect_idx, inlier_idx, :, :]
                    line_segments_tmp = line_map_to_segments(junctions, line_map_tmp)
                    line_segments_inlier.append(line_segments_tmp)
                line_segments.append(line_segments_inlier)
        else:
            if self.use_line_nms:
                # Suppress short overlapping segments before extraction.
                _, junctions_nms, line_map_nms = line_NMS_v2(
                    line_map, junctions, input_image.shape[2:], self.device
                )
                line_segments = line_map_to_segments(junctions_nms, line_map_nms)
            else:
                line_segments = line_map_to_segments(junctions, line_map)
        end_time = time.time()

        outputs = {"line_segments": line_segments}

        if return_heatmap:
            outputs["heatmap"] = heatmap
        if profile:
            outputs["time"] = end_time - start_time

        return outputs

    def restore_weights(self, model, state_dict):
        """Load ``state_dict`` into ``model``, tolerating key mismatches.

        First attempts a strict load; on a RuntimeError (key/shape
        mismatch), loads everything that matches and then pairs the
        remaining missing keys with the unexpected keys by position,
        skipping 'num_batches_tracked' buffers. Returns the model.

        NOTE(review): the positional pairing assumes missing and unexpected
        keys line up index-for-index, which holds for a plain name-prefix
        mismatch but not for arbitrary architecture differences.
        """
        # Try to directly load state dict
        try:
            model.load_state_dict(state_dict)
        except RuntimeError:
            err = model.load_state_dict(state_dict, strict=False)
            # Missing keys are those in model but not in state_dict
            missing_keys = err.missing_keys
            # Unexpected keys are those in state_dict but not in model
            unexpected_keys = err.unexpected_keys

            # Load mismatched keys manually (filter hoisted out of the loop).
            model_dict = model.state_dict()
            dict_keys = [k for k in unexpected_keys if "tracked" not in k]
            for idx, key in enumerate(missing_keys):
                model_dict[key] = state_dict[dict_keys[idx]]
            model.load_state_dict(model_dict)

        return model