from pathlib import Path
import numpy as np
import onnxruntime as ort
from lightglue_dynamo import viz
from lightglue_dynamo.preprocessors import DISKPreprocessor, SuperPointPreprocessor
from typing import Annotated, Optional, Union

import cv2
import typer

from matplotlib import pyplot as plt
from lightglue_dynamo.config import Extractor, InferenceDevice
PATH_ROOT = Path(__file__).parent
import sys
sys.path.append(str(PATH_ROOT))

class InferSession:
    def __init__(
            self,
            height: int = 512, width: int = 512,
            device: InferenceDevice = InferenceDevice.cpu,
            extractor_type: Extractor = Extractor.superpoint,
            model_path=PATH_ROOT / "weights/superpoint_lightglue_pipeline.onnx",
            fp16: bool = False,
        ):
        self.height = height
        self.width = width
        self.device = device
        self.extractor_type = extractor_type
        self.model_path = model_path
        if not Path(self.model_path).exists():
            raise FileNotFoundError(
                f"Model file not found at {self.model_path}."
                "Please ensure download model from https://github.com/fabio-sim/LightGlue-ONNX/releases/download/v2.0/superpoint_lightglue_pipeline.onnx)"
                "and save to pkgs/LightGlue-ONNX/weights"
            )
        self.fp16 = fp16

        session_options = ort.SessionOptions()

        providers = [("CPUExecutionProvider", {})]
        if device == InferenceDevice.cuda:
            providers.insert(0, ("CUDAExecutionProvider", {}))
        elif device == InferenceDevice.tensorrt:
            providers.insert(0, ("CUDAExecutionProvider", {}))
            providers.insert(
                0,
                (
                    "TensorrtExecutionProvider",
                    {
                        "trt_engine_cache_enable": True,
                        "trt_engine_cache_path": "weights/.trtcache_engines",
                        "trt_timing_cache_enable": True,
                        "trt_timing_cache_path": "weights/.trtcache_timings",
                        "trt_fp16_enable": fp16,
                    },
                ),
            )
        elif device == InferenceDevice.openvino:
            providers.insert(0, ("OpenVINOExecutionProvider", {}))

        self.session = ort.InferenceSession(self.model_path, session_options, providers)
            
    def __call__(self, image1: Union[Path, str, np.ndarray], image2: Union[Path, str, np.ndarray], show: bool = False):
        raw_images, scale_images, scales = [], [], []
        def get_image(image: Union[Path, str, np.ndarray]) -> np.ndarray:
            if isinstance(image, (str, Path)):
                image = cv2.imread(str(image))
            assert  isinstance(image, np.ndarray), "Input must be a valid image path or numpy array."
            raw_images.append(image)
            scale = np.array([self.width / image.shape[1], self.height / image.shape[0]], dtype=np.float32)
            image = cv2.resize(image, (self.width, self.height), interpolation=cv2.INTER_CUBIC)
            scale_images.append(image)
            scales.append(scale)
        get_image(image1)
        get_image(image2)
        images = np.stack(scale_images)
        scales = np.stack(scales)

        match self.extractor_type:
            case Extractor.superpoint:
                images = SuperPointPreprocessor.preprocess(images)
            case Extractor.disk:
                images = DISKPreprocessor.preprocess(images)

        images = images.astype(np.float16 if self.fp16 and self.device != InferenceDevice.tensorrt else np.float32)
        keypoints, matches, mscores = self.session.run(None, {"images": images})

        m_kpts0 = keypoints[0][matches[..., 1]] / scales[0][None]
        m_kpts1 = keypoints[1][matches[..., 2]] / scales[1][None]

        if show:
            viz.plot_images(raw_images)
            viz.plot_matches(m_kpts0, m_kpts1, color="lime", lw=0.2)
            plt.show()
        
        m_kpts0, m_kpts1 = m_kpts0.astype(np.float32), m_kpts1.astype(np.float32)
        return m_kpts0, m_kpts1

if __name__ == '__main__':
    # Demo: match keypoints between the two bundled sample images and display the result.
    left = PATH_ROOT / "assets/sacre_coeur1.jpg"
    right = PATH_ROOT / "assets/sacre_coeur2.jpg"
    session = InferSession()
    session(left, right, show=True)
