import logging
import os

import cv2
from PyQt5 import QtCore
from PyQt5.QtCore import QCoreApplication
import numpy as np
from anylabeling.app_info import __preferred_device__
from anylabeling.views.labeling.shape import Shape
from anylabeling.views.labeling.utils.opencv import qt_img_to_rgb_cv_img
from .model import Model
from .types import AutoLabelingResult
from .utils.sahi.models.yolov5_onnx import Yolov5OnnxDetectionModel

from .utils.sahi.predict import get_sliced_prediction, get_prediction


class Args:
    """Lightweight namespace: exposes the given keyword arguments as attributes."""

    def __init__(self, **kwargs):
        for key, value in kwargs.items():
            setattr(self, key, value)


class YOLOV5MULTModel(Model):
    """Two-stage YOLOv5 auto-labeling model.

    Stage 1 (model1) runs SAHI sliced inference over the full image to find
    candidate regions; stage 2 (model2) runs on each cropped region (after
    letterboxing to ``det2_img_size``) and its detections are mapped back to
    full-image coordinates as rectangle shapes.
    """

    class Meta:
        required_config_names = [
            "type",
            "name",
            "display_name",
            "model1_path",
            "model2_path",
            "model1_nms_threshold",
            "model1_confidence_threshold",
            "model2_nms_threshold",
            "model2_confidence_threshold",
            "slice_height",
            "slice_width",
            "overlap_height_ratio",
            "overlap_width_ratio",
            "det2_img_size",
            "classes1",
            "classes2"
        ]
        widgets = ["button_run"]
        output_modes = {
            "rectangle": QCoreApplication.translate("Model", "Rectangle"),
        }
        default_output_mode = "rectangle"

    def load_model(self, model_path, nms_threshold, confidence_threshold, classes):
        """Resolve a model file from config and build an ONNX detection model.

        Args:
            model_path: config key holding the model location (e.g. "model1_path").
            nms_threshold: NMS IoU threshold for this model.
            confidence_threshold: minimum detection confidence.
            classes: ordered class-name list; the list index becomes the
                string category id expected by the SAHI wrapper.

        Returns:
            A ready-to-use ``Yolov5OnnxDetectionModel``.

        Raises:
            FileNotFoundError: when the model file cannot be downloaded or found.
        """
        model_abs_path = self.get_model_abs_path(self.config, model_path)
        if not model_abs_path or not os.path.isfile(model_abs_path):
            raise FileNotFoundError(
                QCoreApplication.translate(
                    "Model", "Could not download or initialize yolov5 mult_models."
                )
            )
        # SAHI expects category ids as strings: {"0": name0, "1": name1, ...}
        category_mapping = {
            str(ind): category_name
            for ind, category_name in enumerate(classes)
        }
        net = Yolov5OnnxDetectionModel(
            model_path=model_abs_path,
            nms_threshold=nms_threshold,
            confidence_threshold=confidence_threshold,
            category_mapping=category_mapping,
            device=__preferred_device__
        )
        return net

    def __init__(self, model_config, on_message) -> None:
        """Load both stage models and cache slicing/letterbox parameters."""
        # Run the parent class's init method
        super().__init__(model_config, on_message)
        self.model1 = self.load_model("model1_path", self.config["model1_nms_threshold"],
                                      self.config["model1_confidence_threshold"], self.config["classes1"])
        self.model2 = self.load_model("model2_path", self.config["model2_nms_threshold"],
                                      self.config["model2_confidence_threshold"], self.config["classes2"])
        self.slice_height = self.config["slice_height"]
        self.slice_width = self.config["slice_width"]
        self.overlap_height_ratio = self.config["overlap_height_ratio"]
        self.overlap_width_ratio = self.config["overlap_width_ratio"]
        self.det1_class = self.config["classes1"]
        self.det2_class = self.config["classes2"]
        # det2_img_size is a comma-separated string, e.g. "640,640"
        self.det2_img_size = [int(a) for a in self.config["det2_img_size"].split(",")]

    def det2_preprocess(self):
        """Placeholder for extra stage-2 preprocessing.

        Currently unused: letterboxing is done inline in ``predict_shapes``.
        """
        pass

    def predict_shapes(self, image, image_path=None):
        """Run the two-stage pipeline and return detected rectangles.

        Args:
            image: Qt image to label (``None`` yields an empty result).
            image_path: optional path used by the Qt-to-OpenCV conversion.

        Returns:
            An ``AutoLabelingResult`` with ``replace=True``, or ``[]`` when
            the image is missing or cannot be converted.
        """
        if image is None:
            return []

        try:
            image = qt_img_to_rgb_cv_img(image, image_path)

        except Exception as e:  # noqa
            logging.warning("Could not inference model")
            logging.warning(e)
            return []

        # Stage 1: sliced inference over the full image.
        results1 = get_sliced_prediction(
            image,
            self.model1,
            slice_height=self.slice_height,
            slice_width=self.slice_width,
            overlap_height_ratio=self.overlap_height_ratio,
            overlap_width_ratio=self.overlap_width_ratio,
            verbose=0,
        )
        shapes = []
        for region in results1.object_prediction_list:
            # Bbox coordinates may come back as floats; numpy slicing
            # requires integer indices, so cast explicitly.
            x0, y0 = int(region.bbox.minx), int(region.bbox.miny)
            x1, y1 = int(region.bbox.maxx), int(region.bbox.maxy)
            cell = image[y0:y1, x0:x1]
            if cell.size == 0:
                # Degenerate region (zero width/height) — nothing to crop.
                continue
            # Letterbox the crop to model2's input size. "stride" is not a
            # required config key, so fall back to the YOLOv5 default of 32.
            sub_image = self.letterbox(
                cell, self.det2_img_size, stride=self.config.get("stride", 32)
            )[0]
            # Stage 2: plain (non-sliced) inference on the letterboxed crop.
            results2 = get_prediction(
                sub_image,
                self.model2,
                verbose=0,
            )
            for det in results2.object_prediction_list:
                shape = Shape(label=det.category.name, shape_type="rectangle", flags={})
                # Undo the letterboxing, then translate back to full-image
                # coordinates by adding the region's top-left corner.
                box = [det.bbox.minx, det.bbox.miny, det.bbox.maxx, det.bbox.maxy]
                bbox = self.rescale(sub_image.shape, box, cell.shape)
                bbox = [int(co) for co in bbox]
                shape.add_point(QtCore.QPointF(x0 + bbox[0], y0 + bbox[1]))
                shape.add_point(QtCore.QPointF(x0 + bbox[2], y0 + bbox[3]))
                shapes.append(shape)
        result = AutoLabelingResult(shapes, replace=True)
        return result

    @staticmethod
    def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=False, scaleup=True, stride=32,
                  return_int=False):
        """Resize and pad image while meeting stride-multiple constraints.

        Returns ``(image, ratio, padding)`` where padding is ``(dw, dh)``
        half-paddings by default, or ``(left, top)`` ints when ``return_int``.
        """
        shape = im.shape[:2]  # current shape [height, width]
        if isinstance(new_shape, int):
            new_shape = (new_shape, new_shape)
        elif isinstance(new_shape, list) and len(new_shape) == 1:
            new_shape = (new_shape[0], new_shape[0])

        # Scale ratio (new / old)
        r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
        if not scaleup:  # only scale down, do not scale up (for better val mAP)
            r = min(r, 1.0)

        # Compute padding
        new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
        dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding

        if auto:  # minimum rectangle
            dw, dh = np.mod(dw, stride), np.mod(dh, stride)  # wh padding

        dw /= 2  # divide padding into 2 sides
        dh /= 2

        if shape[::-1] != new_unpad:  # resize
            im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR)
        # +/-0.1 rounding splits an odd total padding across the two sides.
        top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
        left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
        im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
        if not return_int:
            return im, r, (dw, dh)
        else:
            return im, r, (left, top)

    @staticmethod
    def rescale(ori_shape, box, target_shape):
        """Map a box from letterboxed coordinates back to the crop's frame.

        Args:
            ori_shape: shape of the letterboxed image the box was detected in.
            box: ``[x1, y1, x2, y2]`` in letterboxed coordinates (mutated in place).
            target_shape: shape of the original (pre-letterbox) crop.

        Returns:
            The box in crop coordinates, clipped to the crop bounds.
        """
        ratio = min(ori_shape[0] / target_shape[0], ori_shape[1] / target_shape[1])
        # Half-padding added by letterbox on each side (x, y).
        padding = ((ori_shape[1] - target_shape[1] * ratio) / 2, (ori_shape[0] - target_shape[0] * ratio) / 2)
        box[0] -= padding[0]
        box[2] -= padding[0]
        box[1] -= padding[1]
        box[3] -= padding[1]

        box[0] = box[0] / ratio
        box[1] = box[1] / ratio
        box[2] = box[2] / ratio
        box[3] = box[3] / ratio

        box[0] = np.clip(box[0], 0, target_shape[1])  # x1
        box[1] = np.clip(box[1], 0, target_shape[0])  # y1
        box[2] = np.clip(box[2], 0, target_shape[1])  # x2
        box[3] = np.clip(box[3], 0, target_shape[0])  # y2
        return box

    def unload(self):
        """Release both stage models."""
        del self.model1
        del self.model2
