from functools import partial
import cv2
import onnxruntime as ort
import numpy as np
import sys
from onnx_infer.utils.utils import nms
import warnings
warnings.filterwarnings("ignore")

class UltraLightFaceDetection():
    """Ultra-Light-Fast face detector backed by an ONNX Runtime session.

    Decodes the raw SSD-style outputs of the "version-slim"/"RFB" face
    models (anchor-based box regression + per-anchor class scores) into
    pixel-space, square bounding boxes.
    """

    def __init__(self, filepath, input_size=(320, 240), conf_threshold=0.8,
                 center_variance=0.1, size_variance=0.2,
                 nms_max_output_size=20, nms_iou_threshold=0.3) -> None:
        """
        Args:
            filepath: path to the .onnx model file.
            input_size: (width, height) the network expects.
            conf_threshold: minimum face-class score to keep a detection.
            center_variance: SSD decoding variance for box centers.
            size_variance: SSD decoding variance for box sizes.
            nms_max_output_size: max boxes kept by non-maximum suppression.
            nms_iou_threshold: IoU threshold used by NMS.
        """
        # Feature-map sizes (w, h) per detection level for a 320x240 input.
        self._feature_maps = np.array([[40, 30], [20, 15], [10, 8], [5, 4]])
        # Anchor edge lengths (input pixels) per detection level.
        self._min_boxes = [[10, 16, 24], [32, 48],
                           [64, 96], [128, 192, 256]]

        self._resize = partial(cv2.resize, dsize=input_size)
        self._input_size = np.array(input_size)[:, None]  # shape (2, 1)

        self._anchors_xy, self._anchors_wh = self._generate_anchors()
        self._conf_threshold = conf_threshold
        self._center_variance = center_variance
        self._size_variance = size_variance

        self._nms = partial(nms,
                            max_output_size=nms_max_output_size,
                            iou_threshold=nms_iou_threshold)

        self.ort_session = ort.InferenceSession(filepath)
        self.input_name = self.ort_session.get_inputs()[0].name

    def _generate_anchors(self):
        """Build the normalized SSD priors.

        Returns:
            (anchors_xy, anchors_wh): each (N, 2), anchor centers and
            sizes in normalized [0, 1] coordinates.
        """
        anchors = []
        for feature_map_w_h, min_box in zip(self._feature_maps, self._min_boxes):
            # Normalized anchor sizes, one row per anchor of this level.
            wh_grid = min_box / self._input_size
            wh_grid = np.tile(wh_grid.T, (np.prod(feature_map_w_h), 1))

            # Anchor centers at cell midpoints, normalized to [0, 1].
            xy_grid = np.meshgrid(range(feature_map_w_h[0]),
                                  range(feature_map_w_h[1]))
            xy_grid = np.add(xy_grid, 0.5)
            xy_grid /= feature_map_w_h[..., None, None]

            xy_grid = np.stack(xy_grid, axis=-1)
            # Repeat each center once per anchor size of this level.
            xy_grid = np.tile(xy_grid, [1, 1, len(min_box)])
            xy_grid = xy_grid.reshape(-1, 2)

            prior = np.concatenate((xy_grid, wh_grid), axis=-1)
            anchors.append(prior)

        anchors = np.concatenate(anchors, axis=0)
        anchors = np.clip(anchors, 0.0, 1.0)

        return anchors[:, :2], anchors[:, 2:]

    def _pre_processing(self, img):
        """Resize, BGR->RGB, min-max normalize to [-1, 1], NCHW float32."""
        resized = self._resize(img)
        image_rgb = resized[..., ::-1]
        image_norm = image_rgb.astype(np.float32)
        # In-place min-max normalization to the [-1, 1] range.
        cv2.normalize(image_norm, image_norm,
                      alpha=-1, beta=1, norm_type=cv2.NORM_MINMAX)
        image_norm = np.transpose(image_norm, [2, 0, 1])  # HWC -> CHW
        return image_norm[None, ...].astype(np.float32)

    def inference(self, img):
        """Detect faces in a BGR image.

        Args:
            img: HxWx3 BGR image (as returned by cv2.imread).

        Returns:
            (boxes, scores): boxes is (N, 4) pixel-space square boxes as
            (left, top, right, bottom); scores is (N,) confidences
            aligned with the boxes.
        """
        # BGR image to tensor
        input_tensor = self._pre_processing(img)

        scores, boxes = self.ort_session.run(None, {self.input_name: input_tensor})

        scores = scores[0]  # drop batch dimension
        boxes = boxes[0]

        # decode boxes to corner format
        boxes, scores = self._post_processing(boxes, scores)
        # Scale normalized coords to pixels: multiply by (w, h, w, h).
        boxes *= np.tile(img.shape[1::-1], 2)
        boxes = self._make_box_square(boxes)
        return boxes, scores

    def _post_processing(self, boxes, scores):
        """Decode regressions, filter by confidence, then apply NMS."""
        # bounding box regression
        boxes = self._decode_regression(boxes)
        scores = scores[:, 1]  # face-class probability

        # confidence threshold filter
        conf_mask = scores > self._conf_threshold
        boxes, scores = boxes[conf_mask], scores[conf_mask]

        # non-maximum suppression
        nms_mask = self._nms(boxes=boxes, scores=scores)
        boxes = np.take(boxes, nms_mask, axis=0)
        # BUG FIX: scores must be filtered with the same NMS indices,
        # otherwise the returned scores do not line up with the boxes.
        scores = np.take(scores, nms_mask, axis=0)
        return boxes, scores

    def _decode_regression(self, reg):
        """Decode SSD offsets against the anchors into corner boxes.

        Args:
            reg: (N, 4) raw regression output (dx, dy, dw, dh).

        Returns:
            (N, 4) normalized corner boxes (x1, y1, x2, y2), clipped
            to [0, 1].
        """
        # Standard SSD decoding with the configured variances.
        center_xy = reg[:, :2] * self._center_variance * \
            self._anchors_wh + self._anchors_xy
        center_wh = np.exp(
            reg[:, 2:] * self._size_variance) * self._anchors_wh / 2

        # center to corner
        start_xy = center_xy - center_wh
        end_xy = center_xy + center_wh

        boxes = np.concatenate((start_xy, end_xy), axis=-1)
        boxes = np.clip(boxes, 0.0, 1.0)
        return boxes

    def _make_box_square(self, boxes):
        """Expand each (left, up, right, down) box into a square.

        The square is centered on the box center with side equal to the
        larger of the box's width/height. Vectorized replacement for the
        original per-box Python loop.
        """
        boxes = np.asarray(boxes, dtype=float).reshape(-1, 4)
        centers = (boxes[:, :2] + boxes[:, 2:]) / 2
        # Side of the square: max(width, height) per box.
        half_side = np.max(boxes[:, 2:] - boxes[:, :2],
                           axis=1, keepdims=True) / 2
        return np.concatenate((centers - half_side, centers + half_side),
                              axis=-1)


if __name__ == "__main__":
    filepath = r'detect\weights\version-slim-320_without_postprocessing.onnx'
    model = UltraLightFaceDetection(filepath)

    input_filename = r'asset\1.jpg'
    img = cv2.imread(input_filename)
    # cv2.imread fails silently and returns None on a bad/missing path;
    # bail out with a clear message instead of crashing inside inference.
    if img is None:
        sys.exit(f'could not read image: {input_filename}')

    boxes, _ = model.inference(img)
    # Draw all detections first, then show the annotated image once
    # (the original showed and blocked on a keypress per box).
    for box in boxes.astype(int):
        print(box)
        cv2.rectangle(img, (box[0], box[1]), (box[2], box[3]), (255, 255, 0), 4)
    cv2.imshow('dsa', img)
    cv2.waitKey()
    cv2.destroyAllWindows()