import logging
import os
import math
import numpy as np
import cv2
import time
from scipy import spatial
from itertools import permutations

# from ..utils.processor import Processor
from ..utils import inference_engine as ie

class Detector():
    """YOLOv5-style object detector on top of a pluggable inference engine.

    Decodes the raw multi-head network output (3 strides, 3 anchor boxes per
    head) into per-image detection arrays of (x1, y1, x2, y2, score, cls).
    """

    def __init__(self, model_path, mode=ie.MODE_ORT, batch_size=None, **kwargs):
        """
        :param model_path: path to the serialized model file.
        :param mode: inference backend selector; only ``ie.MODE_ORT``
                     (ONNX Runtime) is supported.
        :param batch_size: fixed batch size for the engine, or None.
        :param kwargs: extra options forwarded to the engine constructor.
        :raises ValueError: if ``mode`` names an unsupported backend.
        """
        self.mode = mode
        self.kwargs = kwargs
        self.head_strides = [8, 16, 32]  # output stride of each detection head
        self.bs = batch_size
        # Anchor (w, h) pairs per head, reshaped so they broadcast against
        # predictions of shape (n, 3, ny, nx, 2).
        self.anchor_grid = np.array([[10, 13, 16, 30, 33, 23],
                                     [30, 61, 62, 45, 59, 119],
                                     [116, 90, 156, 198, 373, 326]]).reshape((3, 1, 3, 1, 1, 2))

        if mode == ie.MODE_ORT:
            self.channel_last = False
            self.engine = ie.ORTEngine(model_path, batch_size, **kwargs)
        else:
            # `assert` is stripped under `python -O`; raise explicitly instead.
            raise ValueError('error: unsupported mode of inference engine.')

    def detect(self, imgs, conf_thresh=0.40, iou_thrsh=0.3, img_size=512):
        """Run the full detection pipeline on a list of BGR images.

        :param imgs: list of HxWx3 images (OpenCV layout).
        :param conf_thresh: minimum confidence for a detection to survive.
        :param iou_thrsh: IoU threshold used by NMS.
        :param img_size: square side length the images are letterboxed to.
        :return: one detection result per input image.
        """
        batch, source_shapes, model_shape = self.preprocess(imgs, img_size)
        raw_outputs = self.inference(batch)
        return self.postprocess(raw_outputs, conf_thresh, iou_thrsh, model_shape, source_shapes)

    def preprocess(self, imgs, img_size):
        """Letterbox every image to ``img_size`` and pack them into one batch.

        :param imgs: list of HxWx3 images.
        :param img_size: target square side length.
        :return: ([batch_tensor], N*2 array of original (h, w) shapes,
                 the (h, w) shape fed to the network).
        """
        original_shapes = np.array([im.shape[:2] for im in imgs])  # (height, width) per image
        target = (img_size, img_size)
        tensors = []
        for im in imgs:
            boxed = self.letterbox(im, new_shape=target, auto=False)
            tensors.append(self._transform(boxed))
        # The engine expects a list of input tensors; here there is exactly one.
        return [np.stack(tensors, axis=0)], original_shapes, target

    def inference(self, inputs):
        outputs = self.engine.invoke(inputs)
        return outputs

    def postprocess(self, outputs, conf_thresh, iou_thresh, new_shape, original_shapes):
        # batch_out has 3 heads
        # each head's outputs4 shape=batch_size*3*h*w(num_classes+5)('3' means different anchor size)
        result = []
        z = []
        for i, head in enumerate(outputs):
            n, _, ny, nx, no = head.shape
            grid = self._make_grid(nx=nx, ny=ny)
            y = self._sigmoid(head)
            y[..., 0:2] = (y[..., 0:2]*2. - 0.5 + grid)*self.head_strides[i]
            y[..., 2:4] = (y[..., 2:4]*2)**2 * self.anchor_grid[i]
            z.append(y.reshape(n, -1, no))

        length = outputs[0].shape[0]
        for i in range(length):
            y = np.expand_dims(np.vstack([z[0][i], z[1][i], z[2][i]]), axis=0)
            dets = self._postprocess_each(y, conf_thresh, iou_thresh, new_shape=new_shape,
                                          origina_shape=original_shapes[i])
            result.append(dets)
        return result

    @staticmethod
    def _make_grid(nx, ny):
        yv, xv = np.meshgrid(np.arange(ny), np.arange(nx), indexing='ij')
        return np.stack([xv, yv], axis=2).reshape((1, 1, ny, nx, 2)).astype(np.float)

    @staticmethod
    def _sigmoid(x):
        # s = 1 / (1+np.exp(-1*x))    # 可能会溢出
        s = 0.5*(1+np.tanh(0.5*x))      # 替代sigmoid
        return s

    @staticmethod
    def _transform(img):
        img = img[:, :, ::-1].transpose(2, 0, 1)
        img = np.ascontiguousarray(img)
        img = img / 255.0
        return img.astype(np.float32)

    def _postprocess_each(self, prediction, conf_thresh, iou_thresh, new_shape, origina_shape):
        """
        :param prediction: shape:1*num_possible_bboxes*6, '6' means "x, y, w, h, obj_conference_score, cls_score"
        :param conf_thresh:
        :param origina_shape:
        :return:
        """
        prediction = prediction[0]
        xc = prediction[..., 4] > conf_thresh

        # setting
        max_det = 300  # maximum number of detections per image
        nc = prediction.shape[1] - 5
        multi_label = nc > 1
        output = []

        x = prediction[xc]
        if len(x) < 0:
            return output
        x[:, 5:] *= x[:, 4:5]   # conf = obj_conf * cls_conf

        box = self.xywh2xyxy(x[:, :4])

        # Detections matrix nx6 (xyxy, conf, cls)
        if multi_label:
            i, j = np.nonzero(x[:, 5:] > conf_thresh)
            x = np.hstack([box[i], x[i, j+5, None], j[:, None].astype(np.float)])
        else:
            # best class only
            j = np.argmax(x[:, 5:], axis=1).reshape((-1, 1))
            conf = np.max(x[:, 5:], axis=1, keepdims=True)
            x = np.hstack([box, conf, j])[conf[:, 0] > conf_thresh]    # shape:n*6, (x,y,w,h,score,cls)

        n = x.shape[0]  # number of boxes
        if not n:
            return output

        idx = self._nms(x, iou_thresh)
        if len(idx) > max_det:  # limit detections
            idx = idx[:max_det]
        idx = np.array(idx, dtype=np.int)
        output = self._rescale(x[idx], new_shape, origina_shape)
        return output

    @staticmethod
    def letterbox(img, new_shape=(512, 512), color=(0, 0, 0), auto=False):
        """Resize ``img`` to fit ``new_shape`` keeping aspect ratio, padding
        the remainder with ``color``.

        :param img: HxWxC image.
        :param new_shape: target (h, w), or a single int for a square.
        :param color: border fill value.
        :param auto: when True, pad only up to the next multiple of 64 and
                     also return the resulting (h, w).
        :return: the padded image, or (image, (h, w)) when ``auto`` is True.
        """
        if isinstance(new_shape, int):
            new_shape = (new_shape, new_shape)

        src_h, src_w = img.shape[:2]
        # scale = new / old, never upscaling one side past the target
        scale = min(new_shape[0] / src_h, new_shape[1] / src_w)

        scaled_w, scaled_h = int(round(src_w * scale)), int(round(src_h * scale))
        pad_w = new_shape[1] - scaled_w
        pad_h = new_shape[0] - scaled_h
        if auto:  # minimum rectangle: only pad up to a multiple of 64
            pad_w, pad_h = np.mod(pad_w, 64), np.mod(pad_h, 64)
        pad_w /= 2  # split the padding between the two sides
        pad_h /= 2

        if (src_w, src_h) != (scaled_w, scaled_h):  # resize only when needed
            img = cv2.resize(img, (scaled_w, scaled_h), interpolation=cv2.INTER_LINEAR)
        # the +/-0.1 rounding splits an odd pad pixel between the two sides
        top, bottom = int(round(pad_h - 0.1)), int(round(pad_h + 0.1))
        left, right = int(round(pad_w - 0.1)), int(round(pad_w + 0.1))
        img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)
        if auto:
            return img, img.shape[:2]
        return img

    @staticmethod
    def _nms(x, iou_thresh):
        bboxes, scores = x[:, :4], x[:, 4]
        # top-left(x1, y1),     bottom-right(x2, y2)
        bx1, by1, bx2, by2 = bboxes[:, 0], bboxes[:, 1], bboxes[:, 2], bboxes[:, 3]

        bw, bh = bx2-bx1, by2-by1
        areas = (bw+1)*(bh+1)
        order = scores.argsort()[::-1]      # from big to small
        keep = []   # reserved box

        while order.size > 0:
            i = order[0]
            keep.append(i)
            xx1 = np.maximum(bx1[i], bx1[order[1:]])
            yy1 = np.maximum(by1[i], by1[order[1:]])
            xx2 = np.minimum(bx2[i], bx2[order[1:]])
            yy2 = np.minimum(by2[i], by2[order[1:]])

            w = np.maximum(0., xx2-xx1+1)
            h = np.maximum(0., yy2-yy1+1)
            inter = w*h
            union = areas[i]+areas[order[1:]] - inter

            iou = inter / union
            # keep the box whose iou is smaller than iou_thresh
            indexs = np.where(iou <= iou_thresh)[0]
            order = order[indexs+1]

        return keep

    @staticmethod
    def xywh2xyxy(x):
        # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
        y = np.zeros_like(x)
        y[:, 0] = x[:, 0] - x[:, 2] / 2  # top left x
        y[:, 1] = x[:, 1] - x[:, 3] / 2  # top left y
        y[:, 2] = x[:, 0] + x[:, 2] / 2  # bottom right x
        y[:, 3] = x[:, 1] + x[:, 3] / 2  # bottom right y
        return y

    @staticmethod
    def _rescale(x, new_shape, original_shape):
        # Rescale coords (xyxy) from new_shape to original_shape
        bboxes = x[:, :4]
        gain = min(new_shape[0]/original_shape[0], new_shape[1]/original_shape[1])
        pad = (new_shape[1] - original_shape[1] * gain) / 2, (new_shape[0] - original_shape[0] * gain) / 2  # wh padding

        bboxes[:, [0, 2]] -= pad[0]
        bboxes[:, [1, 3]] -= pad[1]
        bboxes[:, :4] /= gain

        np.clip(bboxes[:, 0], 0, original_shape[1], bboxes[:, 0])
        np.clip(bboxes[:, 1], 0, original_shape[0], bboxes[:, 1])
        np.clip(bboxes[:, 2], 0, original_shape[1], bboxes[:, 2])
        np.clip(bboxes[:, 3], 0, original_shape[0], bboxes[:, 3])

        x[:, :4] = bboxes.round()
        return x
