"""
brief :Editor cjh
"""
import os.path
import time
import traceback
from typing import Any

import openvino.runtime as ov
import cv2
import numpy as np
import openvino.preprocess as op
from numpy import ndarray
from openvino import InferRequest
import logging
from numba import jit

# Configure root logging: level, message layout, and timestamp format.
# NOTE(review): filemode only takes effect when a filename= is also passed
# to basicConfig — confirm whether file logging was intended here.
logging.basicConfig(level=logging.INFO, format='%(levelname)s %(asctime)s [%(filename)s:%(lineno)d] %(message)s',
                    datefmt='%Y.%m.%d. %H:%M:%S',
                    filemode="a")
# Timestamps captured once at import time, used to name per-run log files.
date = time.strftime('%y-%m-%d', time.localtime(time.time()))
daytime = time.strftime('%H-%M', time.localtime(time.time()))
# Directory that holds the log files.
# Fix: was an f-string with no placeholders; also replaced the racy
# os.path.exists + os.makedirs pair with exist_ok=True (no TOCTOU window).
logfile_path = "log data"
os.makedirs(logfile_path, exist_ok=True)


def record_log(name_process):
    """Return the named logger for the given process/component name.

    Output goes through the root handlers set up by ``logging.basicConfig``
    above; a per-process file handler used to be attached here and was removed.
    """
    return logging.getLogger(name_process)


class Colors:
    """Fixed 20-color palette (Ultralytics-style) with RGB/BGR lookup helpers."""

    def __init__(self):
        # hex = matplotlib.colors.TABLEAU_COLORS.values()
        hexs = ('FF3838', 'FF9D97', 'FF701F', 'FFB21D', 'CFD231', '48F90A', '92CC17', '3DDB86', '1A9334', '00D4BB',
                '2C99A8', '00C2FF', '344593', '6473FF', '0018EC', '8438FF', '520085', 'CB38FF', 'FF95C8', 'FF37C7')
        # palette holds (r, g, b) tuples; n is used to wrap indices around.
        self.palette = [self.hex2rgb(f'#{c}') for c in hexs]
        self.n = len(self.palette)

    def rgb_call(self, i, bgr=False):
        """Return the palette color for index ``i`` (wraps modulo palette size).

        :param i: any integer-like index; mapped into the palette cyclically.
        :param bgr: when True, return (b, g, r) for OpenCV drawing calls.
        :return: 3-tuple of ints in 0..255.
        """
        c = self.palette[int(i) % self.n]
        return (c[2], c[1], c[0]) if bgr else c

    @staticmethod
    def hex2rgb(h):  # rgb order (PIL)
        """Convert a '#RRGGBB' string to an (r, g, b) tuple."""
        return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4))

    @staticmethod
    def rgb2hex(rgb):
        """Convert an (r, g, b) tuple to a 6-digit lowercase hex string.

        Fix: the previous implementation packed channels low-byte-first
        (``r + (g << 8) + (b << 16)``) and dropped leading zeros, so it was
        not the inverse of :meth:`hex2rgb`. Now
        ``rgb2hex(hex2rgb(f'#{s}')) == s.lower()``.

        :param rgb: RGB color tuple, Tuple[int, int, int]
        :return: str, e.g. 'ff3838'
        """
        r, g, b = rgb
        return f"{(r << 16) + (g << 8) + b:06x}"


@jit(nopython=True)
def accle_for_V5(detections: np.ndarray, confidence: float):
    """Coarse confidence pre-filter for YOLOv5 raw predictions (v5 only).

    Keeps rows whose objectness (column 4) reaches 2/3 of the configured
    threshold — deliberately loose so the strict cut happens later in NMS,
    while most background rows are discarded cheaply here.
    """
    return [row for row in detections if row[4].item() >= 2 * confidence / 3]


@jit(nopython=True)
def accle_for_V8(detections: np.ndarray, confidence: float):
    """Coarse confidence pre-filter for YOLOv8 raw predictions (v8 only).

    v8 rows are [cx, cy, w, h, class scores...]; the best class score doubles
    as the confidence. Rows are kept when that score reaches 2/3 of the
    configured threshold — loose on purpose, the strict cut happens in NMS.
    """
    keep = []
    loose_thres = 2 * confidence / 3
    for row in detections:
        if np.max(row[4:]) >= loose_thres:
            keep.append(row)
    return keep


class Vino(Colors):
    """YOLOv5/YOLOv8 detector running on OpenVINO (CPU or GPU).

    Reads an IR model (.xml/.bin, optionally using a cached .blob), bakes
    BGR->RGB conversion and 1/255 scaling into the graph, and exposes
    :meth:`run`, which takes a BGR frame and returns the (optionally
    annotated) frame plus a list of detections.
    """

    def __init__(self, model_path="/home/nuc2/PycharmProjects/yolov5-master/best_ball.xml"
                 , weights_path="/home/nuc2/PycharmProjects/yolov5-master/best_ball.bin"
                 , conf_thres=0.55
                 , line_thickness=1
                 , iou_thres=0.4
                 , classes=None
                 , img_size=640
                 , blob_acc=False
                 , device="GPU"
                 , view_img=True
                 , name_process="Vino"):
        """
        :param model_path: path to the IR .xml model.
        :param weights_path: path to the IR .bin weights.
        :param conf_thres: confidence threshold for keeping detections.
        :param line_thickness: thickness of the drawn boxes/labels.
        :param iou_thres: IoU threshold used by NMS.
        :param classes: optional list of class ids to keep; None/empty keeps all.
            (Fix: default was a shared mutable list ``[]``.)
        :param img_size: square network input size.
        :param blob_acc: enable OpenVINO model caching next to the weights.
        :param device: OpenVINO device string, e.g. "CPU" or "GPU".
        :param view_img: draw boxes/labels on the returned frame when True.
        :param name_process: logger name for this instance.
        """
        super().__init__()
        self.confidence = conf_thres
        self.line_thickness = line_thickness
        self.device = device
        self.iou_thres = iou_thres
        # Fix: avoid the mutable-default-argument trap; behavior is unchanged
        # for callers that pass their own list or rely on the old [] default.
        self.classes = [] if classes is None else classes
        self.img_size = img_size
        self.view_img = view_img
        self.logger = record_log(name_process)
        self.logger.info(
            f"{model_path, weights_path, conf_thres, line_thickness, iou_thres, img_size, blob_acc, device}")
        # Compile the model and create the infer request once, up front.
        self.detector = self.Core(model_path=model_path, weights_path=weights_path, blob_flag=blob_acc)

    def letter_box(self, box_, img, ratio, confidence_box, class_id) -> tuple:
        """Map a network-space (cx, cy, w, h) box back to image-space xyxy
        (in place) and optionally draw it on ``img``.

        :param box_: mutable box [cx, cy, w, h] in network coordinates.
        :param img: image to annotate (modified by the drawing calls).
        :param ratio: inverse scale factor returned by :meth:`Preprocess`.
        :param confidence_box: detection confidence used for the label text.
        :param class_id: class index; also selects the palette color.
        :return: (box_ as absolute xyxy, annotated image as uint8).
        """
        color = self.rgb_call(class_id, True)
        # Undo the letterbox scaling: center/size -> absolute top-left /
        # bottom-right corners in the original image.
        box_[0] = int(ratio * (box_[0] - box_[2] / 2))
        box_[1] = int((box_[1] - (box_[3] / 2)) * ratio)
        box_[2] = int(ratio * box_[2]) + box_[0]
        box_[3] = int(box_[3] * ratio) + box_[1]
        if self.view_img:
            cv2.rectangle(img, (int(box_[0]), int(box_[1])), (int(box_[2]), int(box_[3])), color,
                          self.line_thickness)  # object bounding box
            cv2.putText(img, f"conf:{str(int(confidence_box * 100))}%,id:{class_id}",
                        (int(box_[0]), int(box_[1]) - 5),
                        cv2.FONT_HERSHEY_SIMPLEX,
                        self.line_thickness * 0.5, color, self.line_thickness)
        # NOTE(review): this resizes the image to its own current size, which
        # is effectively a pass-through — confirm whether a real downscale
        # target was intended here.
        size_decrease = (int(img.shape[1]), int(img.shape[0]))
        img_decrease = cv2.resize(img, size_decrease, interpolation=cv2.INTER_CUBIC)
        return box_, img_decrease.astype(np.uint8)

    def Core(self, model_path: str, weights_path: str, blob_flag: bool) -> InferRequest:
        """Read, preprocess-wrap and compile the IR model; create an infer request.

        :param model_path: path to the .xml IR model.
        :param weights_path: path to the .bin weights; its directory doubles
            as the blob cache directory when ``blob_flag`` is set.
        :param blob_flag: enable OpenVINO model caching / reuse of a cached .blob.
        :return: an :class:`InferRequest`, or None when loading fails (logged).
        """
        try:
            core = ov.Core()
            # If a cached .blob already sits next to the weights and caching
            # is requested, read the model and let CACHE-less config reuse it;
            # otherwise read .xml + .bin directly.
            if ".blob" in [os.path.splitext(file)[-1] for file in
                           os.listdir(os.path.dirname(weights_path))] and blob_flag:
                config = {}
                model = core.read_model(model=model_path)
            else:
                config = {"CACHE_DIR": os.path.dirname(weights_path)} if blob_flag else {}  # persist a blob cache
                model = core.read_model(model=model_path, weights=weights_path)
            # Bake the preprocessing (NHWC f32 BGR in -> RGB, scaled by 1/255,
            # NCHW model layout) into the compiled graph.
            ppp = op.PrePostProcessor(model)
            ppp.input().tensor().set_element_type(ov.Type.f32).set_layout(ov.Layout("NHWC")).set_color_format(
                op.ColorFormat.BGR)
            ppp.input().preprocess().convert_element_type(ov.Type.f32).convert_color(op.ColorFormat.RGB).scale(
                [255., 255., 255.])
            ppp.input().model().set_layout(ov.Layout("NCHW"))
            ppp.output(0).tensor().set_element_type(ov.Type.f32)
            model = ppp.build()
            # Compile for the requested device (CPU or GPU).
            compile_model = core.compile_model(model, self.device, config)
            results = compile_model.create_infer_request()
            self.logger.info("Success to Start")
            return results
        except Exception:
            # Fix: was a bare `except:` (which also swallowed SystemExit /
            # KeyboardInterrupt) logged at INFO level. Keep the best-effort
            # contract (None on failure) but narrow the catch and log as error.
            self.logger.error(traceback.format_exc())

    @staticmethod
    def Preprocess(image, new_shape):
        """Resize ``image`` keeping its aspect ratio, then pad bottom/right
        with gray so the result matches ``new_shape`` exactly.

        :param image: input image (HWC).
        :param new_shape: (height, width) of the square network input.
        :return: (padded image, inverse scale ratio to map boxes back).
        """
        old_size = image.shape[:2]
        # Scale so the longest side fits new_shape exactly.
        ratio = float(new_shape[-1] / max(old_size))
        new_size = tuple([int(x * ratio) for x in old_size])
        image = cv2.resize(image, (new_size[1], new_size[0]))
        # Pad only bottom/right so box coordinates need no offset correction.
        dw = new_shape[1] - new_size[1]
        dh = new_shape[0] - new_size[0]
        color = [100, 100, 100]
        new_im = cv2.copyMakeBorder(image, 0, dh, 0, dw, cv2.BORDER_CONSTANT, value=color)
        return new_im, 1 / ratio

    def Postprocessing(self, detections: np.ndarray, method: str) -> tuple[Any, Any, ndarray]:
        """Filter raw predictions by confidence, then run OpenCV NMS.

        :param detections: 2-D array, one candidate per row.
            V5 rows: [cx, cy, w, h, objectness, class scores...];
            V8 rows: [cx, cy, w, h, class scores...].
        :param method: "V8" for the v8 layout, anything else for v5.
        :return: (boxes, confidences, class_ids) of the NMS survivors.
        :raises ValueError: when no candidate passes the confidence pre-filter.
        """
        try:
            if method == "V8":  # v8 output arrives as [1, xx, 8400]; caller transposes to rows
                detections = np.array(accle_for_V8(detections, self.confidence))  # fast coarse filter
                confidences_array = np.max(detections[..., 4:], axis=-1)
                boxes = detections[..., :4]
            else:
                # Cheap objectness pre-filter before the expensive NMS; in v8
                # the class score already acts as the confidence.
                detections = np.array(accle_for_V5(detections, self.confidence))  # fast coarse filter
                confidences_array = np.max(detections[..., 5:], axis=-1)
                boxes = detections[..., :4]
            # OpenCV NMS keeps the best non-overlapping candidates.
            indexes = cv2.dnn.NMSBoxes(boxes, confidences_array, self.confidence,
                                       self.iou_thres)
            final_detections = detections[indexes]
            # NOTE(review): argmax over [..., 4:] then -1 assumes column 4 is
            # objectness (the v5 layout); for V8 rows this would shift class
            # ids by one — confirm against the model's output layout.
            return boxes[indexes], confidences_array[indexes], np.argmax(
                final_detections[..., 4:], axis=1) - 1
        except ValueError:
            self.logger.error("NO Data in image!")  # raised when nothing survives the pre-filter
            raise

    def run(self, img) -> tuple:
        """Run one inference pass on a BGR frame.

        :param img: BGR image (np.ndarray, HWC).
        :return: (annotated uint8 frame, list of [box_xyxy, confidence,
            class_id]). On any failure the error is logged and the untouched
            frame plus an empty list are returned.
        """
        try:
            img_copy = img.copy()
            # Letterbox-style resize to a square network input.
            img_re, ratio = self.Preprocess(img, (self.img_size, self.img_size))
            # NHWC batch-of-one tensor, matching the PrePostProcessor setup.
            input_tensor = np.expand_dims(img_re, 0)
            self.detector.infer({0: input_tensor})
            # v8 and v5 share the output layout apart from the anchor axis;
            # unwrap [1, xx, xx] -> [xx, xx] before post-processing.
            output = self.detector.get_output_tensor(0)
            # Heuristic: v8 puts the (largest) anchor count last, so the index
            # of the largest dimension tells the families apart; v8 rows are
            # obtained by transposing.
            if np.argmax(output.shape) == 2:
                detections = self.Postprocessing(np.squeeze(output.data).transpose((1, 0)), "V8")
            else:
                detections = self.Postprocessing(np.squeeze(output.data), "V5")
            boxes, confidences_deque, class_ids = detections
            det_out = []
            for index in range(len(boxes)):
                # Keep every class when no filter is configured, otherwise only
                # the requested ids (the two original branches were identical
                # apart from this condition).
                if not self.classes or class_ids[index] in self.classes:
                    box_, img_copy = self.letter_box(boxes[index], img_copy, ratio, confidences_deque[index],
                                                     class_ids[index])
                    det_out.append([box_, confidences_deque[index], class_ids[index]])
            # Each det_out entry: box (xyxy), confidence, class id.
            return img_copy.astype(np.uint8), det_out
        except Exception:
            self.logger.error(traceback.format_exc())
            # Fix: previously fell through returning None, which crashed
            # callers unpacking `frame, det = run(...)`; return a safe result.
            return img.astype(np.uint8), []


if __name__ == '__main__':
    # Demo: run the detector on one static image in a preview loop.
    xml_path = r"D:\eyedetector\weights\best.xml"
    bin_path = r"D:\eyedetector\weights\best.bin"
    detector = Vino(xml_path, bin_path, 0.5, device="CPU", img_size=640, view_img=True)
    while True:
        image = cv2.imread(r"D:\eyedetector\source_imgs\input_images\front.jpg")
        image, detections = detector.run(image)
        cv2.imshow("frame", image)
        # ESC exits the preview loop.
        if cv2.waitKey(1) & 0xFF == 27:
            break
