import numpy as np
import cv2
import time
import tflite_runtime.interpreter as lite
from components.utils import parse_predict, show_image, putText, priors_box,imgHandle,show_image_resize
from components import config

cfg = config.cfg

BZW_OBJECTION_MODEL = './model/jtbz_detection.tflite'
JTBZ_CLASS_MODEL = './model/mobilenet_model.tflite'


class tflite:
    """Thin wrapper around a TFLite interpreter for this project's models.

    The model_name selects which pre/post-processing branch run() applies;
    the values are looked up in the shared cfg dict (bzw_detection, jtbz, ...).
    """

    def __init__(self, model_name, model_path=BZW_OBJECTION_MODEL):
        """
        model_name: model identifier (a value from cfg, e.g. cfg["bzw_detection"])
        model_path: path to the .tflite model file
        """
        self.interpreter = lite.Interpreter(model_path=model_path)   # load model
        self.interpreter.allocate_tensors()                          # allocate tensors
        self.model_name = model_name
        if model_name == cfg["bzw_detection"]:
            # The detector needs anchor (prior) boxes for decoding predictions;
            # float16 halves their memory footprint.
            self.priors, _ = priors_box(cfg, image_sizes=(240, 320))
            self.priors = self.priors.astype(np.float16)

        # Input / output tensor metadata
        self.input_details = self.interpreter.get_input_details()
        self.output_details = self.interpreter.get_output_details()

        print(self.model_name + "_input_details", self.input_details)
        # fixed typo: was "_output_datalis"
        print(self.model_name + "_output_details", self.output_details)

        # Shape of the expected input tensor
        print(self.model_name + "_input_shape", self.input_details[0]['shape'])

    def inference(self, img):
        """Run one forward pass on a preprocessed batch; return the first output tensor."""
        self.interpreter.set_tensor(self.input_details[0]['index'], img)
        self.interpreter.invoke()
        return self.interpreter.get_tensor(self.output_details[0]['index'])

    def run(self, frame):
        """Preprocess frame, infer, and draw results onto it.

        Returns the annotated frame, or None when frame is None or when
        model_name has no implemented branch (cfg['hz'] / cfg['bzw']).
        """
        if frame is None:
            # Previously this only printed and then crashed in cv2.resize;
            # bail out early instead.
            print('No camera found')
            return None
        if self.model_name == cfg['bzw_detection']:
            return self._run_detection(frame)
        if self.model_name == cfg['jtbz']:
            return self._run_classification(frame)
        if self.model_name == cfg['hz']:
            ...  # not implemented yet
        elif self.model_name == cfg['bzw']:
            ...  # not implemented yet

    def _run_detection(self, frame):
        """Object-detection branch: 320x240 input, normalized to [-0.5, 0.5]."""
        frame = cv2.resize(frame, (320, 240))
        h, w, _ = frame.shape
        img = np.float32(frame.copy())
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img = img / 255.0 - 0.5       # scale pixels into [-0.5, 0.5]
        img = img[np.newaxis, ...]    # add batch dimension
        start = time.time()
        predictions = self.inference(img)
        boxes, classes, scores = parse_predict(predictions, self.priors, cfg)
        print("boxes_len:", len(boxes), "classes:", len(classes))
        for prior_index in range(len(classes)):
            show_image(frame, boxes, classes, scores, h, w, prior_index, cfg['bzw_detection_labels'])

        # max() guards against a zero elapsed time (ZeroDivisionError)
        fps_str = "FPS: %.2f" % (1 / max(time.time() - start, 1e-6))
        cv2.putText(frame, fps_str, (25, 25), cv2.FONT_HERSHEY_DUPLEX, 0.75, (0, 255, 0), 2)
        return frame

    def _run_classification(self, frame):
        """Classification branch: 224x224 input, normalized to [0, 1]."""
        frame = cv2.resize(frame, (224, 224))
        img = np.float32(frame.copy())
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img = img / 255.0             # scale pixels into [0, 1]
        img = img[np.newaxis, ...]    # add batch dimension
        start = time.time()
        y_pred = self.inference(img)

        # Class labels (animal / scenery / flora / person) — user-facing text,
        # kept exactly as the model was trained.
        labels = ['动物', "风情", "花草", "人物"]
        frame = putText(frame, labels[np.argmax(y_pred)], org=(150, 0))
        # max() guards against a zero elapsed time (ZeroDivisionError)
        fps_str = "FPS: %.2f" % (1 / max(time.time() - start, 1e-6))
        cv2.putText(frame, fps_str, (0, 25), cv2.FONT_HERSHEY_DUPLEX, 0.75, (0, 255, 0), 2)
        return frame

def main():
    """Capture frames from camera 1 and run both models, one window each.

    Press 'q' in a window to quit. The capture device and windows are
    released on exit (including on errors), which the original exit()
    call skipped.
    """
    # capture = cv2.VideoCapture("rtsp://admin:888888@192.168.16.100:10554/tcp/av0_0")
    capture = cv2.VideoCapture(1)
    bzw_detection_model = tflite(cfg["bzw_detection"], BZW_OBJECTION_MODEL)
    jtbz_class_model = tflite(cfg["jtbz"], JTBZ_CLASS_MODEL)

    try:
        while True:
            ok, frame = capture.read()
            if not ok or frame is None:
                # A failed read used to pass None into run()/imshow and crash.
                print('No camera found')
                break
            bzw_frame = bzw_detection_model.run(frame)
            jtbz_frame = jtbz_class_model.run(frame)
            if bzw_frame is not None:
                cv2.imshow('img_objection', bzw_frame)
            if jtbz_frame is not None:
                cv2.imshow("jtbz", jtbz_frame)
            if cv2.waitKey(1) == ord('q'):
                break
    finally:
        # Release the camera and close windows even on an exception.
        capture.release()
        cv2.destroyAllWindows()


if __name__ == "__main__":
    main()

