import numpy as np
import tensorflow as tf
import cv2
import time
from components.utils import parse_predict, show_image, putText, priors_box
from components import config
import os

print(tf.__version__)
# Paths to the two TFLite models used below:
#   MODEL  — object/mask detector fed by img_objection()
#   MODEL2 — MobileNet image classifier fed by img_class()
MODEL = './model/bzw_mask_model.tflite'
MODEL2 = './model/mobilenet_model.tflite'

class tflite:
    """Thin wrapper around a TensorFlow Lite interpreter.

    Loads the model once in ``__init__`` and caches the input/output
    tensor details there — they are fixed for the lifetime of the model,
    so re-querying them on every frame (as the original did) is wasted
    per-inference work.
    """

    def __init__(self, model_path=MODEL):
        # Load the .tflite model and allocate its tensors up front.
        self.interpreter = tf.lite.Interpreter(model_path=model_path)
        self.interpreter.allocate_tensors()
        # Cache I/O tensor metadata once instead of per inference call.
        self.input_details = self.interpreter.get_input_details()
        self.output_details = self.interpreter.get_output_details()

    def inference(self, img):
        """Run one forward pass.

        Args:
            img: numpy array matching the model's input tensor shape and
                dtype (presumably NHWC float32 — confirm against the model).

        Returns:
            numpy array: the model's first output tensor.
        """
        self.interpreter.set_tensor(self.input_details[0]['index'], img)
        self.interpreter.invoke()  # run the forward pass
        return self.interpreter.get_tensor(self.output_details[0]['index'])

def img_class(frame):
    """Classify one camera frame and show it with label + FPS overlay.

    Args:
        frame: BGR image from the camera, or None when the grab failed.

    Side effects:
        Updates the module-level ``start`` timestamp and displays the
        annotated frame in the 'img_class' window.
    """
    global start

    # Guard BEFORE touching the frame: the original resized first, which
    # raises inside cv2.resize when the camera returns None — and it did
    # not return after printing, so it would crash anyway.
    if frame is None:
        print('No camera found')
        return

    frame = cv2.resize(frame, (224, 224))

    # Preprocess: copy, BGR->RGB, float32, scale to [0, 1], add batch axis.
    img = np.float32(frame.copy())
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = img / 255.0
    img = img[np.newaxis, ...]

    y_pred = img_class_model.inference(img)

    # Class names (animal / scenery / flowers / people), in model order.
    labels = ['动物', "风情", "花草", "人物"]
    frame = putText(frame, labels[np.argmax(y_pred)], org=(150, 0))

    # FPS measured over wall-clock time since the previous call.
    fps_str = "FPS: %.2f" % (1 / (time.time() - start))
    start = time.time()
    cv2.putText(frame, fps_str, (0, 25), cv2.FONT_HERSHEY_DUPLEX, 0.75, (0, 255, 0), 2)

    cv2.imshow('img_class', frame)


def img_objection(frame):
    """Run object detection on one camera frame and show boxes + FPS.

    Args:
        frame: BGR image from the camera, or None when the grab failed.

    Side effects:
        Updates the module-level ``start`` timestamp and displays the
        annotated frame in the 'img_objection' window.
    """
    global start

    # Guard BEFORE resizing: the original checked for None only after
    # cv2.resize, which would already have raised — and did not return.
    if frame is None:
        print('No camera found')
        return

    frame = cv2.resize(frame, (320, 240))
    h, w, _ = frame.shape  # used to scale normalized boxes back to pixels

    # Preprocess: copy, BGR->RGB, float32, scale to [-0.5, 0.5], batch axis.
    img = np.float32(frame.copy())
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = img / 255.0 - 0.5
    img = img[np.newaxis, ...]

    predictions = bzw_model.inference(img)
    print(predictions.shape)

    # Decode raw predictions against the precomputed prior boxes.
    boxes, classes, scores = parse_predict(predictions, priors, cfg)

    # Draw every surviving detection onto the display frame.
    for prior_index in range(len(classes)):
        show_image(frame, boxes, classes, scores, h, w, prior_index, cfg['labels_list'])

    fps_str = "FPS: %.2f" % (1 / (time.time() - start))
    start = time.time()
    cv2.putText(frame, fps_str, (25, 25), cv2.FONT_HERSHEY_DUPLEX, 0.75, (0, 255, 0), 2)

    cv2.imshow('img_objection', frame)

# NOTE(review): setting CUDA_VISIBLE_DEVICES here has no effect — TensorFlow
# was already imported at the top of the file. Move this before the
# `import tensorflow` line (or set it in the shell) to actually hide GPUs.
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'

cfg = config.cfg
capture = cv2.VideoCapture(1)  # camera index 1 — TODO confirm device id
priors, _ = priors_box(cfg, image_sizes=(240, 320))
# float16 halves the prior table's memory footprint; parse_predict is
# presumably tolerant of the reduced precision — verify against its impl.
priors = priors.astype(np.float16)

bzw_model = tflite(MODEL)
img_class_model = tflite(MODEL2)

# Initialise the shared FPS timestamp once, right before the loop
# (the original assigned it twice).
start = time.time()

try:
    while True:
        # Check the grab flag: a failed read yields (False, None) and the
        # original silently passed None downstream.
        ret, frame = capture.read()
        if not ret:
            print('No camera found')
            break
        img_class(frame)
        img_objection(frame)
        if cv2.waitKey(1) == ord('q'):
            break  # leave via finally instead of hard exit()
finally:
    # Release the camera and close windows instead of exiting mid-loop
    # with the capture handle still open.
    capture.release()
    cv2.destroyAllWindows()