from ai_library.components.tflite_infer import TfliteRun
from ai_library.components.config import cfg
from ai_library.components.prior_box import priorsBox
from ai_library.components.utils import parsePredict, showImageZH
from resource.resourcePath import resourcePath
from tools.log import log
import numpy as np
import cv2

class LandmarkRec():
    """TFLite landmark detector: preprocess an image, run inference, draw results.

    Typical use: ``inference(img)`` to compute and cache detections, then
    ``imShow(img)`` to render them onto the image.
    """

    def __init__(self):
        # Load the TFLite model once; path comes from the project resource registry.
        self.tflite_run = TfliteRun(model_path=resourcePath.getLandmarkDetectModelByTflitePath())
        # Precompute anchor (prior) boxes for the configured input size.
        priors, _ = priorsBox(cfg, image_sizes=cfg["input_size"])
        # float16 halves the prior-box memory; presumably matches what
        # parsePredict expects — TODO confirm against parsePredict.
        self.priors = priors.astype(np.float16)
        # Latest [boxes, classes, scores]; empty until inference() has run.
        self.predictions = []

    def imgPreprocessing(self, img):
        """Resize to 320x240, convert BGR->RGB, scale to [-0.5, 0.5], add batch dim.

        :param img: BGR image as produced by cv2.imread.
        :return: float32 array of shape (1, 240, 320, 3).
        """
        input_data = cv2.resize(img, (320, 240))
        input_data = np.float32(input_data.copy())
        input_data = cv2.cvtColor(input_data, cv2.COLOR_BGR2RGB)
        # Map pixel values from [0, 255] to [-0.5, 0.5].
        input_data = input_data / 255.0 - 0.5
        # Fix: removed dead `h, w, _ = img.shape` — the values were never used.
        return input_data[np.newaxis, ...]

    def inference(self, img):
        """Run the model on a BGR image; cache and return [boxes, classes, scores]."""
        input_data = self.imgPreprocessing(img)
        predictions = self.tflite_run.inference(input_data)
        boxes, classes, scores = parsePredict(predictions, self.priors, cfg)
        self.predictions = [boxes, classes, scores]
        return self.predictions

    def imShow(self, img):
        """Draw each cached detection onto `img` and return the annotated image.

        Fix: the original returned None when no predictions were cached,
        which crashed cv2.imshow downstream; now the image is always returned.
        """
        if self.predictions:
            boxes, classes, scores = self.predictions
            for prior_index in range(len(classes)):
                img = showImageZH(img, boxes, classes, scores,
                                  cfg["cam_height"], cfg["cam_width"],
                                  prior_index, cfg['labels_list'])
        return img

if __name__ == "__main__":
    img = cv2.imread("../resource/image_test/landmark.jpg")
    landmark_rec = LandmarkRec()

    log.info(landmark_rec.inference(img))

    img = landmark_rec.imShow(img)
    cv2.imshow("landmark_rec", img)
    cv2.waitKey(500)
