from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from tflite_runtime.interpreter import Interpreter
from PIL import Image, ImageDraw, ImageFont
import cv2
import numpy as np
import time
import io
import argparse
import sensors
import gui
import servo


import sys
sys.path.append("..")

#import picamera

#import tensorflow as tf


# Module-level flag; declared `global` in main() but never read or written
# there — NOTE(review): appears to be vestigial, candidate for removal.
key_detect = 0


def load_labels(path):
    """Read a label file and return a dict mapping line index -> label text."""
    labels = {}
    with open(path, 'r') as f:
        for idx, raw_line in enumerate(f):
            labels[idx] = raw_line.strip()
    return labels


def set_input_tensor(interpreter, image):
    """Copy `image` into the interpreter's first input tensor (batch slot 0)."""
    input_details = interpreter.get_input_details()[0]
    # tensor() returns a callable giving a live view of the tensor buffer;
    # assigning through the slice writes the pixels in place.
    buffer_view = interpreter.tensor(input_details['index'])()
    buffer_view[0][:, :] = image


def classify_image(interpreter, image, top_k=1):
    """Run one inference and return the top_k classification results.

    Args:
        interpreter: an allocated TFLite Interpreter with `image` compatible
            with its input tensor shape.
        image: the preprocessed input image.
        top_k: number of results to return (clamped to the class count).

    Returns:
        A list of (label_index, score) tuples sorted by descending score.
    """
    set_input_tensor(interpreter, image)
    interpreter.invoke()
    output_details = interpreter.get_output_details()[0]
    output = np.squeeze(interpreter.get_tensor(output_details['index']))

    # If the model is quantized (uint8 data), dequantize back to real scores.
    if output_details['dtype'] == np.uint8:
        scale, zero_point = output_details['quantization']
        output = scale * (output - zero_point)

    # Bug fix: np.argpartition only guarantees the top_k entries are the
    # largest, not that they are in order, so results[0] could be wrong for
    # top_k > 1; also kth=top_k raised for top_k == len(output). Partition on
    # kth=top_k-1 (valid for any top_k <= class count) then sort the winners.
    top_k = min(top_k, output.size)
    winners = np.argpartition(-output, top_k - 1)[:top_k]
    winners = winners[np.argsort(-output[winners])]
    return [(int(i), output[i]) for i in winners]


def cv2ImgAddText(img, text, left, top, textColor=(80, 90, 156), textSize=20):
    """Draw text (including CJK glyphs) onto an image; return a BGR ndarray.

    OpenCV's putText cannot render non-ASCII glyphs, so the frame is
    round-tripped through PIL: convert to RGB, draw with a TrueType font,
    convert back to OpenCV's BGR layout.
    """
    # Accept either a PIL image or an OpenCV BGR ndarray.
    if isinstance(img, np.ndarray):
        img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    # SimSun covers Chinese glyphs; the .ttc file must exist on the system.
    font = ImageFont.truetype("simsun.ttc", textSize, encoding="utf-8")
    ImageDraw.Draw(img).text((left, top), text, textColor, font=font)
    # Hand back a BGR ndarray so the caller can keep using OpenCV APIs.
    return cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)

def main():
    """Capture camera frames, classify each one, and drive the GUI/servo/sensors.

    Loops until the GUI reports button 3 (quit). Each iteration: grab a frame,
    center-crop it square, classify with the TFLite model, overlay the label,
    update the GUI, and actuate the servo for the predicted class.
    """
    labels = load_labels("model1/labels.txt")

    interpreter = Interpreter("model1/model.tflite")
    interpreter.allocate_tensors()
    # Model input size; kept for reference (the resize below is hard-coded to
    # 224x224, which presumably matches — TODO confirm against the model).
    _, height, width, _ = interpreter.get_input_details()[0]['shape']

    cap = cv2.VideoCapture(0)
    # Bug fix: cap.set(640, 480) passed 640 as the *property id* and silently
    # did nothing; the property constants must be given explicitly.
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)

    sensors.setup()
    servo.init()
    global key_detect

    while True:
        ret, image_src = cap.read()
        # Robustness: a failed read returns (False, None); previously this
        # crashed with AttributeError on image_src.shape.
        if not ret:
            print("Camera read failed; stopping.")
            break

        frame_height, frame_width = image_src.shape[:2]

        # Center-crop the frame to a square before resizing for the model.
        cut_d = (frame_width - frame_height) // 2
        crop_img = image_src[0:frame_height, cut_d:cut_d + frame_height]
        image = cv2.resize(crop_img, (224, 224), interpolation=cv2.INTER_AREA)

        # Bug fix: time.clock() was removed in Python 3.8, and the original
        # mixed time.clock() with time.time(); use one monotonic clock.
        start_time = time.perf_counter()
        # The original `times` toggle always reset to 1, so classification ran
        # every frame anyway; the dead counter is removed.
        results = classify_image(interpreter, image)
        label_id, prob = results[0]
        print(label_id, prob, time.perf_counter() - start_time)

        detect = cv2ImgAddText(
            crop_img, labels[label_id] + " " + str(round(prob, 3)), 10, 65, (0, 0, 139), 40)

        sensors.scan_sensors()
        gui.gui_update(detect)
        gui.window['gar1'].update(labels[label_id])
        btn = gui.gui_scan()

        servo.pull_gar(label_id, prob)

        # Flush a few buffered frames so the next read() is near real time;
        # bail out early if the quit button was pressed.
        for _ in range(4):
            cap.grab()
            if btn == 3:
                break

        if btn == 3:
            break

    cap.release()
    sensors.destroy()


# Run the detection loop only when executed as a script, not on import.
if __name__ == '__main__':
    main()
