# Edge Impulse - OpenMV Object Detection Example
import sensor, image, time,pyb,omv,math,utime,tf,lcd,gc
from pyb import UART,Pin,Timer,Servo
from umotor import UMOTOR
from pid import PID
from ultrasonic import ULTRASONIC
from button import BUTTON
from centroidtracking import CentroidTracking
from pycommon import PYCOMMON
common=PYCOMMON()
objtraking=CentroidTracking(maxDisappeared=5) # centroid tracker: a target is considered gone after 5 consecutive missing frames
motor=UMOTOR()   # motor driver (pins B0, B1, B4, B5 per UMOTOR)
button=BUTTON()  # single on-board button (D8); the pin is fixed inside BUTTON
object_s=0       # last estimated target distance (module-level state)
flag_lost=0      # count of consecutive frames with no detection
max_object=(0,0,0,0,0.0)  # largest tracked object as (x, y, w, h, score)
lcd.init()
lcd.set_direction(2)
# Car-tracking PID gains (older, more aggressive values kept for reference)
#dis_pid = PID(p=0.5, i=0.01)
#len_pid = PID(p=0.2, i=0.01)
dis_pid = PID(p=0.3, i=0.01)   # NOTE(review): passed as the last arg of common.car_traking — presumably pan/steering; confirm
len_pid = PID(p=0.15, i=0.01)  # NOTE(review): presumably distance/forward-speed PID; confirm against car_traking
net = None     # tf model handle, set in the try block below
labels = None  # model labels, set alongside net
confidence=0.7 # NOTE(review): unused — detection below hard-codes confidence=0.75

try:
    # Load the firmware's built-in face-detection model; labels ship with it.
    labels,net = tf.load_builtin_model('yoloface')
    #net = tf.load("yoloface.tflite",load_to_fb=True)
except Exception as e:
    # Re-raise with a user-actionable message (model/labels missing from flash storage).
    raise Exception('Failed to load "yoloface", did you copy the .tflite and labels.txt file onto the mass-storage device? (' + str(e) + ')')

print(net)


def face_detect(img):
    """Run one YOLO face-detection pass, feed results to the centroid
    tracker, and return a distance estimate for the largest target.

    Args:
        img: frame from sensor.snapshot(); detection boxes and labels
            are drawn onto it in place.

    Returns:
        (object_s, max_object): object_s is the estimated distance
        (0.0 when nothing is tracked); max_object is the largest
        tracked object tuple (x, y, w, h, score), all zeros when
        nothing is tracked.
    """
    max_object = (0, 0, 0, 0, 0.0)
    object_s = 0.0
    # The YOLO output x/width are scaled by the frame aspect ratio;
    # divide them back so boxes land correctly on the displayed image.
    rid = img.width() / img.height()
    xrect = []
    objects = net.detect_yolo(img, confidence=0.75,
                              anchors=[9, 14, 12, 17, 22, 21], nms=0.2)
    for d in objects:
        rect = (int(d.x() / rid), d.y(), int(d.w() / rid), d.h())
        img.draw_rectangle(rect, color=(255, 255, 0))  # detection box (yellow)
        # BUG FIX: the label used the unscaled d.x() while the rectangle
        # used the scaled x — draw both at the same (scaled) position.
        img.draw_string(rect[0], rect[1] - 10, "face %.3f" % (d.output()))
        xrect.append(rect)
    tracked = objtraking.update(xrect)
    if len(tracked):
        max_object = common.find_max_object(tracked)
        # BUG FIX: guard the division — a zero-width object would raise
        # ZeroDivisionError and kill the control loop.
        if max_object[2] > 0:
            object_s = 15000 / (max_object[2] * 2)  # width -> distance estimate
    return object_s, max_object

# Purpose: autonomous face-following control step.
# Input:  one camera frame
# Output: none (drives the motors as a side effect)
def face_traking_auto_control(img):
    """Detect the largest face in *img* and steer the car toward it.

    Stops the motors while no target is visible; flag_lost counts the
    consecutive empty frames (reset after 5).
    """
    global flag_lost
    distance, target = face_detect(img)
    if distance <= 0:
        # Target lost: stop and wait for it to reappear.
        motor.run(0, 0)
        flag_lost += 1
        if flag_lost > 5:  # 5 consecutive frames without a detection
            flag_lost = 0
        return
    flag_lost = 0
    # Horizontal error: image centre minus the target's centre x.
    pan_error = img.width() / 2 - (target[2] / 2 + target[0])
    speed, steer = common.car_traking(distance, pan_error, 100, 30, 100,
                                      len_pid, dis_pid)
    motor.run(speed + steer, speed - steer)


#-------------- Timer section (disabled) START -------------------
'''
is_need_exposeure_sensor = False # 是否需要发送数据的信号标志
def uart_time_trigger(timer):
    global is_need_exposeure_sensor
    is_need_exposeure_sensor = True
# 初始化定时器
tim = Timer(4, freq=0.02)
# 设定定时器的回调函数
tim.callback(uart_time_trigger)
'''
#-------------- Timer section (disabled) END -------------------


sensor.reset()                         # Reset and initialize the camera sensor.
sensor.set_pixformat(sensor.GRAYSCALE)    # Grayscale (not RGB565) — the detector runs on grayscale frames.
sensor.set_framesize(sensor.HQVGA)
clock = time.clock()

click_timer=time.ticks() # reference timestamp for button-press timing
common.Auto_exposeure_sensor() # One-shot exposure calibration at start-up only; periodic
                               # auto-exposure can disturb tracking, so it stays disabled.
while(True):
    #if is_need_exposeure_sensor:
    #    Auto_exposeure_sensor()
    #    is_need_exposeure_sensor=False
    # Grab a frame and run one detect/track/drive step.
    clock.tick()
    img = sensor.snapshot()
    face_traking_auto_control(img)
    print(clock.fps(), "fps", end="\n\n")
    lcd.display(img)
    if button.state():
        click_timer=time.ticks()          # start timing the press
        while button.state():  pass       # busy-wait until the button is released
        if time.ticks()-click_timer>2000: # held for more than 2 s
            break                         # exit the loop, back to the main menu
    else :
        click_timer=time.ticks() # refresh the timing reference
