from ulab import numpy as np
from media.sensor import *
from media.display import *
from media.media import *
import image
import time,sys,os,utime

# --- camera / display configuration --------------------------------------
sensor_id = 2
# LAB color threshold used to segment the (blue) target blob.
blue_threshold = (0, 100, -128, 127, -125, -26)

# Display mode: one of "VIRT", "LCD" or "HDMI".
DISPLAY_MODE = "VIRT"

# Output resolution for each supported display mode.
_MODE_SIZES = {
    "VIRT": (320, 240),     # virtual (IDE) display
    "LCD": (800, 480),      # 3.1-inch LCD panel
    "HDMI": (1920, 1080),   # HDMI expansion board
}

if DISPLAY_MODE not in _MODE_SIZES:
    raise ValueError("未知的 DISPLAY_MODE，请选择 'VIRT', 'LCD' 或 'HDMI'")
DISPLAY_WIDTH, DISPLAY_HEIGHT = _MODE_SIZES[DISPLAY_MODE]

def Display_Init():
    """Bring up the display device selected by the global DISPLAY_MODE."""
    if DISPLAY_MODE not in ("VIRT", "LCD", "HDMI"):
        raise ValueError("未知的 DISPLAY_MODE，请选择 'VIRT', 'LCD' 或 'HDMI'")
    if DISPLAY_MODE == "VIRT":
        # Virtual (IDE) framebuffer at a fixed 30 fps.
        Display.init(Display.VIRT, width=DISPLAY_WIDTH, height=DISPLAY_HEIGHT, fps=30)
    elif DISPLAY_MODE == "LCD":
        # 3.1-inch ST7701 panel; frames are not mirrored to the IDE.
        Display.init(Display.ST7701, width=DISPLAY_WIDTH, height=DISPLAY_HEIGHT, to_ide=False)
    else:
        # HDMI via the LT9611 bridge; frames also mirrored to the IDE.
        Display.init(Display.LT9611, width=DISPLAY_WIDTH, height=DISPLAY_HEIGHT, to_ide=True)

# Filter sample period (seconds); overwritten each frame from the measured FPS.
Ts = 1  # placeholder until the first FPS measurement is available
# State-space matrices. State vector is [x, y, w, h, dx, dy]:
# constant-velocity model on (x, y); w and h are modeled as static.
A = np.array([[1,0,0,0,Ts,0],[0,1,0,0,0,Ts],[0,0,1,0,0,0],[0,0,0,1,0,0],[0,0,0,0,1,0],[0,0,0,0,0,1]])
# Observation matrix: every state component is observed directly (C = I).
C = np.array([[1,0,0,0,0,0],[0,1,0,0,0,0],[0,0,1,0,0,0],[0,0,0,1,0,0],[0,0,0,0,1,0],[0,0,0,0,0,1]])
# Process noise covariance (diagonal).
Q_value = [1.5e-5,1.5e-5,1e-4,1e-4,4.5e-3,4.5e-3]
Q = np.diag(Q_value)
# Observation noise covariance (diagonal).
# BUG FIX: the y-entry was 1e-64 (effectively zero), breaking the symmetric
# x/y noise pairing used for every other pair here and in Q; it should be 1e-6.
R_value = [1e-6,1e-6,1e-7,1e-7,1e-5,1e-5]
R = np.diag(R_value)
# Observation vector Z components.
x = 0   # top-left x coordinate
y = 0   # top-left y coordinate
w = 0   # bounding-box width
h = 0   # bounding-box height
dx = 0  # top-left x velocity
dy = 0  # top-left y velocity
Z = np.array([x,y,w,h,dx,dy])   # observed measurement
last_frame_x = x  # previous frame's top-left x
last_frame_y = y  # previous frame's top-left y
last_frame_location = [0 for _ in range(4)]  # previous box as [x1, y1, x2, y2]
last_frame_rect = [0 for _ in range(4)]      # previous box as [x, y, w, h]
box = [0 for _ in range(4)]  # current box corners [x1, y1, x2, y2]
# Kalman filter working variables.
x_hat = np.array([80,60,30,30,2,2])   # posterior (corrected) state estimate
x_hat_minus = np.array([0,0,0,0,0,0]) # prior (predicted) state estimate
p_value = [10 for _ in range(6)]
p = np.diag(p_value)  # error covariance matrix
flag_action = 0       # 0 = acquiring a target, 1 = tracking
error_range = 7e-1

# 卡尔曼滤波函数
def Kalman_Filter(Z):
    """Run one predict/correct cycle of the linear Kalman filter.

    Reads and updates the module globals A, C, Q, R, x_hat, x_hat_minus, p.
    Z is the observation vector [x, y, w, h, dx, dy]; the updated posterior
    state estimate x_hat is returned.
    """
    global A, C, Q, R, x_hat, x_hat_minus, p
    # --- prediction step ---
    x_hat_minus = np.dot(A, x_hat)
    p_minus = np.dot(A, np.dot(p, A.T)) + Q
    # --- correction step ---
    innovation_cov = np.dot(np.dot(C, p_minus), C.T) + R
    # Add a small diagonal term so a near-singular matrix stays invertible.
    eps = 1e-4
    stabilized = innovation_cov + eps * np.eye(innovation_cov.shape[0])
    # Kalman gain.
    gain = np.dot(np.dot(p_minus, C.T), np.linalg.inv(stabilized))
    residual = Z - np.dot(C, x_hat_minus)
    x_hat = x_hat_minus + np.dot(gain, residual)
    p = np.dot(np.eye(6) - np.dot(gain, C), p_minus)
    return x_hat

def calculate_IoU(box1, box2):
    """Return the Intersection-over-Union of two axis-aligned boxes.

    Each box is [x1, y1, x2, y2] (top-left and bottom-right corners).
    Returns 0 when the boxes do not strictly overlap.
    """
    ax1, ay1, ax2, ay2 = box1
    bx1, by1, bx2, by2 = box2
    # Overlap extents; non-positive means the boxes are disjoint (or touch).
    inter_w = min(ax2, bx2) - max(ax1, bx1)
    inter_h = min(ay2, by2) - max(ay1, by1)
    if inter_w <= 0 or inter_h <= 0:
        return 0
    intersection = inter_w * inter_h
    area_a = (ax2 - ax1) * (ay2 - ay1)
    area_b = (bx2 - bx1) * (by2 - by1)
    union = area_a + area_b - intersection
    return intersection / union



# Main program: initialise camera, display and media stack, then track the
# largest blue blob.  flag_action == 0 acquires a target; flag_action == 1
# tracks it with the Kalman filter, coasting on the filter's prediction
# whenever no IoU-matching blob is found in the current frame.
try:
    # Camera (sensor) initialisation
    sensor=Sensor(id=sensor_id,width=DISPLAY_WIDTH,height=DISPLAY_HEIGHT)
    # Reset the sensor
    sensor.reset()

    # Set output size and channel
    sensor.set_framesize(width=DISPLAY_WIDTH,height=DISPLAY_HEIGHT,chn=CAM_CHN_ID_1)
    # Set output pixel format
    sensor.set_pixformat(Sensor.RGB565,CAM_CHN_ID_1)
    # Horizontal mirror on
    sensor.set_hmirror(True)
    # Vertical flip off
    sensor.set_vflip(False)

    # Display device initialisation
    Display_Init()

    # Media stack initialisation
    MediaManager.init()

    sensor.run()

    # Clock object used for FPS measurement
    clock=utime.clock()

    while True:
        os.exitpoint()

        # Mark the start of this frame for the FPS clock
        clock.tick()


        # Capture the current frame
        img=sensor.snapshot(CAM_CHN_ID_1)
        if flag_action == 0:
            # Acquisition phase: latch onto the first sufficiently large blob.
            for blob in img.find_blobs([blue_threshold],pixels_threshold=2000,area_threshold=2000):
                if blob:
                    img.draw_rectangle(blob.rect(), color = (255, 0, 0))
                    rect = blob.rect()  # blob position and size (x, y, w, h)
                    box = [rect[0], rect[1], rect[0] + rect[2], rect[1] + rect[3]] # top-left and bottom-right corners
                    last_frame_rect = rect
                    last_frame_location = box
                    x, y, w, h = rect[0], rect[1], rect[2], rect[3]
                    # Seed the filter state with the detection; zero velocity.
                    x_hat = np.array([x,y,w,h,0,0])
                    last_frame_x, last_frame_y = x, y
                    flag_action = 1
                    print('First blob detected and tracking started.')
        if flag_action == 1:
            # Tracking phase.
            flag_found_blob = False
            blobs = img.find_blobs([blue_threshold],pixels_threshold=2000,area_threshold=2000)  # candidate blobs in this frame
            if blobs:
                for blob in blobs:
                    img.draw_rectangle(blob.rect(), color = (0, 255, 0))
                    # Get x, y, w, h of the candidate
                    rect = blob.rect()
                    box = [rect[0], rect[1], rect[0] + rect[2], rect[1] + rect[3]]
                    # IoU gating: accept only candidates overlapping the last track.
                    if calculate_IoU(box,last_frame_location) > 0.2:
                        flag_found_blob = True
                        x, y, w, h = rect[0], rect[1], rect[2], rect[3]
                        # Finite-difference velocity of the top-left corner.
                        dx = (x - last_frame_x) / Ts
                        dy = (y - last_frame_y) / Ts
                        Z = np.array([x, y, w, h, dx, dy])  # measurement
                        x_hat = Kalman_Filter(Z)
                        last_frame_x, last_frame_y = x, y
                        #img.draw_rectangle(last_frame_rect, color = (0, 0, 255))
                        img.draw_rectangle(rect, color = (255, 0, 0))
                        last_frame_rect = rect
                        last_frame_location = box
                        print(Z)
                        print(x_hat)
            if not flag_found_blob:
                # No matching blob: coast on the constant-velocity prediction
                # and feed it back into the filter as a pseudo-measurement.
                x,y,w,h = (x_hat[0]+x_hat[4]*Ts),(x_hat[1]+x_hat[5]*Ts),x_hat[2],x_hat[3]
                dx = (x - last_frame_x) / Ts
                dy = (y - last_frame_y) / Ts
                Z = np.array([x, y, w, h, dx, dy])
                x_hat = Kalman_Filter(Z)
                predicted_rect = [
                    int(x_hat[0]),
                    int(x_hat[1]),
                    int(x_hat[2]),
                    int(x_hat[3])
                ]
                # White rectangle marks a predicted (not detected) position.
                img.draw_rectangle(predicted_rect, color = (255, 255, 255))
                last_frame_x, last_frame_y = x, y
                last_frame_rect = [x,y,w,h]
                last_frame_location = [x,y,(x + w),(y + h)]
                print(x_hat)

        # Show the current frame
        Display.show_image(img)
        print("fps: ",clock.fps())
        # Refresh the sample period from the measured frame rate and push it
        # into the transition matrix used by the next filter step.
        # NOTE(review): divides by fps with no guard — verify clock.fps()
        # can never return 0 here.
        fps=clock.fps()
        Ts=1.0/fps
        A[0,4],A[1,5]=Ts,Ts


except KeyboardInterrupt as e:
    print("用户停止: ", e)
except BaseException as e:
    print(f"异常: {e}")
finally:
    # Stop the sensor
    # NOTE(review): if an exception occurred before `sensor` was assigned,
    # this isinstance check raises NameError — confirm intended.
    if isinstance(sensor, Sensor):
        sensor.stop()
    # De-initialise the display module
    Display.deinit()
    os.exitpoint(os.EXITPOINT_ENABLE_SLEEP)
    time.sleep_ms(100)
    # Release media buffers
    MediaManager.deinit()


