import cv2
import numpy as np
import onnxruntime as ort
import time
import math
from kem import kem
import random
# import serialwws
# import serial

# flag: running on the vehicle (1 enables serial output; serial imports are commented out above)
car=0
# debug logging flag
debug=0
# frame size is 640x480 (frames are resized to this in the main loop)
#-----tunable parameters-----
anQuanQu=1 # 1: red safe zone, 2: blue safe zone
# flag: object already in the grab area -- appears unused in this file, TODO confirm
yiZhuaQu = 0
# grab-area corner coordinates (old rectangular grab zone, currently disabled)
# zhuaX0=250
# zhuaY0=340
# zhuaX1=390
# zhuaY1=480
# grab-point coordinates (center of the circular grab marker)
zhuaquX = 320
zhuaquY = 410
# max consecutive frames a target may go unmatched (by IoU) before re-acquiring
noioumax=6
noanquanioumax=6

#-----other parameters-----
# max number of ball detections buffered per frame
maxnums=30


# 新增letterbox函数处理图像缩放
def letterbox(im, new_shape=(320, 320), color=(114, 114, 114), auto=False, stride=32):
    """Resize `im` to fit inside `new_shape` keeping aspect ratio, padding the rest.

    Args:
        im: input image (H, W, C).
        new_shape: target (height, width), or a single int for a square.
        color: padding color.
        auto, stride: kept for interface compatibility; not used here.

    Returns:
        (padded_image, scale_ratio, (total_pad_w, total_pad_h))
    """
    src_h, src_w = im.shape[:2]
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)

    # single scale factor that fits both dimensions
    scale = min(new_shape[0] / src_h, new_shape[1] / src_w)
    unpad_w = int(round(src_w * scale))
    unpad_h = int(round(src_h * scale))

    # padding split evenly between the two sides
    pad_w = (new_shape[1] - unpad_w) / 2
    pad_h = (new_shape[0] - unpad_h) / 2

    if (src_w, src_h) != (unpad_w, unpad_h):
        im = cv2.resize(im, (unpad_w, unpad_h), interpolation=cv2.INTER_LINEAR)
    # the +/-0.1 rounding splits an odd pixel of padding between the two sides
    top, bottom = int(round(pad_h - 0.1)), int(round(pad_h + 0.1))
    left, right = int(round(pad_w - 0.1)), int(round(pad_w + 0.1))
    im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)
    return im, scale, (pad_w * 2, pad_h * 2)

def plot_one_box(x, img, color=None, label=None, line_thickness=None):
    """Draw one bounding box (and an optional filled label) on `img` in place.

    Args:
        x: box corners [x1, y1, x2, y2].
        img: image to draw on (modified in place).
        color: BGR color; random if not given.
        label: optional text drawn above the top-left corner.
        line_thickness: line width; derived from image size if not given.
    """
    thickness = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1
    box_color = color or [random.randint(0, 255) for _ in range(3)]
    top_left = (int(x[0]), int(x[1]))
    bottom_right = (int(x[2]), int(x[3]))
    cv2.rectangle(img, top_left, bottom_right, box_color, thickness=thickness, lineType=cv2.LINE_AA)
    if label:
        font_thickness = max(thickness - 1, 1)
        text_w, text_h = cv2.getTextSize(label, 0, fontScale=thickness / 3, thickness=font_thickness)[0]
        # filled background rectangle sized to the label text
        label_corner = (top_left[0] + text_w, top_left[1] - text_h - 3)
        cv2.rectangle(img, top_left, label_corner, box_color, -1, cv2.LINE_AA)
        cv2.putText(
            img,
            label,
            (top_left[0], top_left[1] - 2),
            0,
            thickness / 3,
            # NOTE(review): 225 preserved from the original (possibly intended 255)
            [225, 255, 255],
            thickness=font_thickness,
            lineType=cv2.LINE_AA,
        )

def _make_grid( nx, ny):
        xv, yv = np.meshgrid(np.arange(ny), np.arange(nx))
        return np.stack((xv, yv), 2).reshape((-1, 2)).astype(np.float32)

def cal_outputs(outs, nl, na, model_w, model_h, anchor_grid, stride):
    """Decode raw YOLO head outputs into absolute letterbox-space boxes.

    Args:
        outs: (sum(na*h*w), 5+classes) array; columns 0..3 are decoded in place.
        nl: number of detection levels.
        na: anchors per level.
        model_w, model_h: model input size.
        anchor_grid: (nl, na, 2) anchor sizes.
        stride: per-level stride values.

    Returns:
        The same array with xy/wh columns decoded.
    """
    offset = 0
    for lvl in range(nl):
        gh, gw = int(model_w / stride[lvl]), int(model_h / stride[lvl])
        count = int(na * gh * gw)
        # per-level cell-coordinate grid (inlined equivalent of _make_grid(gw, gh))
        cols, rows = np.meshgrid(np.arange(gh), np.arange(gw))
        level_grid = np.stack((cols, rows), 2).reshape((-1, 2)).astype(np.float32)

        sl = slice(offset, offset + count)
        # centers: sigmoid-space offsets -> pixels, one grid copy per anchor
        outs[sl, 0:2] = (outs[sl, 0:2] * 2.0 - 0.5 + np.tile(level_grid, (na, 1))) * int(stride[lvl])
        # sizes: squared sigmoid-space scale times the matching anchor
        outs[sl, 2:4] = (outs[sl, 2:4] * 2.0) ** 2 * np.repeat(anchor_grid[lvl], gh * gw, axis=0)
        offset += count
    return outs


def post_process_opencv(outputs, model_h, model_w, img_h, img_w, thred_nms, thred_cond, ratio, pad):
    """Map decoded detections from letterbox space back to the original image
    and apply NMS.

    Args:
        outputs: (N, 5+classes) array of [cx, cy, w, h, obj_conf, cls_scores...].
        model_h, model_w: model input size (unused here; kept for interface).
        img_h, img_w: original image size, used for clipping.
        thred_nms: NMS IoU threshold.
        thred_cond: confidence threshold passed to NMSBoxes.
        ratio: letterbox scale factor.
        pad: total letterbox padding (pad_w, pad_h).

    Returns:
        (boxes, confidences, class_ids) kept after NMS, or ([], [], []) when
        there is nothing to keep.
    """
    if len(outputs) == 0:
        return [], [], []
    conf = outputs[:, 4].tolist()

    # class index of each box: argmax over the per-class score columns
    cls_id = np.argmax(outputs[:, 5:], axis=1)  # per-box class index

    # undo letterbox: remove half the total padding, then divide by the scale
    pad_x = pad[0] / 2
    pad_y = pad[1] / 2
    c_x = (outputs[:, 0] - pad_x) / ratio
    c_y = (outputs[:, 1] - pad_y) / ratio
    w = outputs[:, 2] / ratio
    h = outputs[:, 3] / ratio

    # clamp centers to the image and sizes to the remaining room
    c_x = np.clip(c_x, 0, img_w)
    c_y = np.clip(c_y, 0, img_h)
    w = np.clip(w, 1, img_w - c_x)
    h = np.clip(h, 1, img_h - c_y)

    # center/size -> corner boxes [x1, y1, x2, y2]
    p_x1 = c_x - w / 2
    p_y1 = c_y - h / 2
    p_x2 = c_x + w / 2
    p_y2 = c_y + h / 2
    areas = np.stack((p_x1, p_y1, p_x2, p_y2), axis=-1)

    # NMS. NOTE(review): corner boxes are passed, but cv2.dnn.NMSBoxes
    # documents [x, y, w, h] rectangles -- verify against the cv2 version in use.
    ids = cv2.dnn.NMSBoxes(areas.tolist(), conf, thred_cond, thred_nms)
    if len(ids) > 0:
        return areas[ids], np.array(conf)[ids], cls_id[ids]  # indexed by kept ids
    return [], [], []


# 修改后的预处理和后处理函数
def infer_img(img0, net, model_h, model_w, nl, na, stride, anchor_grid, thred_nms=0.4, thred_cond=0.25):
    """Run one detection pass: letterbox -> ONNX inference -> decode -> NMS.

    Args:
        img0: original BGR image.
        net: onnxruntime InferenceSession.
        model_h, model_w: model input size.
        nl, na, stride, anchor_grid: YOLO head layout parameters.
        thred_nms, thred_cond: NMS IoU and confidence thresholds.

    Returns:
        (boxes, confidences, class_ids) in original-image coordinates.
    """
    # letterbox to model size, keeping the scale and padding for later undo
    padded, ratio, pad = letterbox(img0, new_shape=(model_w, model_h))

    # BGR -> RGB, HWC -> CHW, normalise to [0, 1], add batch dim
    chw = padded[:, :, ::-1].transpose(2, 0, 1)
    chw = np.ascontiguousarray(chw)
    blob = (chw.astype(np.float32) / 255.0)[None]

    # forward pass; squeeze away the batch dimension
    raw = net.run(None, {net.get_inputs()[0].name: blob})[0].squeeze(axis=0)

    # decode grid/anchor offsets into absolute letterbox-space boxes
    decoded = cal_outputs(raw, nl, na, model_w, model_h, anchor_grid, stride)

    # map back to the original image and run NMS
    return post_process_opencv(decoded, model_h, model_w, img0.shape[0], img0.shape[1],
                               thred_nms, thred_cond, ratio, pad)



def qiu_process(qiushu, qiuXin, zuijinqiu):
    """Select the ball closest to the grab point.

    Args:
        qiushu: number of valid rows in qiuXin.
        qiuXin: rows of [center_x, center_y, distance, detection_index].
        zuijinqiu: output list, mutated in place; receives
            [center_x, center_y, detection_index] of the closest ball.

    Returns:
        zuijinqiu (also mutated in place). Unchanged when qiushu == 0.
    """
    # min() keeps the first row on ties, matching the original strict ">" scan;
    # unlike the old 999999 sentinel, this works for any distance magnitude.
    if qiushu > 0:
        best = min(qiuXin[:qiushu], key=lambda row: row[2])
        zuijinqiu[0] = best[0]
        zuijinqiu[1] = best[1]
        zuijinqiu[2] = best[3]
    return zuijinqiu

def get_iou(box1, box2):
    """Return the IoU of two [x1, y1, x2, y2] boxes (pixel-inclusive +1 convention)."""
    ax1, ay1, ax2, ay2 = box1[0], box1[1], box1[2], box1[3]
    bx1, by1, bx2, by2 = box2[0], box2[1], box2[2], box2[3]
    # individual box areas (the +1 treats coordinates as inclusive pixel indices)
    area_a = (ay2 - ay1 + 1.) * (ax2 - ax1 + 1.)
    area_b = (by2 - by1 + 1.) * (bx2 - bx1 + 1.)
    # overlap extents, clamped at zero when the boxes are disjoint
    overlap_w = max(min(ax2, bx2) - max(ax1, bx1) + 1, 0)
    overlap_h = max(min(ay2, by2) - max(ay1, by1) + 1, 0)
    intersection = overlap_h * overlap_w
    iou = intersection / (area_a + area_b - intersection)
    if debug:
        print("iou: ", iou)
    return iou

def create_kalman_filter():
    """Create a constant-velocity Kalman filter: state [x, y, vx, vy], measurement [x, y]."""
    kf = cv2.KalmanFilter(4, 2)
    # constant-velocity transition: position += velocity each step
    transition = np.eye(4, dtype=np.float32)
    transition[0, 2] = 1
    transition[1, 3] = 1
    kf.transitionMatrix = transition
    # only the position components are observed
    kf.measurementMatrix = np.eye(2, 4, dtype=np.float32)
    # zero initial state estimate
    kf.statePre = np.zeros((4, 1), dtype=np.float32)
    # measurement noise covariance
    kf.measurementNoiseCov = np.eye(2, dtype=np.float32) * 1e-1
    # process noise covariance
    kf.processNoiseCov = np.eye(4, dtype=np.float32) * 1e-1
    # initial error covariance
    kf.errorCovPre = np.eye(4, dtype=np.float32) * 1
    return kf

if __name__ == "__main__":
    # --- target-state flags ---
    qiusign = 0          # >0 while a ball has been seen recently
    anquansign = 0       # >0 while the safe zone has been seen recently
    anquan_yuce = 0      # 1 while the safe-zone position is a Kalman prediction
    # bug fix: originally initialised to 0, which let the tracker branches run
    # before any safe-zone target had been acquired; 1 mirrors xinqiusign below.
    xinanquansign = 1    # 1 -> must (re)acquire the safe-zone target
    # --- Kalman trackers (ball / safe zone) ---
    kem_qiu = kem()
    kem_anquan = kem()
    # --- lost-target counters ---
    find_iou = 0
    noiousign = 0
    noanquansign = 0
    if car:
        # serial-port init (on-vehicle only; the serialwws import is commented
        # out at the top of the file -- TODO confirm before enabling `car`)
        serialwws.serialwws_init(115200)
    # IoU thresholds for frame-to-frame target association
    iou_th = 0.1
    iou_anquan_th = 0.1  # NOTE(review): currently unused
    xinqiusign = 1       # 1 -> must (re)acquire the ball target
    iou_last = np.zeros(4, dtype=int)
    iou_anquan_last = np.zeros(4, dtype=int)

    # model loading
    model_pb_path = "onnxmodel/best_exp5.onnx"
    so = ort.SessionOptions()
    net = ort.InferenceSession(model_pb_path, so)

    # class-id -> label name
    dic_labels = {0: 'blue',
                  1: 'red',
                  2: 'black',
                  3: 'yellow',
                  4: 'lanqu',
                  5: 'hongqu',
                  6: 'wuguan',
                  7: 'anquan'}

    # model parameters (must match the exported ONNX model)
    model_h = 320
    model_w = 320
    nl = 3
    na = 3
    stride = [8.0, 16.0, 32.0]
    anchors = [[10, 13, 16, 30, 33, 23],
               [30, 61, 62, 45, 59, 119],
               [116, 90, 156, 198, 373, 326]]
    anchor_grid = np.asarray(anchors, dtype=np.float32).reshape(3, -1, 2)

    # video source (camera index 0 or a file)
    video = 0
    # cap = cv2.VideoCapture(video)
    cap = cv2.VideoCapture("videos/12.mp4")
    if debug:
        print("w:", int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)))
        print("h:", int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    flag_det = False
    anQuanXin = [0, 0, 0]     # safe zone: [center_x, center_y, class_id]
    zuijinqiu = [0, 0, 0, 0]  # closest ball: [center_x, center_y, det_index, class_id]
    while True:
        # per-frame state reset
        qiushu = 0
        qiuXin = np.zeros((maxnums, 4), dtype=int)
        anQuanXin[0] = 0
        anQuanXin[1] = 0
        zuijinqiu[0] = 0
        zuijinqiu[1] = 0
        zuijinqiu[2] = 0
        success, img0 = cap.read()
        if success:
            # bug fix: resize only after a successful read -- the original
            # resized first and crashed on a failed read (e.g. end of video)
            img0 = cv2.resize(img0, (640, 480))
            if flag_det:
                t1 = time.time()
                det_boxes, scores, ids = infer_img(img0, net, model_h, model_w, nl, na, stride, anchor_grid, thred_nms=0.4, thred_cond=0.25)
                t2 = time.time()
                jiancedaoanquan = 0  # (detection index + 1) of the safe zone, 0 = none
                lunxunzhi = 0        # running detection index
                for box, score, idd in zip(det_boxes, scores, ids):
                    label = '%s:%.2f' % (dic_labels[idd], score)
                    plot_one_box(box.astype(np.int16), img0, color=(255, 0, 0), label=label, line_thickness=None)
                    # safe zone: class 5 (hongqu) when red, class 4 (lanqu) when blue
                    if (anQuanQu == 1 and idd == 5) or (anQuanQu == 2 and idd == 4):
                        # store index+1 so 0 can mean "not detected this frame"
                        jiancedaoanquan = lunxunzhi + 1
                        anquansign = 1
                    else:
                        anquansign -= 1
                    # candidate balls for the configured safe-zone colour
                    if (anQuanQu == 1 and idd >= 1 and idd <= 3) or (anQuanQu == 2 and (idd == 0 or (idd >= 2 and idd <= 3))):
                        find_qiu = 0
                        if jiancedaoanquan:
                            # skip balls whose box overlaps the safe-zone box
                            if not (box[2] > det_boxes[jiancedaoanquan - 1][0] and box[0] < det_boxes[jiancedaoanquan - 1][2] and box[3] > det_boxes[jiancedaoanquan - 1][1] and box[1] < det_boxes[jiancedaoanquan - 1][3]):
                                find_qiu = 1
                        if jiancedaoanquan == 0 or find_qiu == 1:
                            # record center, distance to grab point, detection index
                            qiuXin[qiushu][0] = int((box[0] + box[2]) / 2)
                            qiuXin[qiushu][1] = int((box[1] + box[3]) / 2)
                            qiuXin[qiushu][2] = int(math.sqrt(pow(qiuXin[qiushu][0] - zhuaquX, 2) + pow(qiuXin[qiushu][1] - zhuaquY, 2)))
                            qiuXin[qiushu][3] = lunxunzhi
                            qiushu += 1
                            qiusign = 1
                    else:
                        qiusign -= 1
                    lunxunzhi += 1

                # ---- safe-zone tracking ----
                kem_anquan_out = [0, 0, 0, 0]
                if xinanquansign and jiancedaoanquan != 0:
                    # acquire a new safe-zone target
                    xinanquansign = 0
                    iou_anquan_last = det_boxes[jiancedaoanquan - 1]
                    kem_anquan = kem(det_boxes[jiancedaoanquan - 1])
                    anQuanXin[2] = ids[jiancedaoanquan - 1]
                elif jiancedaoanquan != 0:
                    # safe zone seen this frame: update the tracker
                    noanquansign = 0
                    anquan_yuce = 0
                    iou_anquan_last = det_boxes[jiancedaoanquan - 1]
                    kem_anquan_out = kem_anquan.kem_get_iou(iou_anquan_last)
                else:
                    noanquansign += 1
                    # bug fix: the original tested `noanquanioumax >= noanquanioumax`
                    # (always true), so the Kalman-prediction branch below was
                    # unreachable. Also avoid predicting while still searching
                    # for a first target (the tracker is not initialised yet).
                    if xinanquansign or noanquansign >= noanquanioumax:
                        xinanquansign = 1
                        noanquansign = 0
                        # print("safe zone not found")
                    else:
                        anquan_yuce = 1
                        kem_anquan_out = kem_anquan.kem_no_iou()
                if xinanquansign == 0:
                    anQuanXin[0] = (kem_anquan_out[0] + kem_anquan_out[2]) // 2
                    anQuanXin[1] = (kem_anquan_out[1] + kem_anquan_out[3]) // 2

                # ---- ball tracking ----
                if xinqiusign and qiushu != 0:
                    # acquire a new ball target: pick the one closest to the grab point
                    xinqiusign = 0
                    zuijinqiu = qiu_process(qiushu, qiuXin, zuijinqiu)
                    iou_last = det_boxes[zuijinqiu[2]]
                    # start tracking the chosen detection
                    kem_qiu = kem(det_boxes[zuijinqiu[2]])
                    zuijinqiu[3] = ids[zuijinqiu[2]]
                    if debug:
                        print("目标物更新")
                else:
                    # associate this frame's detections with the tracked ball by IoU
                    maxiou = np.zeros(2, dtype=np.float32)
                    alun = 0
                    find_iou = 0
                    kem_out = [0, 0, 0, 0]
                    for box, idd in zip(det_boxes, ids):
                        if (anQuanQu == 1 and idd >= 1 and idd <= 3) or (anQuanQu == 2 and (idd == 0 or (idd >= 2 and idd <= 3))):
                            iou = get_iou(iou_last, box)
                            if iou > maxiou[0] and iou >= iou_th:
                                find_iou = 1
                                maxiou[0] = iou
                                maxiou[1] = alun
                        alun += 1
                    if find_iou:
                        noiousign = 0
                        iou_last = det_boxes[int(maxiou[1])]
                        kem_out = kem_qiu.kem_get_iou(iou_last)
                        if debug:
                            print("get_iou")
                    else:
                        noiousign += 1
                        if noiousign >= noioumax:
                            # give up on this target and re-acquire next frame
                            noiousign = 0
                            xinqiusign = 1
                        else:
                            # keep following the Kalman prediction for a few frames
                            kem_out = kem_qiu.kem_no_iou()
                        if debug:
                            print("no_iou")
                    zuijinqiu[0] = (kem_out[0] + kem_out[2]) // 2
                    zuijinqiu[1] = (kem_out[1] + kem_out[3]) // 2

                str_FPS = "FPS: %.2f" % (1. / (t2 - t1))
                cv2.putText(img0, str_FPS, (50, 50), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 3)
                # grab-point marker
                cv2.circle(img0, (zhuaquX, zhuaquY), 30, (255, 255, 0), 3)
                # offsets from the grab point to the current target
                xpian = 0
                ypian = 0

                if xinanquansign == 0:
                    if anquan_yuce == 0:
                        cv2.line(img0, (zhuaquX, zhuaquY), (int(anQuanXin[0]), int(anQuanXin[1])), (255, 100, 0), 2)
                    else:
                        # red line while the position is only a prediction
                        cv2.line(img0, (zhuaquX, zhuaquY), (int(anQuanXin[0]), int(anQuanXin[1])), (0, 0, 255), 2)
                    xpian = zhuaquX - anQuanXin[0]
                    ypian = zhuaquY - anQuanXin[1]
                    print("安全区：", "X偏移：", xpian, " Y偏移：", ypian)
                    if car:
                        # serial output: safe-zone offsets
                        serialwws.serialwws_send_bao(xpian, ypian, anQuanXin[2], 1)
                if xinqiusign == 0:
                    if find_iou:
                        cv2.line(img0, (zhuaquX, zhuaquY), (int(zuijinqiu[0]), int(zuijinqiu[1])), (0, 255, 0), 2)
                    else:
                        cv2.line(img0, (zhuaquX, zhuaquY), (int(zuijinqiu[0]), int(zuijinqiu[1])), (0, 0, 255), 2)
                    xpian = zhuaquX - zuijinqiu[0]
                    ypian = zhuaquY - zuijinqiu[1]
                    print("小球：", "X偏移：", xpian, " Y偏移：", ypian)
                    if car:
                        # serial output: ball offsets
                        serialwws.serialwws_send_bao(xpian, ypian, zuijinqiu[3], 0)
            kem_qiu.draw_trace(img0, 0, 255, 255)
            kem_anquan.draw_trace(img0, 0, 255, 255)
            cv2.imshow("video", img0)
        key = cv2.waitKey(1) & 0xFF
        if key == ord('q'):
            break
        elif key == ord('s'):
            # toggle detection on/off
            flag_det = not flag_det
        elif key == ord('x'):
            # force re-acquisition of the ball target
            xinqiusign = 1
    cap.release()
    cv2.destroyAllWindows()
