import os
import ujson
import aicube
from libs.PipeLine import ScopedTiming
from libs.Utils import *
from media.sensor import *
from media.display import *
from media.media import *
import nncase_runtime as nn
import ulab.numpy as np
import image
import gc
import time
from machine import UART, FPIOA

# Global state variables for the detection/send loop
sent_count = 0                # number of results already sent over UART
current_object_start_time = 0 # timestamp when the current object's countdown started
total_start_time = 0          # timestamp when the whole session started
has_sent_current = False      # whether the current object's result has been sent
last_send_time = 0            # timestamp of the most recent UART send
COOLDOWN = 10                 # cooldown between sends, seconds
TIMEOUT = 13                  # per-object timeout before sending a default, seconds

# Pin mux and UART initialisation: UART2 TX/RX on pins 11/12, 115200 8N1
fpioa = FPIOA()
fpioa.set_function(11, FPIOA.UART2_TXD)
fpioa.set_function(12, FPIOA.UART2_RXD)
uart = UART(UART.UART2, baudrate=115200, bits=UART.EIGHTBITS, parity=UART.PARITY_NONE, stop=UART.STOPBITS_ONE)

# Display configuration: "lcd" (800x480 ST7701) or HDMI-style (1920x1080)
display_mode = "lcd"
if display_mode == "lcd":
    DISPLAY_WIDTH = ALIGN_UP(800, 16)
    DISPLAY_HEIGHT = 480
else:
    DISPLAY_WIDTH = ALIGN_UP(1920, 16)
    DISPLAY_HEIGHT = 1080

# Frame size of the RGB888-planar AI channel (width aligned to 16 bytes)
OUT_RGB888P_WIDTH = ALIGN_UP(1280, 16)
OUT_RGB888P_HEIGH = 720

# Model deployment files on the SD card
root_path = "/sdcard/mp_deployment_source/"
config_path = root_path + "deploy_config.json"
debug_mode = 1

def two_side_pad_param(input_size, output_size):
    """Compute symmetric letterbox padding for scaling input_size to output_size.

    The input is scaled by the limiting (smaller) ratio so aspect ratio is
    preserved, and the leftover space on each axis is split between the two
    sides.

    Args:
        input_size: (width, height) of the source frame.
        output_size: (width, height) expected by the model.

    Returns:
        Tuple ``(top, bottom, left, right, ratio)`` — paddings in pixels and
        the uniform scale factor applied to the input.
    """
    ratio_w = output_size[0] / input_size[0]
    ratio_h = output_size[1] / input_size[1]
    ratio = min(ratio_w, ratio_h)  # limiting axis keeps the aspect ratio
    new_w = int(ratio * input_size[0])
    new_h = int(ratio * input_size[1])
    dw = (output_size[0] - new_w) / 2
    dh = (output_size[1] - new_h) / 2
    # The -0.1/+0.1 nudges make round() split an odd leftover pixel as
    # floor/ceil between the two sides so padded size matches exactly.
    top = int(round(dh - 0.1))
    bottom = int(round(dh + 0.1))
    left = int(round(dw - 0.1))
    # BUG FIX: was `round(dw - 0.1)`, which dropped one pixel of right
    # padding whenever the horizontal leftover was odd, so
    # left + new_w + right came up one short of the output width.
    right = int(round(dw + 0.1))
    return top, bottom, left, right, ratio

def read_deploy_config(config_path):
    """Load and parse the JSON deployment configuration at *config_path*."""
    json_file = open(config_path, 'r')
    try:
        # Parse the whole file into a dict via ujson (MicroPython JSON module).
        return ujson.load(json_file)
    finally:
        json_file.close()

def label_to_waste_category(label):
    """Map a detection label index to its UART waste-category command string.

    Labels are grouped into four category codes; anything not in an explicit
    group falls back to the "@2" command.
    """
    category_groups = (
        ((4, 5, 6), "@3\r\n"),
        ((2, 3, 9), "@1\r\n"),
        ((7, 8), "@4\r\n"),
    )
    for members, command in category_groups:
        if label in members:
            return command
    return "@2\r\n"

def detection():
    """Run the waste-detection loop: capture frames, run the kmodel on the
    KPU, and send the detected waste-category command over UART.

    The loop exits after 10 UART sends or 180 seconds total.  After each
    send a COOLDOWN period suppresses further processing; if no confident
    detection is sent within TIMEOUT seconds for the current object, a
    default "@1" command is sent instead.

    Returns:
        0 on normal completion.
    """
    global sent_count, current_object_start_time, total_start_time, has_sent_current, last_send_time

    # Initialise session state (counters and timers).
    sent_count = 0
    total_start_time = time.time()
    current_object_start_time = time.time()
    last_send_time = 0  # no send has happened yet
    has_sent_current = False

    print("det_infer start")
    deploy_conf = read_deploy_config(config_path)
    kmodel_name = deploy_conf["kmodel_path"]
    labels = deploy_conf["categories"]
    confidence_threshold = 0.5
    nms_threshold = deploy_conf["nms_threshold"]
    img_size = deploy_conf["img_size"]
    num_classes = deploy_conf["num_classes"]
    color_four = get_colors(num_classes)
    nms_option = deploy_conf["nms_option"]
    model_type = deploy_conf["model_type"]

    # Flatten the three anchor groups into one list for post-processing.
    if model_type == "AnchorBaseDet":
        anchors = deploy_conf["anchors"][0] + deploy_conf["anchors"][1] + deploy_conf["anchors"][2]

    kmodel_frame_size = img_size
    frame_size = [OUT_RGB888P_WIDTH, OUT_RGB888P_HEIGH]
    strides = [8, 16, 32]

    # Letterbox padding parameters mapping the camera frame to the model input.
    top, bottom, left, right, ratio = two_side_pad_param(frame_size, kmodel_frame_size)

    # Load the kmodel and build the ai2d pre-processing pipeline:
    # pad with gray (114,114,114), then bilinear-resize to model input size.
    kpu = nn.kpu()
    kpu.load_kmodel(root_path + kmodel_name)
    ai2d = nn.ai2d()
    ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, nn.ai2d_format.NCHW_FMT, np.uint8, np.uint8)
    ai2d.set_pad_param(True, [0,0,0,0,top,bottom,left,right], 0, [114,114,114])
    ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel)
    ai2d_builder = ai2d.build([1,3,OUT_RGB888P_HEIGH,OUT_RGB888P_WIDTH], [1,3,kmodel_frame_size[1],kmodel_frame_size[0]])

    # Sensor setup: default channel feeds the display (YUV420),
    # channel 2 feeds the model (RGB888 planar).
    sensor = Sensor()
    sensor.reset()
    sensor.set_hmirror(False)
    sensor.set_vflip(False)
    sensor.set_framesize(width=DISPLAY_WIDTH, height=DISPLAY_HEIGHT)
    sensor.set_pixformat(PIXEL_FORMAT_YUV_SEMIPLANAR_420)
    sensor.set_framesize(width=OUT_RGB888P_WIDTH, height=OUT_RGB888P_HEIGH, chn=CAM_CHN_ID_2)
    sensor.set_pixformat(PIXEL_FORMAT_RGB_888_PLANAR, chn=CAM_CHN_ID_2)

    # Bind the camera preview channel directly to a display video layer.
    sensor_bind_info = sensor.bind_info(x=0, y=0, chn=CAM_CHN_ID_0)
    Display.bind_layer(**sensor_bind_info, layer=Display.LAYER_VIDEO1)

    if display_mode == "lcd":
        Display.init(Display.ST7701, to_ide=True)
    else:
        Display.init(Display.LT9611, to_ide=True)

    # Overlay image for drawing boxes/labels on top of the video layer.
    osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888)
    MediaManager.init()
    sensor.run()

    rgb888p_img = None
    ai2d_input_tensor = None
    # Pre-allocated output tensor reused by every ai2d run.
    data = np.ones((1,3,kmodel_frame_size[1],kmodel_frame_size[0]), dtype=np.uint8)
    ai2d_output_tensor = nn.from_numpy(data)

    # Main loop: exit after 10 sends or 180 seconds overall.
    while sent_count < 10 and (time.time() - total_start_time) <= 180:
           with ScopedTiming("total", debug_mode > 0):
               current_time = time.time()
               time_since_last_send = current_time - last_send_time

               # Cooldown check: skip all processing within COOLDOWN of the last send.
               if time_since_last_send < COOLDOWN:
                   # Show remaining cooldown time.
                   print(f"[COOLDOWN] Waiting {COOLDOWN - time_since_last_send:.1f}s")
                   # Reset the per-object timer so TIMEOUT counts from cooldown end.
                   current_object_start_time = current_time
                   continue

               # Timeout handling: if nothing was sent within TIMEOUT seconds
               # for the current object, send the default category "@1".
               if (current_time - current_object_start_time) >= TIMEOUT and not has_sent_current:
                   print("[UART SEND] Timeout, sending default @1")
                   uart.write("@1\r\n".encode())
                   sent_count += 1
                   has_sent_current = True
                   last_send_time = current_time  # record send time
                   print(f"[PROGRESS] Sent: {sent_count}/10")

                   if sent_count < 10:
                       current_object_start_time = time.time()
                       has_sent_current = False
                   continue

               rgb888p_img = sensor.snapshot(chn=CAM_CHN_ID_2)
               osd_img.clear()

           # NOTE(review): from here on we are outside the ScopedTiming block,
           # so inference/draw time is not included in the "total" timing scope
           # — confirm whether that is intentional.
           if rgb888p_img.format() == image.RGBP888:
                # Zero-copy view of the frame, preprocessed by ai2d into the KPU input.
                ai2d_input = rgb888p_img.to_numpy_ref()
                ai2d_input_tensor = nn.from_numpy(ai2d_input)
                ai2d_builder.run(ai2d_input_tensor, ai2d_output_tensor)
                kpu.set_input_tensor(0, ai2d_output_tensor)
                kpu.run()

                # Collect all model outputs as flat arrays.
                results = []
                for i in range(kpu.outputs_size()):
                    out_data = kpu.get_output_tensor(i)
                    results.append(out_data.to_numpy().reshape(-1))
                    del out_data

                # Anchor-based post-processing: NMS-filtered boxes in camera-frame
                # coordinates; each box appears to be [label, confidence, x1, y1, x2, y2]
                # given the indexing below.
                det_boxes = aicube.anchorbasedet_post_process(
                    results[0], results[1], results[2], kmodel_frame_size,
                    frame_size, strides, num_classes, confidence_threshold,
                    nms_threshold, anchors, nms_option
                )

                if det_boxes and not has_sent_current:
                    # Find the highest-confidence detection.
                    max_confidence = 0
                    for box in det_boxes:
                        if box[1] > max_confidence:
                            max_confidence = box[1]
                            best_box = box

                    # Confidence gate: only send when confidence reaches 0.6.
                    if max_confidence < 0.6:  # filter low-confidence detections
                        print("[WARNING] Low confidence detection ignored")
                        continue

                    label_idx = best_box[0]
                    current_category = label_to_waste_category(label_idx)

                    # Send the recognised category over UART.
                    print(f"[UART SEND] Sending category: {current_category}")
                    uart.write(current_category.encode())
                    sent_count += 1
                    has_sent_current = True
                    last_send_time = current_time  # record the last send time
                    print(f"[PROGRESS] Sent: {sent_count}/10")

                    # Draw boxes/labels scaled from the AI frame to display resolution.
                    for box in det_boxes:
                        x1, y1, x2, y2 = box[2:]
                        x = int(x1 * DISPLAY_WIDTH / OUT_RGB888P_WIDTH)
                        y = int(y1 * DISPLAY_HEIGHT / OUT_RGB888P_HEIGH)
                        w = int((x2 - x1) * DISPLAY_WIDTH / OUT_RGB888P_WIDTH)
                        h = int((y2 - y1) * DISPLAY_HEIGHT / OUT_RGB888P_HEIGH)
                        osd_img.draw_rectangle(x, y, w, h, color=color_four[box[0]][1:])
                        text = f"{labels[box[0]]} {box[1]:.2f}"
                        osd_img.draw_string(x, y-40, text, color=color_four[box[0]][1:], scale=2)

                    # Prepare for the next object.
                    if sent_count < 10:
                        current_object_start_time = time.time()
                        has_sent_current = False

                Display.show_image(osd_img, 0, 0, Display.LAYER_OSD3)
                gc.collect()

           rgb888p_img = None

    # Release resources: tensors, sensor, display, media buffers, KPU pool.
    del ai2d_input_tensor
    del ai2d_output_tensor
    sensor.stop()
    Display.deinit()
    MediaManager.deinit()
    gc.collect()
    time.sleep(1)
    nn.shrink_memory_pool()
    print("det_infer end")
    return 0

# Script entry point: run the detection loop when executed directly.
if __name__ == "__main__":
    detection()
