# Running this ai_infer script directly in the IDE shows "gathering" and "fighting" on screen;
# "running" and "falling" are not displayed, although the aspect-ratio and speed computations do produce results.
# Tightened the fight/gather thresholds; when testing 50 fall actions, only "fight" is now reported.



from libs.PipeLine import PipeLine, ScopedTiming  # 导入图像处理管线类和计时器类
from libs.AIBase import AIBase                    # 导入AI推理基础类
from libs.AI2D import Ai2d                        # 导入图像预处理类
import os
import ujson
from media.media import *                         # 导入媒体库（图像采集、显示等）
from time import *
import nncase_runtime as nn                       # 导入NNCase推理引擎库
import ulab.numpy as np                           # 使用轻量化numpy库ulab，适配MCU
import time
import utime
import image
import random
import gc
import sys
import aicube                                     # 导入aicube库（包含后处理函数）

# Behavior-detection tuning parameters.
DETECTION_CONFIG = {
    # Running threshold: box-center speed in pixels per (nominal 33 ms) frame.
    "run_speed_threshold": 30,
    # Fall threshold: a box with width/height above this ratio is judged "fallen".
    "fall_aspect_ratio": 1.3,
    # Fight/gather detection parameters (sliding window over recent frames).
    "fight_frame_window": 20,    # window length in frames
    "fight_min_frames": 12,       # min overlapping frames to judge "fight"
    "gather_min_frames": 16,     # min overlapping frames to judge "gather"
    "overlap_threshold": 0.6,    # box-pair overlap ratio threshold
    "behavior_display_duration": 3000  # how long to keep a behavior on screen (ms)
}

# Person detector with lightweight behavior analysis, built on AIBase.
class PersonDetectionApp(AIBase):
    """YOLOv5 anchor-based person detector with on-device behavior analysis.

    On top of raw person boxes this class performs:
      * greedy nearest-neighbour ID tracking across frames,
      * per-person behaviors: running (box-center speed) and falling
        (box aspect ratio),
      * global behaviors: fighting / gathering, judged from how many of the
        last N frames contain a strongly overlapping pair of boxes,
      * OSD rendering with a hold time so brief events stay readable.
    """

    def __init__(self, kmodel_path, model_input_size, labels, anchors,
                 confidence_threshold=0.2, nms_threshold=0.5, nms_option=False,
                 strides=[8, 16, 32], rgb888p_size=[224, 224], display_size=[1920, 1080], debug_mode=0):
        """Initialize the detector.

        Args:
            kmodel_path: Path to the compiled .kmodel file.
            model_input_size: Model input size as [w, h].
            labels: Class label list (here just ["person"]).
            anchors: Flat YOLO anchor list of (w, h) pairs.
            confidence_threshold: Minimum detection score.
            nms_threshold: IoU threshold for non-max suppression.
            nms_option: aicube NMS option flag.
            strides: Feature-map strides matching the three model outputs.
            rgb888p_size: Capture resolution [w, h]; width is aligned up to 16.
            display_size: Display resolution [w, h]; width is aligned up to 16.
            debug_mode: >0 enables timing scopes and debug prints.
        """
        super().__init__(kmodel_path, model_input_size, rgb888p_size, debug_mode)
        self.kmodel_path = kmodel_path
        self.model_input_size = model_input_size          # model input [w, h]
        self.labels = labels                              # class label list
        self.anchors = anchors                            # anchor configuration
        self.strides = strides                            # per-output strides
        self.confidence_threshold = confidence_threshold  # score cutoff
        self.nms_threshold = nms_threshold                # NMS IoU cutoff
        self.nms_option = nms_option                      # NMS option flag
        # Widths must be 16-aligned for the capture/display hardware.
        self.rgb888p_size = [ALIGN_UP(rgb888p_size[0], 16), rgb888p_size[1]]
        self.display_size = [ALIGN_UP(display_size[0], 16), display_size[1]]
        self.debug_mode = debug_mode

        # Ai2d instance handles pad + resize (uint8 NCHW in and out).
        self.ai2d = Ai2d(debug_mode)
        self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT, nn.ai2d_format.NCHW_FMT, np.uint8, np.uint8)

        # Tracking / behavior state.
        self.track_history = {}  # pid -> [(center_x, center_y, ticks_ms), ...]
        self.max_history = 5     # history entries kept per id
        self.frame_buffer = []   # recent frames' box lists for fight/gather
        self.person_id = 0       # next fresh id to hand out
        self.detection_config = DETECTION_CONFIG

        # First-seen timestamp per behavior key, e.g. {"running_1": t, "fight": t}.
        # draw_result uses these to hold a label for behavior_display_duration ms.
        self.behavior_timestamps = {}

    def config_preprocess(self, input_image_size=None):
        """Build the Ai2d graph: letterbox pad, then bilinear resize.

        Args:
            input_image_size: Optional [w, h] override; defaults to the
                capture resolution.
        """
        with ScopedTiming("set preprocess config", self.debug_mode > 0):
            ai2d_input_size = input_image_size if input_image_size else self.rgb888p_size
            top, bottom, left, right = self.get_padding_param()
            self.ai2d.pad([0, 0, 0, 0, top, bottom, left, right], 0, [0, 0, 0])
            self.ai2d.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel)
            # Shapes are NCHW, so [h, w] order inside the shape lists.
            self.ai2d.build([1, 3, ai2d_input_size[1], ai2d_input_size[0]],
                            [1, 3, self.model_input_size[1], self.model_input_size[0]])

    def postprocess(self, results):
        """Decode the three raw output tensors into detection boxes."""
        with ScopedTiming("postprocess", self.debug_mode > 0):
            # Anchor-based decode + NMS provided by aicube.
            return aicube.anchorbasedet_post_process(
                results[0], results[1], results[2],
                self.model_input_size, self.rgb888p_size,
                self.strides, len(self.labels),
                self.confidence_threshold, self.nms_threshold,
                self.anchors, self.nms_option
            )

    def draw_result(self, pl, dets):
        """Draw boxes, IDs and behavior labels onto the OSD layer.

        Per-person behaviors (running/falling) and global behaviors
        (fight/gather) stay on screen for behavior_display_duration ms after
        first detection; expired entries are removed from behavior_timestamps.
        """
        current_time = utime.ticks_ms()  # current tick count (ms)
        with ScopedTiming("display_draw", self.debug_mode > 0):
            if dets:
                pl.osd_img.clear()
                tracked_persons = self.track_persons(dets, current_time)
                if self.debug_mode > 0:
                    print(f"Tracked persons: {len(tracked_persons)}")
                    for i, p in enumerate(tracked_persons):
                        print(f"Person {i}: {p.keys()}")

                # Detect behaviors and stamp first-seen times.
                self.detect_behaviors(tracked_persons)

                for person in tracked_persons:
                    if 'id' not in person:
                        print(f"Warning: Person without ID: {person}")
                        continue

                    x1, y1, x2, y2 = person['box']
                    pid = person['id']
                    w = x2 - x1
                    h = y2 - y1

                    # Default style (green per original comments — OSD channel
                    # order unverified; confirm against osd_img docs).
                    color = (0, 255, 0, 0)
                    behavior_text = ""

                    # Running takes display priority over falling.
                    run_key = f"running_{pid}"
                    if run_key in self.behavior_timestamps:
                        elapsed = utime.ticks_diff(current_time, self.behavior_timestamps[run_key])
                        if elapsed < self.detection_config["behavior_display_duration"]:
                            color = (0, 0, 255, 0)  # "red" per original comments
                            behavior_text = "Running"
                        else:
                            del self.behavior_timestamps[run_key]  # expired
                    
                    # Falling, only if running isn't being shown.
                    if not behavior_text:
                        fall_key = f"falling_{pid}"
                        if fall_key in self.behavior_timestamps:
                            elapsed = utime.ticks_diff(current_time, self.behavior_timestamps[fall_key])
                            if elapsed < self.detection_config["behavior_display_duration"]:
                                color = (255, 0, 0, 0)  # "blue" per original comments
                                behavior_text = "Falling"
                            else:
                                del self.behavior_timestamps[fall_key]  # expired

                    # Box plus "ID:n <behavior>" label above it.
                    pl.osd_img.draw_rectangle(x1, y1, int(w), int(h), color=color, thickness=4)
                    display_text = f"ID:{pid} {behavior_text}"
                    pl.osd_img.draw_string_advanced(x1, y1 - 30, 24, display_text, color=color)

                # Global banners: fight first; gather only when no fight entry.
                fight_key = "fight"
                if fight_key in self.behavior_timestamps:
                    elapsed = utime.ticks_diff(current_time, self.behavior_timestamps[fight_key])
                    if elapsed < self.detection_config["behavior_display_duration"]:
                        pl.osd_img.draw_string_advanced(50, 50, 32, "Fighting Detected!", color=(255, 0, 255, 0))
                    else:
                        del self.behavior_timestamps[fight_key]  # expired
                else:
                    gather_key = "gather"
                    if gather_key in self.behavior_timestamps:
                        elapsed = utime.ticks_diff(current_time, self.behavior_timestamps[gather_key])
                        if elapsed < self.detection_config["behavior_display_duration"]:
                            pl.osd_img.draw_string_advanced(50, 50, 32, "Gathering Detected!", color=(255, 255, 0, 0))
                        else:
                            del self.behavior_timestamps[gather_key]  # expired
            else:
                pl.osd_img.clear()
                # NOTE(review): frame_buffer and behavior_timestamps are kept
                # across empty frames (only track_history is dropped) — confirm
                # this is intended so fight/gather survives brief dropouts.
                self.track_history.clear()

    def track_persons(self, dets, current_time):
        """Scale detections to display coordinates and assign stable IDs.

        Returns a list of dicts with 'id', 'box' (x1, y1, x2, y2) and
        'center' keys, and pushes this frame's boxes into frame_buffer.
        """
        tracked = []
        for det_box in dets:
            # det_box[2:6] are x1, y1, x2, y2 in capture coordinates
            # (leading fields presumably label/score — confirm against the
            # aicube post-process docs); scale to display coordinates.
            x1 = int(det_box[2] * self.display_size[0] / self.rgb888p_size[0])
            y1 = int(det_box[3] * self.display_size[1] / self.rgb888p_size[1])
            x2 = int(det_box[4] * self.display_size[0] / self.rgb888p_size[0])
            y2 = int(det_box[5] * self.display_size[1] / self.rgb888p_size[1])

            pid = self.assign_id((x1, y1, x2, y2))
            center = ((x1 + x2) // 2, (y1 + y2) // 2)

            # BUG FIX: record the box *center* so assign_id's center-to-center
            # distance test is consistent. The original stored the top-left
            # corner, so for large boxes the center-vs-corner distance always
            # exceeded the 100 px gate, IDs never persisted, and running was
            # never detected.
            history = self.track_history.setdefault(pid, [])
            history.append((center[0], center[1], current_time))
            if len(history) > self.max_history:
                history.pop(0)

            tracked.append({
                'id': pid,
                'box': (x1, y1, x2, y2),
                'center': center
            })

        self.update_frame_buffer(tracked)
        return tracked

    def assign_id(self, box):
        """Greedy nearest-neighbour match against each track's last center.

        Returns an existing id when the new box center lies within 100 px of
        a track's most recent recorded center, otherwise a fresh id.
        """
        x1, y1, x2, y2 = box
        center_x, center_y = (x1 + x2) // 2, (y1 + y2) // 2
        min_dist = float('inf')
        best_id = None

        for pid, history in self.track_history.items():
            if history:
                last_x, last_y, _ = history[-1]
                dist = ((center_x - last_x) ** 2 + (center_y - last_y) ** 2) ** 0.5
                if dist < min_dist and dist < 100:  # 100 px gating radius
                    min_dist = dist
                    best_id = pid

        if best_id is None:
            best_id = self.person_id
            self.person_id += 1

        return best_id

    def update_frame_buffer(self, persons):
        """Push this frame's boxes into the sliding fight/gather window."""
        self.frame_buffer.append([p['box'] for p in persons])
        if len(self.frame_buffer) > self.detection_config['fight_frame_window']:
            self.frame_buffer.pop(0)

    def detect_behaviors(self, persons):
        """Detect per-person (fall/run) and global (fight/gather) behaviors.

        Stamps behavior_timestamps on first detection so draw_result can hold
        the label on screen. Returns the behaviors seen this frame.
        """
        current_time = utime.ticks_ms()
        current_behaviors = []

        for person in persons:
            pid = person['id']
            x1, y1, x2, y2 = person['box']
            w = x2 - x1
            h = y2 - y1

            # Fall: a lying person's box is wider than it is tall.
            if h > 0 and (w / h) > self.detection_config['fall_aspect_ratio']:
                fall_key = f"falling_{pid}"
                if fall_key not in self.behavior_timestamps:  # first sighting
                    self.behavior_timestamps[fall_key] = current_time
                current_behaviors.append({'id': pid, 'type': 'falling'})

            # Run: displacement between the last two track points, normalized
            # to a nominal 33 ms frame, exceeds the speed threshold.
            history = self.track_history.get(pid)
            if history and len(history) >= 2:
                x_prev, y_prev, t_prev = history[-2]
                x_curr, y_curr, t_curr = history[-1]
                dist = ((x_curr - x_prev) ** 2 + (y_curr - y_prev) ** 2) ** 0.5
                # ticks_diff is wrap-safe; clamp to >=1 ms to avoid div-by-zero.
                t_diff = max(1, utime.ticks_diff(t_curr, t_prev))
                speed = dist / t_diff * 33  # px per nominal 33 ms frame

                if speed > self.detection_config['run_speed_threshold']:
                    run_key = f"running_{pid}"
                    if run_key not in self.behavior_timestamps:  # first sighting
                        self.behavior_timestamps[run_key] = current_time
                    current_behaviors.append({'id': pid, 'type': 'running'})

        # Global fight/gather from recent-frame overlap statistics.
        fight_result = self.detect_fight_or_gather()
        if fight_result == 'fight':
            if "fight" not in self.behavior_timestamps:
                self.behavior_timestamps["fight"] = current_time
            current_behaviors.append({'type': 'fight'})
        elif fight_result == 'gather':
            if "gather" not in self.behavior_timestamps:
                self.behavior_timestamps["gather"] = current_time
            current_behaviors.append({'type': 'gather'})

        return current_behaviors

    def detect_fight_or_gather(self):
        """Classify sustained box overlap as 'gather', 'fight' or None.

        Counts how many of the last fight_frame_window frames contain at
        least one strongly overlapping pair: >= gather_min_frames ->
        'gather', else >= fight_min_frames -> 'fight', else None. Only fires
        once the window is full.
        """
        if len(self.frame_buffer) < self.detection_config['fight_frame_window']:
            return None

        overlap_frames = sum(
            1 for frame_boxes in self.frame_buffer
            if len(frame_boxes) >= 2 and self.check_box_overlap(frame_boxes)
        )

        if overlap_frames >= self.detection_config['gather_min_frames']:
            return 'gather'
        if overlap_frames >= self.detection_config['fight_min_frames']:
            return 'fight'
        return None

    def check_box_overlap(self, boxes):
        """Return True if any pair of boxes in this frame overlaps strongly."""
        for i in range(len(boxes)):
            for j in range(i + 1, len(boxes)):
                if self.boxes_overlap(boxes[i], boxes[j]):
                    return True
        return False

    def boxes_overlap(self, box1, box2):
        """True when intersection area / smaller box area > overlap_threshold."""
        x1, y1, x2, y2 = box1
        x3, y3, x4, y4 = box2

        overlap_x1 = max(x1, x3)
        overlap_y1 = max(y1, y3)
        overlap_x2 = min(x2, x4)
        overlap_y2 = min(y2, y4)

        overlap_area = max(0, overlap_x2 - overlap_x1) * max(0, overlap_y2 - overlap_y1)
        min_area = min((x2 - x1) * (y2 - y1), (x4 - x3) * (y4 - y3))

        return min_area > 0 and (overlap_area / min_area) > self.detection_config['overlap_threshold']

    def get_padding_param(self):
        """Compute letterbox padding (top, bottom, left, right).

        Scales the capture frame to fit model_input_size while preserving
        aspect ratio, then splits the leftover pixels evenly on each axis
        (the +/-0.1 rounding nudge matches YOLOv5's letterbox).
        """
        dst_w, dst_h = self.model_input_size
        input_width, input_high = self.rgb888p_size
        ratio = min(dst_w / input_width, dst_h / input_high)
        new_w = int(ratio * input_width)
        new_h = int(ratio * input_high)
        dw = (dst_w - new_w) / 2
        dh = (dst_h - new_h) / 2
        top = int(round(dh - 0.1))
        bottom = int(round(dh + 0.1))
        left = int(round(dw - 0.1))
        # BUG FIX: was round(dw - 0.1), which dropped a pixel of right-hand
        # padding and skewed the letterbox whenever dw was fractional.
        right = int(round(dw + 0.1))
        return top, bottom, left, right

# Script entry point: capture -> detect -> draw -> display loop.
if __name__ == "__main__":
    # Output target for rendered frames: "hdmi" or "lcd".
    output_mode = "lcd"
    # Sensor capture resolution (RGB888 planar).
    sensor_size = [1920, 1080]

    # HDMI renders at full 1080p; the LCD at quarter resolution.
    screen_size = [1920, 1080] if output_mode == "hdmi" else [960, 540]

    # Person-detection kmodel on the SD card.
    model_file = "/sdcard/examples/kmodel/person_detect_yolov5n.kmodel"

    # YOLOv5n anchors: (w, h) pairs for the three output strides.
    yolo_anchors = [10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59, 119,
                    116, 90, 156, 198, 373, 326]

    # Bring up the capture/display pipeline.
    pipeline = PipeLine(rgb888p_size=sensor_size, display_size=screen_size, display_mode=output_mode)
    pipeline.create()

    # Build the detector and compile its preprocessing graph.
    detector = PersonDetectionApp(
        model_file,
        model_input_size=[640, 640],
        labels=["person"],
        anchors=yolo_anchors,
        confidence_threshold=0.2,
        nms_threshold=0.6,
        nms_option=False,
        strides=[8, 16, 32],
        rgb888p_size=sensor_size,
        display_size=screen_size,
        debug_mode=1
    )
    detector.config_preprocess()

    try:
        while True:
            os.exitpoint()  # cooperative exit check for the runtime/IDE
            with ScopedTiming("total", 1):
                frame = pipeline.get_frame()                # grab one frame
                detections = detector.run(frame)            # run inference
                detector.draw_result(pipeline, detections)  # overlay results
                pipeline.show_image()                       # push to display
                gc.collect()                                # reclaim heap
    except Exception as e:
        sys.print_exception(e)                              # report the error
    finally:
        detector.deinit()                                   # release the model
        pipeline.destroy()                                  # tear down pipeline