from libs.PipeLine import PipeLine, ScopedTiming
from libs.AIBase import AIBase
from libs.AI2D import Ai2d
import os
import ujson
from media.media import *
import nncase_runtime as nn
import ulab.numpy as np
import image
import gc
import sys
import aidemo
from machine import Pin
from machine import FPIOA
import time
import math

class ScopedTiming:
    """Context manager that prints the wall-clock duration of a code section.

    Every call site in this file uses the two-argument form
    ``ScopedTiming(name, debug_mode)``, so the second argument alone must be
    able to enable timing output.
    """

    def __init__(self, name, enabled, debug_mode=1):
        # BUGFIX: debug_mode previously defaulted to 0, which made
        # `enabled and (debug_mode > 0)` always falsy for the two-argument
        # call sites (e.g. ScopedTiming("keypoint", self.debug_mode)), so
        # timings were never printed.  Defaulting to 1 lets `enabled` alone
        # control the gate; passing debug_mode=0 explicitly still disables it.
        self.name = name
        self.enabled = bool(enabled) and (debug_mode > 0)

    def __enter__(self):
        if self.enabled:
            self.start = time.time()
        return self  # allow `with ScopedTiming(...) as t:` if ever needed

    def __exit__(self, exc_type, exc_val, exc_tb):
        if self.enabled:
            print(f"[TIMING] {self.name}: {time.time() - self.start:.3f}s")


class KeyPointDetector:
    """17-point human pose detector (front-end skeleton).

    Holds model/display geometry, the COCO-style skeleton topology and colour
    tables.  The heavy lifting (kmodel inference, Ai2d preprocessing) is
    assumed to live at a lower layer, so preprocess/run return placeholder
    tensors where the real backend would plug in.
    """

    __slots__ = (
        'model_input_size', 'rgb888p_size', 'display_size', 'confidence_threshold',
        'SKELETON', 'LIMB_COLORS', 'KPS_COLORS', 'ai2d', 'debug_mode'
    )

    def __init__(self, kmodel_path, model_input_size, rgb888p_size, display_size,
                 confidence_threshold=0.1, debug_mode=0):
        # Geometry and thresholds (AIBase base class omitted; assumed lower level).
        self.model_input_size = model_input_size
        # Mask the frame width down to a multiple of 16 (16-byte alignment).
        self.rgb888p_size = [rgb888p_size[0] & ~15, rgb888p_size[1]]
        self.display_size = display_size
        self.confidence_threshold = confidence_threshold
        self.debug_mode = debug_mode

        # Limb connections between the 17 keypoints (1-based endpoint ids).
        self.SKELETON = [
            (16, 14), (14, 12), (17, 15), (15, 13), (12, 13), (6, 12),
            (7, 13), (6, 7), (6, 8), (7, 9), (8, 10), (9, 11),
            (2, 3), (1, 2), (1, 3), (2, 4), (3, 5), (4, 6), (5, 7),
        ]
        # One flat colour per limb / keypoint (simplified palettes).
        self.LIMB_COLORS = [(255, 51, 153, 255) for _ in self.SKELETON]
        self.KPS_COLORS = [(255, 0, 255, 0) for _ in range(17)]

        # Preprocessing module placeholder (Ai2d assumed optimised below).
        self.ai2d = None

    def preprocess(self, img):
        """Lightweight letterbox-style preprocessing (crop + scale, stubbed)."""
        src_w = self.rgb888p_size[0]
        src_h = self.rgb888p_size[1]
        dst_w = self.model_input_size[0]
        dst_h = self.model_input_size[1]
        scale = min(dst_w / src_w, dst_h / src_h)
        pad_x = (dst_w - int(src_w * scale)) // 2
        pad_y = (dst_h - int(src_h * scale)) // 2
        # Efficient crop/resize assumed to happen at a lower layer.
        return np.zeros((1, 3, dst_h, dst_w), dtype=np.uint8)

    def postprocess(self, results):
        """Lightweight postprocess: placeholder keypoints + validity flags.

        Returns (valid_flags, keypoints) where keypoints has shape
        (people, 17, 3): x, y in frame coordinates plus a confidence column.
        """
        coord_scale = [self.rgb888p_size[0], self.rgb888p_size[1], 1]
        kps = np.random.rand(10, 17, 3) * coord_scale
        kps[:, :, 2] = np.random.rand(10, 17)  # per-keypoint confidence
        valid = np.random.rand(10) > 0.5
        return (valid, kps)

    def run(self, img):
        """Preprocess, (stub-)infer and postprocess a single frame."""
        with ScopedTiming("keypoint_inference", self.debug_mode):
            _tensor = self.preprocess(img)
            raw = np.zeros((1, 100))  # placeholder inference output
            return self.postprocess(raw)


class SegmentationDetector:
    __slots__ = (
        'labels', 'model_input_size', 'rgb888p_size', 'display_size', 
        'confidence_threshold', 'nms_threshold', 'mask_threshold', 'color_four', 'masks', 'ai2d'
    )
    
    def __init__(self, kmodel_path, labels, model_input_size, rgb888p_size, display_size,
                 confidence_threshold=0.2, nms_threshold=0.5, mask_threshold=0.5, debug_mode=0):
        self.labels = labels
        self.model_input_size = model_input_size
        self.rgb888p_size = [rgb888p_size[0] & ~15, rgb888p_size[1]]
        self.display_size = display_size
        self.confidence_threshold = confidence_threshold
        self.nms_threshold = nms_threshold
        self.mask_threshold = mask_threshold
        self.debug_mode = debug_mode
        
        # 简化颜色定义
        self.color_four = [(255, 0, 0, 60)] * 20  
        self.masks = np.zeros((1, display_size[1], display_size[0], 4), dtype=np.uint8)
        self.ai2d = None  # 省略底层实现
    
    def preprocess(self, img):
        """轻量级预处理：同关键点检测"""
        ratio = min(
            self.model_input_size[0] / self.rgb888p_size[0],
            self.model_input_size[1] / self.rgb888p_size[1]
        )
        new_w = int(self.rgb888p_size[0] * ratio)
        new_h = int(self.rgb888p_size[1] * ratio)
        dx, dy = (self.model_input_size[0] - new_w) // 2, (self.model_input_size[1] - new_h) // 2
        return np.zeros((1, 3, self.model_input_size[1], self.model_input_size[0]), dtype=np.uint8)
    
    def postprocess(self, results):
        """轻量级后处理：过滤检测框和掩码"""
        # 简化返回：(检测框, 标签, 置信度, 掩码)
        boxes = np.random.rand(5, 4) * [self.model_input_size[0], self.model_input_size[1]] * 2
        ids = np.random.randint(0, len(self.labels), 5)
        scores = np.random.rand(5)
        masks = np.random.rand(5, self.display_size[1], self.display_size[0]) > 0.5
        return (boxes, ids, scores, masks)
    
    def run(self, img):
        """运行分割检测"""
        with ScopedTiming("seg_inference", self.debug_mode):
            preprocessed = self.preprocess(img)
            results = np.zeros((1, 200))  # 简化输出
            return self.postprocess(results)


class BehaviorAnalyzer:
    """Rule-based behaviour analysis over pose keypoints and person masks.

    Detects four behaviours per frame: fall, run, crowd (gathering) and
    fight.  Short per-person histories are kept in dicts keyed by a coarse
    integer person id derived from shoulder positions; clean_history()
    prunes stale entries to bound memory.

    NOTE(review): _calculate_speed() returns squared-distance per second, so
    every "speed" threshold in this class is compared in squared units.
    """

    __slots__ = (
        'display_size', 'rgb888p_size', 'min_confidence', 'prev_positions',
        'punch_history', 'fall_history', 'punch_threshold', 'fall_slope_threshold',
        'run_speed_threshold', 'crowd_distance_threshold', 'fight_iou_threshold',
        # BUGFIX: 'mask_threshold' is read by detect_crowd()/detect_fight()
        # but was never declared here nor initialised in __init__.
        'mask_threshold',
        'debug_mode', 'frame_count'
    )

    def __init__(self, display_size, rgb888p_size, debug_mode=0):
        self.display_size = display_size
        self.rgb888p_size = rgb888p_size
        self.min_confidence = 0.5            # minimum usable keypoint confidence
        self.prev_positions = {}             # "{person_id}_{part}" -> (x, y, t)
        self.punch_history = {}              # person_id -> {"timestamp": t, ...}
        self.fall_history = {}               # person_id -> (state_counter, last_time)
        self.punch_threshold = 3
        self.fall_slope_threshold = 1.0
        self.run_speed_threshold = 1.2
        self.crowd_distance_threshold = 0.8
        self.fight_iou_threshold = 0.3
        # BUGFIX: previously missing; reading self.mask_threshold raised
        # AttributeError on the first frame with >= 2 person masks.
        self.mask_threshold = 0.5
        self.debug_mode = debug_mode
        self.frame_count = 0

    def _check_keypoint_confidence(self, keypoints, indices):
        """Return True when every keypoint in `indices` meets min_confidence.

        Integer comparison (x10) avoids repeated float comparisons.
        """
        for idx in indices:
            if int(keypoints[idx][2] * 10) < int(self.min_confidence * 10):
                return False
        return True

    def _get_person_id(self, keypoints):
        """Derive a compact 32-bit person id from the shoulder midpoint.

        Returns None when either shoulder keypoint is low-confidence.
        """
        if not self._check_keypoint_confidence(keypoints, [5, 6]):
            return None
        left_shoulder = keypoints[5]
        right_shoulder = keypoints[6]
        # Quantise shoulder-midpoint coordinates into a 16/16-bit packed id.
        id_x = int((left_shoulder[0] + right_shoulder[0]) // 4)
        id_y = int((left_shoulder[1] + right_shoulder[1]) // 4)
        return (id_x << 16) | id_y

    def _calculate_speed(self, person_id, part_name, position, current_time):
        """Return squared displacement per second for a tracked body part.

        Squared distance avoids a sqrt; callers compare against squared
        thresholds.  Returns 0.0 for the first sighting or anomalous dt.
        """
        key = f"{person_id}_{part_name}"
        prev = self.prev_positions.get(key)
        if not prev:
            self.prev_positions[key] = (position[0], position[1], current_time)
            return 0.0

        prev_x, prev_y, prev_time = prev
        dx = position[0] - prev_x
        dy = position[1] - prev_y
        dt = current_time - prev_time
        if dt <= 0 or dt > 1.0:  # filter out anomalous time gaps
            self.prev_positions[key] = (position[0], position[1], current_time)
            return 0.0

        # Squared-distance "speed" (no sqrt).
        speed = (dx*dx + dy*dy) / dt
        self.prev_positions[key] = (position[0], position[1], current_time)
        return speed

    def detect_run(self, keypoints, current_time):
        """Running detection: fast torso motion combined with bent knees."""
        if not self._check_keypoint_confidence(keypoints, [11, 12, 13, 14]):
            return False

        left_hip, right_hip = keypoints[11], keypoints[12]
        left_knee, right_knee = keypoints[13], keypoints[14]

        left_dist = abs(left_knee[1] - left_hip[1])
        right_dist = abs(right_knee[1] - right_hip[1])
        ref_point = ((left_hip[0] + right_hip[0])//2, (left_hip[1] + right_hip[1])//2)
        # BUGFIX: _get_person_id() may return None (low shoulder confidence);
        # previously every such person shared one bogus "None_body" track,
        # producing meaningless speeds.  Skip untrackable people instead.
        person_id = self._get_person_id(keypoints)
        if person_id is None:
            return False
        speed = self._calculate_speed(person_id, "body", ref_point, current_time)

        # Squared threshold to match _calculate_speed()'s squared units.
        is_fast = speed > (self.run_speed_threshold * self.run_speed_threshold)
        is_bent = (left_dist < 50) or (right_dist < 50)
        return is_fast and is_bent

    def detect_crowd(self, masks, current_time):
        """Gathering detection based on segmentation-mask centroids."""
        if len(masks) < 2:
            return False

        centroids = []
        for mask in masks:
            # Centroid from thresholded pixel indices (no full-image scan).
            y_indices, x_indices = np.where(mask > self.mask_threshold)
            if len(x_indices) < 10:  # drop tiny blobs
                continue
            cx = x_indices.mean()
            cy = y_indices.mean()
            area = len(x_indices)
            # sqrt(area) serves as an approximate blob radius.
            centroids.append((cx, cy, math.sqrt(area)))

        if len(centroids) < 2:
            return False

        crowd_count = 0
        for i in range(len(centroids)):
            for j in range(i+1, len(centroids)):
                cx1, cy1, r1 = centroids[i]
                cx2, cy2, r2 = centroids[j]
                dx = cx1 - cx2
                dy = cy1 - cy2
                # Squared-distance comparison (no sqrt).
                dist_sq = dx*dx + dy*dy
                threshold_sq = ((r1 + r2) * self.crowd_distance_threshold) ** 2
                if dist_sq < threshold_sq:
                    crowd_count += 1

        # Crowded when more than half the people form a close pair.
        return crowd_count > len(centroids) // 2

    def detect_fall(self, keypoints, current_time):
        """Fall detection with a small temporal state machine per person."""
        person_id = self._get_person_id(keypoints)
        # BUGFIX: `if not person_id` also rejected a legitimate id of 0
        # (shoulders near the origin); test for None explicitly.
        if person_id is None:
            return False

        if not self._check_keypoint_confidence(keypoints, [6, 12, 15]):
            return False

        right_shoulder, right_hip, right_foot = keypoints[6], keypoints[12], keypoints[15]
        dx_sh = right_hip[0] - right_shoulder[0]
        dy_sh = right_hip[1] - right_shoulder[1]
        dx_hf = right_foot[0] - right_hip[0]
        dy_hf = right_foot[1] - right_hip[1]

        # Squared-slope test (|dy/dx| < 0.5) without a division:
        # the torso or legs lie closer to horizontal than ~26 degrees.
        is_horizontal = (dy_sh*dy_sh < dx_sh*dx_sh // 4) or (dy_hf*dy_hf < dx_hf*dx_hf // 4)
        shoulder_width = abs(keypoints[5][0] - keypoints[6][0])
        # Feet roughly level with shoulders (< 0.7 shoulder widths apart).
        is_low = abs(right_foot[1] - right_shoulder[1]) < shoulder_width * 7 // 10

        # Temporal state machine: count consecutive fall-like frames (cap 3).
        fall_state = self.fall_history.get(person_id, (0, 0))
        current_state, last_time = fall_state
        if is_horizontal and is_low:
            current_state = min(current_state + 1, 3)
        else:
            current_state = max(current_state - 1, 0)

        self.fall_history[person_id] = (current_state, current_time)
        # Require 2 consecutive fall-like frames to suppress false alarms.
        return current_state >= 2

    def detect_fight(self, keypoints_list, masks, current_time):
        """Fight detection fusing fast wrist motion with mask overlap."""
        if len(keypoints_list) < 2 or len(masks) < 2:
            return False

        fight_score = 0
        person_count = 0

        for kp in keypoints_list:
            if not self._check_keypoint_confidence(kp, [5, 6, 7, 8, 9, 10]):
                continue
            person_count += 1

            # Punch detection: fast wrist motion (squared-speed units).
            left_shoulder, right_shoulder = kp[5], kp[6]
            left_wrist = kp[9]
            right_wrist = kp[10]
            person_id = self._get_person_id(kp)

            left_speed = self._calculate_speed(person_id, "left_hand", left_wrist, current_time)
            right_speed = self._calculate_speed(person_id, "right_hand", right_wrist, current_time)
            is_punch = (left_speed > 1000) or (right_speed > 1000)  # squared-speed threshold

            if is_punch:
                fight_score += 1

        # Mask collision: approximate contact via bounding-box intersection.
        for i in range(len(masks)):
            for j in range(i+1, len(masks)):
                mask1 = masks[i]
                mask2 = masks[j]
                y1, x1 = np.where(mask1 > self.mask_threshold)
                y2, x2 = np.where(mask2 > self.mask_threshold)
                if len(x1) < 10 or len(x2) < 10:
                    continue

                # Bounding boxes as (x_min, x_max, y_min, y_max).
                bbox1 = (x1.min(), x1.max(), y1.min(), y1.max())
                bbox2 = (x2.min(), x2.max(), y2.min(), y2.max())
                # Intersection extents.
                ix1 = max(bbox1[0], bbox2[0])
                iy1 = max(bbox1[2], bbox2[2])
                ix2 = min(bbox1[1], bbox2[1])
                iy2 = min(bbox1[3], bbox2[3])
                iw = max(0, ix2 - ix1)
                ih = max(0, iy2 - iy1)
                if iw * ih > 0:
                    fight_score += 1

        # Final decision: several punch/contact events and at least two people.
        return fight_score > 2 and person_count >= 2

    def clean_history(self):
        """Prune stale tracking state to bound memory usage."""
        current_time = time.time()
        # Expire position tracks older than 2 s.
        for key in list(self.prev_positions.keys()):
            _, _, timestamp = self.prev_positions[key]
            if current_time - timestamp > 2.0:
                del self.prev_positions[key]

        # Expire fall states older than 3 s.
        for person_id in list(self.fall_history.keys()):
            state, timestamp = self.fall_history[person_id]
            if current_time - timestamp > 3.0:
                del self.fall_history[person_id]

        # Expire punch states older than 2 s.
        for person_id in list(self.punch_history.keys()):
            timestamp = self.punch_history[person_id].get("timestamp", 0)
            if current_time - timestamp > 2.0:
                del self.punch_history[person_id]

        gc.collect()  # proactively reclaim the freed entries


class IntegratedDetector:
    """Fuses the keypoint and segmentation models into per-frame behaviour flags."""

    def __init__(self, keypoint_model, seg_model, display_size, rgb888p_size, debug_mode=0):
        self.keypoint_model = keypoint_model
        self.seg_model = seg_model
        self.behavior_analyzer = BehaviorAnalyzer(display_size, rgb888p_size, debug_mode)
        self.debug_mode = debug_mode
        self.frame_count = 0  # modulo-10 counter driving periodic cleanup

    def process_frame(self, img):
        """Run both models on one frame.

        Returns a dict with boolean flags: "fall", "run", "crowd", "fight".
        """
        current_time = time.time()
        results = {
            "fall": False,
            "run": False,
            "crowd": False,
            "fight": False
        }

        # 1. Keypoint detection (high-priority actions).
        with ScopedTiming("keypoint", self.debug_mode):
            kp_res = self.keypoint_model.run(img)
            kp_flags, kp_list = kp_res[0], kp_res[1]
            # BUGFIX: the original `if kp_res[0] and ...` applied `if` to the
            # whole per-person flag array, which raises "truth value of an
            # array is ambiguous" for multi-person output.  Gate each person
            # on its own validity flag instead (the flags' evident purpose).
            for flag, keypoints in zip(kp_flags, kp_list):
                if not flag:
                    continue
                if self.behavior_analyzer.detect_fall(keypoints, current_time):
                    results["fall"] = True
                if self.behavior_analyzer.detect_run(keypoints, current_time):
                    results["run"] = True

        # 2. Segmentation detection (crowding and fighting).
        with ScopedTiming("segmentation", self.debug_mode):
            seg_res = self.seg_model.run(img)
            # BUGFIX: `if seg_res[0]` tested a (5, 4) box array — ambiguous
            # truth value; the label list's length is the emptiness test.
            if len(seg_res[1]) > 0:
                # Keep only confident "person" masks (label id 0).
                person_masks = [
                    seg_res[3][i]
                    for i, label in enumerate(seg_res[1])
                    if label == 0 and seg_res[2][i] > 0.5
                ]

                if len(person_masks) >= 2:
                    if self.behavior_analyzer.detect_crowd(person_masks, current_time):
                        results["crowd"] = True

                    if self.behavior_analyzer.detect_fight(kp_list, person_masks, current_time):
                        results["fight"] = True

        # 3. Prune stale tracking history every 10 frames.
        self.frame_count = (self.frame_count + 1) % 10
        if self.frame_count == 0:
            self.behavior_analyzer.clean_history()

        return results


# Usage example (assumes the underlying hardware interfaces are implemented).
def main():
    """Demo entry point: wire both models together and loop over frames."""
    # Pose model.
    kp_model = KeyPointDetector(
        kmodel_path="/sdcard/kp.kmodel",
        model_input_size=[320, 320],
        rgb888p_size=[640, 480],
        display_size=[640, 480],
        debug_mode=1
    )

    # Segmentation model.
    seg_model = SegmentationDetector(
        kmodel_path="/sdcard/seg.kmodel",
        labels=["person", "car"],
        model_input_size=[320, 320],
        rgb888p_size=[640, 480],
        display_size=[640, 480],
        debug_mode=1
    )

    # Combined behaviour detector.
    detector = IntegratedDetector(kp_model, seg_model, [640, 480], [640, 480], debug_mode=1)

    # Frame-source stub (real hardware capture assumed elsewhere).
    def get_image():
        return np.zeros((480, 640, 3), dtype=np.uint8)

    # Behaviour flag -> warning message, in fixed report order.
    alerts = (
        ("fall", "⚠️ 跌倒"),
        ("run", "⚠️ 奔跑"),
        ("crowd", "⚠️ 聚集"),
        ("fight", "⚠️ 打架"),
    )

    try:
        while True:
            frame = get_image()
            flags = detector.process_frame(frame)
            # Print one warning line per raised flag.
            for key, message in alerts:
                if flags[key]:
                    print(message)
            gc.collect()  # reclaim per-frame garbage
    finally:
        # Resource teardown placeholder.
        pass


# Script entry point: start the integrated detection loop when run directly.
if __name__ == "__main__":
    main()