#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
运动检测模块 - 检测物体是否静止，只在静止时进行键盘检测
"""

import cv2
import numpy as np
import numpy.typing as npt
from typing import Tuple, Optional
import time

class MotionDetector:
    """Detect whether the observed object is static.

    Maintains a MOG2 background model and counts consecutive low-motion
    frames. ``should_detect`` additionally compares the current frame with
    the frame used for the previous detection (8x8 average hash, then SSIM)
    so that an unchanged scene is not re-detected.
    """

    # Consecutive low-motion frames required before the scene counts as
    # stable in ``is_stable``.
    # NOTE(review): this is deliberately lower than ``stability_frames``,
    # which is only used for the flag reported by ``get_motion_info`` —
    # the two stability notions intentionally differ in the original code.
    _MIN_STABLE_FRAMES = 3

    def __init__(self, motion_threshold: float = 15.0, stability_frames: int = 5):
        """
        Initialize the motion detector.

        Args:
            motion_threshold: Motion threshold; pixel differences above this
                value are considered motion. (Currently kept for API
                compatibility; ``is_stable`` uses ratio thresholds instead.)
            stability_frames: Number of consecutive static frames required
                for the ``is_stable`` flag reported by ``get_motion_info``.
        """
        self.motion_threshold = motion_threshold
        self.stability_frames = stability_frames
        self.previous_frame: Optional[npt.NDArray] = None  # kept for backward compat; unused
        self.stable_frame_count = 0
        self.last_detection_time = 0.0
        # Minimum interval between detections, in seconds (actually used
        # by should_detect; previously this value was hardcoded there).
        self.min_detection_interval = 0.5

        # Frame-comparison state
        self.last_detected_frame: Optional[npt.NDArray] = None  # frame used at the last detection
        # SSIM above this value means "scene unchanged, skip re-detection"
        # (actually used by should_detect; previously hardcoded there).
        self.frame_similarity_threshold = 0.80
        self.has_detected_once = False  # whether at least one detection has happened
        self.last_frame_hash: Optional[str] = None  # cached hash of last_detected_frame

        # Motion-detection tuning state (kept for backward compat; unused
        # by the current implementation).
        self.motion_history: list = []
        self.motion_history_size = 8
        self.stable_time_threshold = 1.0
        self.last_stable_time = 0.0
        self.current_time = 0.0

        # Background subtractor (lower threshold => higher sensitivity)
        self.bg_subtractor = cv2.createBackgroundSubtractorMOG2(
            detectShadows=True, varThreshold=30, history=200
        )

    @staticmethod
    def _to_gray(frame: npt.NDArray) -> npt.NDArray:
        """Return a grayscale copy of *frame* (3-channel input is converted from BGR)."""
        if len(frame.shape) == 3:
            return cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        return frame.copy()

    def is_stable(self, current_frame: npt.NDArray) -> bool:
        """
        Check whether the current frame is stable (object at rest).

        Args:
            current_frame: Current frame image.

        Returns:
            bool: True if the object is static and detection may proceed.
        """
        if current_frame is None:
            return False

        gray = self._to_gray(current_frame)

        # Foreground mask from the background model.
        fg_mask = self.bg_subtractor.apply(gray)

        # Light morphological opening to suppress noise.
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
        fg_mask = cv2.morphologyEx(fg_mask, cv2.MORPH_OPEN, kernel)

        # Fraction of pixels flagged as moving.
        # NOTE: MOG2 marks shadows with value 127, which this > 0 test
        # counts as motion, same as the original code.
        motion_ratio = float(np.count_nonzero(fg_mask)) / fg_mask.size

        # Up to 3% moving pixels still counts as "basically static".
        if motion_ratio < 0.03:
            self.stable_frame_count += 1
        else:
            self.stable_frame_count = 0

        return self.stable_frame_count >= self._MIN_STABLE_FRAMES

    def should_detect(self, current_frame: npt.NDArray) -> bool:
        """
        Decide whether keyboard detection should run on this frame.

        Args:
            current_frame: Current frame image.

        Returns:
            bool: True if detection should run.
        """
        current_time = time.time()

        # Rate-limit detections.
        if current_time - self.last_detection_time < self.min_detection_interval:
            return False

        # Only detect while the scene is stable; movement resets state.
        if not self.is_stable(current_frame):
            self.has_detected_once = False
            self.last_detected_frame = None
            self.last_frame_hash = None
            return False

        # First detection after becoming stable: always run it.
        if not self.has_detected_once:
            self.has_detected_once = True
            self._remember_detection(current_frame, current_time)
            return True

        # Already detected once: only re-detect if the scene changed.
        if self.last_detected_frame is not None:
            current_hash = self._calculate_frame_hash(current_frame)
            # Use the cached hash instead of re-hashing the stored frame
            # on every call.
            last_hash = self.last_frame_hash
            if last_hash is None:
                last_hash = self._calculate_frame_hash(self.last_detected_frame)

            # Identical hashes => scene unchanged, skip.
            if current_hash == last_hash:
                return False

            # Hashes differ: confirm with a detailed similarity measure.
            similarity = self._calculate_frame_similarity(
                current_frame, self.last_detected_frame
            )
            if similarity > self.frame_similarity_threshold:
                return False

        # Scene changed: run detection again.
        self._remember_detection(current_frame, current_time)
        return True

    def _remember_detection(self, frame: npt.NDArray, when: float) -> None:
        """Store the frame used for the latest detection, its hash, and the time."""
        self.last_detected_frame = frame.copy()
        self.last_frame_hash = self._calculate_frame_hash(frame)
        self.last_detection_time = when

    def get_motion_info(self, current_frame: npt.NDArray) -> dict:
        """
        Compute motion statistics for the current frame.

        Args:
            current_frame: Current frame image.

        Returns:
            dict: Motion state information (``motion_ratio``, ``is_stable``,
            ``stable_frames``, and the foreground ``motion_mask``).
        """
        if current_frame is None:
            return {"motion_ratio": 0, "is_stable": False, "stable_frames": 0}

        gray = self._to_gray(current_frame)

        # Foreground mask from the background model.
        fg_mask = self.bg_subtractor.apply(gray)

        # Stronger cleanup than is_stable: opening then closing.
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
        fg_mask = cv2.morphologyEx(fg_mask, cv2.MORPH_OPEN, kernel)
        fg_mask = cv2.morphologyEx(fg_mask, cv2.MORPH_CLOSE, kernel)

        motion_ratio = float(np.count_nonzero(fg_mask)) / fg_mask.size

        return {
            "motion_ratio": motion_ratio,
            # NOTE: reported against stability_frames (default 5), whereas
            # is_stable() uses _MIN_STABLE_FRAMES (3) — preserved as-is.
            "is_stable": self.stable_frame_count >= self.stability_frames,
            "stable_frames": self.stable_frame_count,
            "motion_mask": fg_mask,
        }

    def reset(self) -> None:
        """Reset detector state, including the background model."""
        self.stable_frame_count = 0
        self.last_detection_time = 0
        self.previous_frame = None
        self.last_detected_frame = None
        self.has_detected_once = False
        self.last_frame_hash = None
        # FIX: use the same parameters as __init__ (was varThreshold=50,
        # history=500, making a reset detector behave differently from a
        # freshly constructed one).
        self.bg_subtractor = cv2.createBackgroundSubtractorMOG2(
            detectShadows=True, varThreshold=30, history=200
        )

    def _calculate_frame_similarity(self, frame1: npt.NDArray, frame2: npt.NDArray) -> float:
        """
        Compute the similarity between two frames.

        Args:
            frame1: First frame.
            frame2: Second frame.

        Returns:
            float: Similarity in [0, 1]; 1 means identical.
        """
        if frame1 is None or frame2 is None:
            return 0.0

        # Resize frame2 to match frame1 if needed.
        if frame1.shape != frame2.shape:
            h, w = frame1.shape[:2]
            frame2 = cv2.resize(frame2, (w, h))

        gray1 = self._to_gray(frame1)
        gray2 = self._to_gray(frame2)

        return self._calculate_ssim(gray1, gray2)

    def _calculate_ssim(self, img1: npt.NDArray, img2: npt.NDArray) -> float:
        """
        Compute the Structural Similarity Index (SSIM) between two images.

        Args:
            img1: First image (grayscale).
            img2: Second image (grayscale).

        Returns:
            float: Mean SSIM value.
        """
        img1 = img1.astype(np.float64)
        img2 = img2.astype(np.float64)

        # Standard SSIM stabilization constants for 8-bit dynamic range.
        C1 = (0.01 * 255) ** 2
        C2 = (0.03 * 255) ** 2

        # Local means via Gaussian windows.
        mu1 = cv2.GaussianBlur(img1, (11, 11), 1.5)
        mu2 = cv2.GaussianBlur(img2, (11, 11), 1.5)

        mu1_sq = mu1 ** 2
        mu2_sq = mu2 ** 2
        mu1_mu2 = mu1 * mu2

        # Local variances and covariance.
        sigma1_sq = cv2.GaussianBlur(img1 ** 2, (11, 11), 1.5) - mu1_sq
        sigma2_sq = cv2.GaussianBlur(img2 ** 2, (11, 11), 1.5) - mu2_sq
        sigma12 = cv2.GaussianBlur(img1 * img2, (11, 11), 1.5) - mu1_mu2

        # SSIM map; C1/C2 keep the denominator strictly positive.
        numerator = (2 * mu1_mu2 + C1) * (2 * sigma12 + C2)
        denominator = (mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2)
        ssim_map = numerator / denominator

        # FIX: return a Python float, matching the annotation (np.mean
        # returns a numpy scalar).
        return float(np.mean(ssim_map))

    def _calculate_frame_hash(self, frame: npt.NDArray) -> str:
        """
        Compute an 8x8 average-hash string for fast frame comparison.

        Args:
            frame: Input frame.

        Returns:
            str: 64-character bit string ("" for None input).
        """
        if frame is None:
            return ""

        gray = self._to_gray(frame)

        # Downsample to 8x8 and threshold at the mean (vectorized; same
        # row-major bit order as the original nested loop).
        small = cv2.resize(gray, (8, 8))
        bits = small > small.mean()
        return "".join("1" if b else "0" for b in bits.flat)
