# 导入pygame用于播放音频
try:
    import pygame
except ImportError as e:
    print("❌ pygame库导入失败，请安装pygame: pip install pygame")
    raise e

try:
    import cv2
except ImportError as e:
    print("❌ OpenCV库导入失败，请安装opencv-python:")
    print("   pip install opencv-python")
    raise e

import numpy as np

try:
    import paho.mqtt.client as mqtt
except ImportError as e:
    print("❌ MQTT库导入失败，请安装paho-mqtt: pip install paho-mqtt")
    raise e

try:
    from ultralytics import YOLO
except ImportError as e:
    print("❌ ultralytics库导入失败，请安装ultralytics: pip install ultralytics")
    raise e

import base64
import time
import random
import json
import threading
import platform
import os
from datetime import datetime
import pytz

# MQTT configuration: broker endpoint, topic namespace, and credentials.
broker = 'emqx.digiplc.cn'
port = 1883
control_topic = "camera/control"
feed_topic = "camera/feed"
device_topic = "camera/device"
audio_topic = "camera/audio"  # topic carrying audio payloads to play locally
# Random suffix keeps concurrent clients from colliding on the broker.
client_id = f'camera-client-{random.randint(0, 1000)}'
# NOTE(review): credentials hard-coded in source — move to env vars/secrets.
username = 'test'
password = '!1Qazwsxed'

# Device identity. Regenerated on every start, so the id is NOT stable
# across restarts of this script.
device_id = f"cam_{random.randint(1000, 9999)}"
device_name = f"摄像头 {device_id}"

# Global runtime state shared between the MQTT callback thread and workers.
cap = None  # cv2.VideoCapture handle, created by start_camera()
is_streaming = False  # True while the stream_camera loop should run
streaming_lock = threading.Lock()  # guards start/stop state transitions

# Snapshot / person-detection settings.
pose_model = None  # YOLO pose model (loading currently disabled in code)
last_capture_time = 0  # epoch seconds of the most recent snapshot
capture_interval = 5  # snapshot every 5 seconds (was 3)
max_captured_images = 100  # keep at most 100 raw snapshots (was 10)
max_detected_images = 30   # keep at most 30 detection images (new)
captured_images_dir = "captured_images"
detected_images_dir = "detected_images"

# User-adjustable camera settings, updated via the 'update_settings' command.
camera_settings = {
    'flip_horizontal': False,
    'flip_vertical': False,
    'red_shift': 0,
    'green_shift': 0,
    'blue_shift': 0
}

# Per-channel RGB gain correction (updated via 'update_color_correction');
# used to counteract a blue colour cast from the sensor.
color_correction = {
    'red_gain': 1.0,    # red gain
    'green_gain': 1.0,  # green gain
    'blue_gain': 0.8,   # blue gain (lowered to reduce the blue cast)
    'color_balance': 0.5  # colour-balance clip strength (0-5%)
}

# Cache of precomputed gain planes keyed by frame size (h, w), so they are
# not rebuilt every frame; flushed whenever color_correction changes.
gain_arrays = {}

# MQTT callbacks
def on_connect(client, userdata, flags, rc):
    """MQTT connect callback: subscribe to this device's topics and announce presence."""
    if rc != 0:
        print(f"❌ 连接失败，返回码 {rc}")
        return
    print("✅ 本地脚本已连接到 MQTT 服务器")
    # Per-device control channel first, then the audio-payload channel.
    for base_topic in (control_topic, audio_topic):
        client.subscribe(f"{base_topic}/{device_id}")
    # Tell subscribers this device just came online.
    send_device_status('online')

def on_message(client, userdata, msg):
    """MQTT message callback: dispatch audio payloads and JSON control commands.

    Raw audio arrives on ``{audio_topic}/{device_id}`` and is handed to a
    background playback thread; every other message is expected to be a
    UTF-8 JSON object with a ``command`` field.  Any failure is logged and
    swallowed so the MQTT loop keeps running.
    """
    # Declare every module-level name reassigned here up front.  (The
    # original declared ``gain_arrays`` inside an elif branch, which is
    # legal but fragile if the function is later reordered.)
    global is_streaming, camera_settings, color_correction, gain_arrays
    try:
        if msg.topic == f"{audio_topic}/{device_id}":
            # Play received audio off-thread so message handling isn't blocked.
            threading.Thread(target=play_received_audio, args=(msg.payload,), daemon=True).start()
            return

        data = json.loads(msg.payload.decode('utf-8'))
        command = data.get('command')

        if command == 'start':
            with streaming_lock:
                if not is_streaming:
                    is_streaming = True  # mark streaming before the camera spins up
                    start_camera()
        elif command == 'stop':
            with streaming_lock:
                if is_streaming:  # only allow stop while actually streaming
                    is_streaming = False  # flag first so the stream loop exits
                    stop_camera()
        elif command == 'update_settings':
            if 'settings' in data:
                camera_settings.update(data['settings'])
                print(f"🔧 摄像头设置已更新: {camera_settings}")
        elif command == 'update_color_correction':
            if 'color_correction' in data:
                color_correction.update(data['color_correction'])
                # Drop cached gain planes; they are rebuilt lazily on next use.
                gain_arrays = {}
                print(f"🎨 色彩校正参数已更新: {color_correction}")
        elif command == 'play_audio':
            # Optional nested payload: {"audio": {"audio_file": "<path>"}}.
            audio_file = None
            if 'audio' in data and 'audio_file' in data['audio']:
                audio_file = data['audio']['audio_file']
            # Play in a worker thread to avoid blocking MQTT processing.
            threading.Thread(target=play_audio_file, args=(audio_file,), daemon=True).start()
        elif command == 'play_recorded_audio':
            # Play the locally recorded clip in a worker thread.
            threading.Thread(target=play_recorded_audio_file, daemon=True).start()
    except Exception as e:
        print(f"❌ 处理控制消息失败: {e}")

def send_device_status(status):
    """Publish this device's presence and streaming state to the device topic."""
    # streaming_status mirrors the global flag so the UI can show live state.
    streaming_state = 'streaming' if is_streaming else 'idle'
    message = json.dumps({
        'device_id': device_id,
        'name': device_name,
        'status': status,
        'streaming_status': streaming_state,
        'platform': platform.system(),
    })
    client.publish(device_topic, message)

# Build the MQTT client and register the callbacks defined above.
# NOTE(review): this positional Client(client_id) signature is the
# paho-mqtt 1.x API; paho-mqtt 2.x requires a CallbackAPIVersion argument —
# confirm the pinned library version before upgrading.
client = mqtt.Client(client_id)
client.username_pw_set(username, password)
client.on_connect = on_connect
client.on_message = on_message

try:
    client.connect(broker, port, 60)  # 60-second keep-alive
except Exception as e:
    print(f"❌ MQTT 连接失败: {e}")
    exit()

# Run the MQTT network loop on a background thread.
client.loop_start()

def start_camera():
    """Open the default camera and launch the background streaming thread.

    Returns:
        bool: True when the camera opened and streaming started; False on
        failure.  On failure the global ``is_streaming`` flag is reset to
        False — the message handler sets it to True *before* calling this
        function, and without the rollback a failed open would leave the
        device stuck in a phantom "streaming" state that blocks every
        subsequent 'start' command.
    """
    global cap, is_streaming, pose_model
    print("📷 正在打开摄像头...")
    cap = cv2.VideoCapture(0)
    if not cap.isOpened():
        print("❌ 无法打开摄像头")
        is_streaming = False  # bug fix: roll back the caller's optimistic flag
        return False

    # Tune capture for low latency and moderate bandwidth.
    cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)      # keep the driver buffer tiny
    cap.set(cv2.CAP_PROP_FPS, 30)            # requested frame rate
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)   # reduced resolution
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)  # reduced resolution

    is_streaming = True
    print("✅ 摄像头已开启，开始推送画面...")
    send_device_status('online')  # broadcast the updated streaming state

    # Best-effort audio initialisation.  mixer.init() raises pygame.error
    # (not ImportError) when no audio device is available, so catch broadly:
    # audio failure must never abort camera start-up.
    try:
        pygame.mixer.init()
        print("✅ 音频服务已加载")
    except Exception as e:
        print("⚠️ 音频服务不可用: ", e)

    # Pose-detection model loading is intentionally disabled for now
    # (see capture_and_detect); pose_model stays None.

    # Push frames from a dedicated daemon thread.
    threading.Thread(target=stream_camera, daemon=True).start()
    return True

def stop_camera():
    """Stop streaming and release the camera handle; always returns True."""
    global cap, is_streaming
    is_streaming = False  # stream_camera's loop exits on this flag
    if cap is not None and cap.isOpened():
        cap.release()
        print("⏹️ 摄像头已关闭")
    # Still online as a device — just no longer streaming.
    send_device_status('online')
    return True

def apply_camera_settings(frame):
    """Apply the user-configured flips and per-channel colour shifts to a BGR frame."""
    # cv2.flip codes: 1 = horizontal, 0 = vertical, -1 = both axes.
    flip_h = camera_settings['flip_horizontal']
    flip_v = camera_settings['flip_vertical']
    if flip_h and flip_v:
        frame = cv2.flip(frame, -1)
    elif flip_h:
        frame = cv2.flip(frame, 1)
    elif flip_v:
        frame = cv2.flip(frame, 0)

    # Channel shifts in BGR order, matching cv2.split's output order.
    shifts = (camera_settings['blue_shift'],
              camera_settings['green_shift'],
              camera_settings['red_shift'])
    # Vectorised colour shifting; skipped entirely when every shift is zero.
    if any(shifts):
        frame = frame.astype(np.int16)  # widen so shifts cannot overflow uint8
        channels = list(cv2.split(frame))
        for idx, shift in enumerate(shifts):
            if shift != 0:
                channels[idx] = cv2.add(channels[idx], shift)
        frame = cv2.merge(channels)
        frame = np.clip(frame, 0, 255).astype(np.uint8)

    return frame

def apply_color_correction(frame):
    """Rescale the B/G/R channels by the configured gains to fix the blue cast."""
    h, w = frame.shape[:2]
    # Gain planes are cached per frame size; the cache is flushed whenever
    # color_correction changes over MQTT.
    cached = gain_arrays.get((h, w))
    if cached is None:
        cached = (
            np.full((h, w), color_correction['red_gain'], dtype=np.float32),
            np.full((h, w), color_correction['green_gain'], dtype=np.float32),
            np.full((h, w), color_correction['blue_gain'], dtype=np.float32),
        )
        gain_arrays[(h, w)] = cached
    r_gain, g_gain, b_gain = cached

    # Work in float32 so the multiplies do not saturate prematurely.
    b, g, r = cv2.split(frame.astype(np.float32))
    corrected = cv2.merge([
        cv2.multiply(b, b_gain),
        cv2.multiply(g, g_gain),
        cv2.multiply(r, r_gain),
    ])
    return np.clip(corrected, 0, 255).astype(np.uint8)

def color_balance_algorithm(img, percent=1):
    """Simplest colour balance: per-channel percentile clip + linear stretch.

    For each of the three channels, the lowest and highest ``percent`` % of
    pixel mass (taken from the cumulative histogram) are clipped and the
    remaining intensity range is stretched linearly onto [0, 255].

    Implemented in pure NumPy — the original used cv2.split/cv2.merge for
    what are plain array operations, dragging in an OpenCV dependency for
    nothing and making the function impossible to unit-test in isolation.

    Args:
        img: input image, (H, W, 3) array in BGR order (cast to uint8 if not).
        percent: percentage clipped at each end of the histogram (default 1;
            values <= 0 are treated as 0.1 to keep the stretch well-defined).

    Returns:
        The balanced image as an (H, W, 3) uint8 array.
    """
    if percent <= 0:
        percent = 0.1  # avoid a degenerate zero-clip stretch

    if img.dtype != np.uint8:
        img = img.astype(np.uint8)

    adjusted_channels = []
    for ch in range(3):
        channel = img[:, :, ch]

        # Cumulative histogram over the 256 possible intensities.
        hist, _ = np.histogram(channel.ravel(), 256, [0, 256])
        cumsum = np.cumsum(hist)
        total = cumsum[-1]

        # First intensity whose cumulative count crosses each clip threshold.
        min_val = np.searchsorted(cumsum, total * percent / 100)
        max_val = np.searchsorted(cumsum, total * (100 - percent) / 100)

        # Keep the bounds inside the valid intensity range.
        min_val = max(0, min_val)
        max_val = min(255, max_val)
        if max_val <= min_val:
            # Flat histogram (e.g. a uniform channel) — avoid division by zero.
            max_val = min_val + 1

        # Linear stretch: map [min_val, max_val] onto [0, 255].
        stretched = (channel.astype(np.float32) - min_val) * 255.0 / (max_val - min_val)
        adjusted_channels.append(np.clip(stretched, 0, 255).astype(np.uint8))

    return np.dstack(adjusted_channels)

def apply_advanced_color_correction(frame):
    """Gain-based colour correction plus an optional histogram colour balance.

    The gain stage is delegated to apply_color_correction() — the original
    duplicated that function's body verbatim, so any change to the gain
    logic had to be made in two places.  When
    ``color_correction['color_balance']`` is > 0, a percentile
    colour-balance pass is applied on top of the gain-corrected frame.

    Args:
        frame: BGR uint8 frame.

    Returns:
        The corrected (and optionally balanced) BGR uint8 frame.
    """
    # Stage 1: per-channel gain correction (shared with the simple path).
    corrected_frame = apply_color_correction(frame)

    # Stage 2 (optional): simplest-colour-balance histogram stretch.
    balance_percent = color_correction.get('color_balance', 0)
    if balance_percent > 0:
        return color_balance_algorithm(corrected_frame, percent=balance_percent)

    return corrected_frame

def capture_and_detect(frame):
    """Save a periodic snapshot of *frame* and prune old images on disk.

    Runs at most once every ``capture_interval`` seconds; calls in between
    return immediately.  Person detection itself is currently disabled, so
    only the raw snapshot is written.
    """
    global last_capture_time

    now = time.time()
    if now - last_capture_time < capture_interval:
        return  # throttle: at most one snapshot per interval
    last_capture_time = now

    try:
        # Make sure both target directories exist.
        os.makedirs(captured_images_dir, exist_ok=True)
        os.makedirs(detected_images_dir, exist_ok=True)

        # Timestamped filenames in Beijing local time.
        beijing_time = datetime.now(pytz.timezone('Asia/Shanghai'))
        timestamp = beijing_time.strftime("%Y%m%d%H%M%S")
        captured_filename = f"{captured_images_dir}/{timestamp}.jpg"
        detected_filename = f"{detected_images_dir}/{timestamp}.jpg"  # reserved for the (disabled) detection output

        # Moderate JPEG quality keeps disk I/O and storage modest.
        capture_params = [int(cv2.IMWRITE_JPEG_QUALITY), 80]
        cv2.imwrite(captured_filename, frame, capture_params)

        # Prune the oldest files so each directory stays within its cap.
        for directory, keep in ((captured_images_dir, max_captured_images),
                                (detected_images_dir, max_detected_images)):
            jpgs = sorted(name for name in os.listdir(directory) if name.endswith('.jpg'))
            excess = len(jpgs) - keep
            if excess > 0:
                for stale in jpgs[:excess]:
                    os.remove(os.path.join(directory, stale))

        # Pose detection (YOLO) is intentionally disabled; only the raw
        # snapshot is kept for now.
        print(f"📸 [{timestamp}] 截图已保存，人形检测未启用")

    except Exception as e:
        print(f"❌ 截图或检测失败: {e}")

def stream_camera():
    """Frame-pushing loop: read, correct, JPEG-encode, and publish frames.

    Runs on a daemon thread started by start_camera() and exits when the
    global ``is_streaming`` flag is cleared.  Statement order here is
    deliberate (throttle check before read, flag update last); keep it.
    """
    global is_streaming
    last_send_time = 0
    send_interval = 0.067  # ~15 fps (was 0.033 / 30 fps)
    
    # Low JPEG quality to keep per-frame payloads small (was 30, now 25).
    encode_params = [cv2.IMWRITE_JPEG_QUALITY, 25]
    
    while is_streaming:
        try:
            current_time = time.time()
            if current_time - last_send_time < send_interval:
                # Short sleep keeps the loop responsive to the stop flag.
                time.sleep(0.001)
                continue
                
            # Read the newest frame (buffer size is 1, so stale frames are
            # minimised by the capture settings).
            ret, frame = cap.read()
            if not ret:
                print("⚠️ 无法读取摄像头画面")
                time.sleep(0.01)
                continue
            
            # User-configured flips and channel shifts.
            frame = apply_camera_settings(frame)
            
            # Gain correction for the blue colour cast.
            frame = apply_color_correction(frame)
            
            # Swap BGR -> RGB before encoding.  NOTE(review): cv2.imencode
            # assumes BGR input, so the resulting JPEG has swapped channels;
            # presumably the browser side compensates — confirm with the
            # consumer of the feed topic.
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            
            # JPEG-compress at reduced quality for throughput.
            _, buffer = cv2.imencode('.jpg', frame, encode_params)
            
            # Base64 so the payload is plain text over MQTT.
            jpg_as_text = base64.b64encode(buffer).decode('utf-8')
            
            # Publish on this device's feed topic; QoS 0 = fire and forget.
            result = client.publish(f"{feed_topic}/{device_id}", jpg_as_text, qos=0)
            if result.rc != 0:
                print(f"❌ 发布失败: {result.rc}")
            
            last_send_time = current_time
            
        except Exception as e:
            # Keep streaming through transient errors (e.g. a mid-read stop).
            print(f"❌ 推流过程中出错: {e}")
            time.sleep(0.01)
    
    print("⏹️ 推流已停止")

def play_audio_file(audio_file=None):
    """Play an audio file with pygame, blocking until playback finishes.

    Falls back to the default clip "audio/yu.MP3" when no file is given.
    Intended to run on a worker thread (see on_message); the mixer is torn
    down in the finally block either way.
    """
    try:
        pygame.mixer.init()

        if audio_file is None:
            audio_file = "audio/yu.MP3"

        if not os.path.exists(audio_file):
            print(f"❌ 音频文件不存在: {audio_file}")
            return

        pygame.mixer.music.load(audio_file)
        pygame.mixer.music.play()

        # Poll until the track has finished.
        while pygame.mixer.music.get_busy():
            time.sleep(0.1)

        print(f"🔊 音频播放完成: {audio_file}")

    except Exception as e:
        print(f"❌ 播放音频失败: {e}")

    finally:
        pygame.mixer.quit()

def play_recorded_audio_file():
    """Play the previously recorded clip at audio/recorded_audio.webm.

    NOTE(review): pygame.mixer.music documents support for WAV/OGG/MP3;
    WebM playback may fail on many builds — confirm the recorder's output
    format actually loads here.
    """
    audio_path = "audio/recorded_audio.webm"
    try:
        pygame.mixer.init()

        if not os.path.exists(audio_path):
            print(f"❌ 录制的音频文件不存在: {audio_path}")
            return

        pygame.mixer.music.load(audio_path)
        pygame.mixer.music.play()

        # Block until the clip has finished playing.
        while pygame.mixer.music.get_busy():
            time.sleep(0.1)

        print(f"🔊 录制的音频播放完成: {audio_path}")

    except Exception as e:
        print(f"❌ 播放录制的音频失败: {e}")

    finally:
        pygame.mixer.quit()

def play_received_audio(audio_data):
    """Decode base64 audio received over MQTT and play it with pygame.

    The payload is written to a temporary file because pygame.mixer.music
    loads from a path.  Fixes over the original: the "audio" directory is
    created if missing (previously ``open()`` raised FileNotFoundError on a
    fresh checkout), and the temp file is removed in ``finally`` so a
    playback failure no longer leaks it on disk.

    Args:
        audio_data: base64-encoded audio bytes (the raw MQTT payload).
    """
    temp_audio_path = "audio/temp_received_audio.mp3"
    try:
        pygame.mixer.init()

        # Decode the base64 payload into raw audio bytes.
        audio_bytes = base64.b64decode(audio_data)

        # Ensure the target directory exists before writing the temp file.
        os.makedirs(os.path.dirname(temp_audio_path), exist_ok=True)
        with open(temp_audio_path, "wb") as f:
            f.write(audio_bytes)

        pygame.mixer.music.load(temp_audio_path)
        pygame.mixer.music.play()

        # Block until playback completes.
        while pygame.mixer.music.get_busy():
            time.sleep(0.1)

        print(f"🔊 接收到的音频播放完成")

    except Exception as e:
        print(f"❌ 播放接收到的音频失败: {e}")

    finally:
        # Clean up the temp file whether or not playback succeeded.
        try:
            os.remove(temp_audio_path)
        except OSError:
            pass
        pygame.mixer.quit()

# Periodic heartbeat so the broker/UI keeps seeing this device as alive.
def send_heartbeat():
    """Re-publish 'online' status every 20 seconds while connected."""
    heartbeat_period = 20  # seconds between heartbeats
    while True:
        # Skip the publish while disconnected; paho reconnects on its own.
        if client.is_connected():
            send_device_status('online')
        time.sleep(heartbeat_period)

# Daemon thread: heartbeats never block process shutdown.
threading.Thread(target=send_heartbeat, daemon=True).start()

print(f"📡 摄像头客户端 {device_id} 已就绪，等待控制指令...")

# Keep the main thread alive; all real work happens on the MQTT network
# loop and the streaming/heartbeat daemon threads.
try:
    while True:
        time.sleep(1)
except KeyboardInterrupt:
    # Graceful shutdown on Ctrl-C: announce offline, stop the camera, then
    # tear down the MQTT loop and disconnect.
    print("\n👋 正在关闭程序...")
    send_device_status('offline')
    stop_camera()
    client.loop_stop()
    client.disconnect()