import asyncio
import base64
import json
import logging
import threading
import time
from queue import Empty, Full, Queue

import cv2
import numpy as np
from aiohttp import web
from aiortc import MediaStreamTrack
from go2_webrtc_driver.webrtc_driver import Go2WebRTCConnection, WebRTCConnectionMethod
from ultralytics import YOLO

# Logging configuration.
# NOTE(review): despite the original "enable logging for debugging" note,
# level=FATAL suppresses almost all output, including the logging.error()
# call in main()'s setup() — lower the level when actually debugging.
logging.basicConfig(level=logging.FATAL)

# Set of currently-connected WebSocket clients; added to by
# websocket_handler() and broadcast to / pruned by send_frame_to_clients().
websockets = set()


async def websocket_handler(request):
    """Accept a WebSocket upgrade, register the client and pump its messages.

    The connection is added to the module-level ``websockets`` set so that
    ``send_frame_to_clients`` can broadcast video frames to it. Incoming
    text messages are only printed; the stream is otherwise one-way
    (server -> client).

    Args:
        request: aiohttp request carrying the WebSocket handshake.

    Returns:
        The prepared ``web.WebSocketResponse``.
    """
    ws = web.WebSocketResponse()
    await ws.prepare(request)

    websockets.add(ws)
    print(f"New WebSocket connection. Total connections: {len(websockets)}")

    try:
        async for msg in ws:
            if msg.type == web.WSMsgType.TEXT:
                print(f"Message from client: {msg.data}")
            elif msg.type == web.WSMsgType.ERROR:
                print(f"WebSocket error: {ws.exception()}")
    finally:
        # discard() instead of remove(): send_frame_to_clients() may already
        # have evicted this socket after a failed send, and remove() would
        # then raise KeyError here and mask the real reason the loop exited.
        websockets.discard(ws)
        print(f"WebSocket disconnected. Remaining connections: {len(websockets)}")
    return ws


async def send_frame_to_clients(frame):
    """Broadcast one BGR frame to every connected WebSocket client.

    The frame is JPEG-encoded (quality 70), base64-wrapped and sent as a
    JSON text message of the form ``{"type": "frame", "data": <base64>}``.
    Clients whose send fails are dropped from the ``websockets`` set.

    Args:
        frame: image as a NumPy array in BGR channel order (as produced by
            ``VideoFrame.to_ndarray(format="bgr24")`` / OpenCV).
    """
    if not websockets:
        return

    # Encode to JPEG; check the success flag instead of broadcasting junk
    # when encoding fails (the original ignored it).
    ok, buffer = cv2.imencode('.jpg', frame, [cv2.IMWRITE_JPEG_QUALITY, 70])
    if not ok:
        return
    jpg_as_text = base64.b64encode(buffer).decode('utf-8')

    # Build the broadcast message once, outside the send loop.
    message = json.dumps({
        'type': 'frame',
        'data': jpg_as_text
    })

    # Iterate over a snapshot so the live set can be mutated while looping.
    for ws in set(websockets):
        try:
            await ws.send_str(message)
        except Exception:
            # Narrowed from a bare `except:` so CancelledError and
            # KeyboardInterrupt still propagate; discard() tolerates the
            # handler's cleanup having already removed this socket.
            websockets.discard(ws)
            print(f"Removed disconnected WebSocket. Remaining: {len(websockets)}")


async def start_websocket_server():
    """Bring up the aiohttp application serving the /ws WebSocket endpoint.

    Binds to all interfaces on port 8080; clients connect to
    ``ws://<host>:8080/ws`` and are handled by ``websocket_handler``.
    """
    application = web.Application()
    application.router.add_route('GET', '/ws', websocket_handler)

    app_runner = web.AppRunner(application)
    await app_runner.setup()
    await web.TCPSite(app_runner, '0.0.0.0', 8080).start()
    print("WebSocket server started at ws://0.0.0.0:8080/ws")


def main():
    """Stream Go2 robot video, run YOLO detection, display and broadcast.

    A background thread runs an asyncio loop that owns the WebRTC
    connection and the WebSocket broadcast server; decoded frames are
    handed to this (main) thread through a bounded queue. The main thread
    runs YOLO on every Nth frame, shows the result with OpenCV and
    schedules a WebSocket broadcast of the displayed frame. Press 'q' in
    the video window to quit.
    """
    # model = YOLO("yolo11n.pt").to("cuda")  # alternative: GPU inference
    model = YOLO("yolo11n.pt")  # replace with your model path
    print("✅ YOLOv11n 模型加载成功，支持类别：", model.names)

    # Bounded queue: if inference/display falls behind the camera, old
    # frames are dropped instead of accumulating and growing latency
    # without bound (the original queue was unbounded).
    frame_queue = Queue(maxsize=30)

    # Choose a connection method (uncomment the correct one)
    conn = Go2WebRTCConnection(WebRTCConnectionMethod.LocalSTA, ip="192.168.123.161")
    # conn = Go2WebRTCConnection(WebRTCConnectionMethod.LocalSTA, serialNumber="B42D2000XXXXXXXX")
    # conn = Go2WebRTCConnection(WebRTCConnectionMethod.Remote, serialNumber="B42D2000XXXXXXXX", username="email@gmail.com", password="pass")
    # conn = Go2WebRTCConnection(WebRTCConnectionMethod.LocalAP)

    async def recv_camera_stream(track: MediaStreamTrack):
        """Receive frames from the WebRTC track and enqueue them as BGR arrays."""
        while True:
            frame = await track.recv()
            img = frame.to_ndarray(format="bgr24")
            try:
                frame_queue.put_nowait(img)
            except Full:
                # Queue is full: drop the oldest frame to keep latency low,
                # then retry once with the newest frame.
                try:
                    frame_queue.get_nowait()
                except Empty:
                    pass
                try:
                    frame_queue.put_nowait(img)
                except Full:
                    pass  # consumer raced us again; skip this frame

    def run_asyncio_loop(loop):
        """Run the WebRTC/WebSocket asyncio machinery in this thread."""
        asyncio.set_event_loop(loop)

        # The event loop keeps only weak references to tasks; hold a strong
        # reference so the server task cannot be garbage-collected mid-run.
        background_tasks = []

        async def setup():
            try:
                # Start the WebSocket broadcast server.
                background_tasks.append(asyncio.create_task(start_websocket_server()))

                # Connect to the robot.
                await conn.connect()

                # Switch the video channel on and start receiving frames.
                conn.video.switchVideoChannel(True)
                conn.video.add_track_callback(recv_camera_stream)
            except Exception as e:
                # NOTE: only visible if logging is configured below FATAL.
                logging.error(f"Error in WebRTC connection: {e}")

        # Run the setup coroutine, then serve callbacks until stopped.
        loop.run_until_complete(setup())
        loop.run_forever()

    # Dedicated event loop for the asyncio code, driven by a worker thread.
    loop = asyncio.new_event_loop()
    asyncio_thread = threading.Thread(target=run_asyncio_loop, args=(loop,))
    asyncio_thread.start()

    DETECT_EVERY = 9  # run YOLO only on every Nth frame to cut inference load
    frame_counter = 0
    annotated_img = None  # most recent YOLO-annotated frame

    try:
        while True:
            # Block with a timeout instead of busy-polling empty() + sleep.
            try:
                img = frame_queue.get(timeout=0.1)
            except Empty:
                continue

            print(f"Shape: {img.shape}, Dimensions: {img.ndim}, Type: {img.dtype}, Size: {img.size}")

            if frame_counter % DETECT_EVERY == 0:
                results = model(img, verbose=False)[0]
                annotated_img = results.plot()

                if results.boxes is not None and len(results.boxes) > 0:
                    print(f"🎯 检测到 {len(results.boxes)} 个目标")
                else:
                    print("⚠️ 未检测到目标")
            frame_counter += 1

            # Before the first detection completes, fall back to the raw frame.
            if annotated_img is None:
                annotated_img = img

            # NOTE(review): between detections this re-displays the last
            # annotated frame (a stale image), not the current `img` —
            # preserved from the original; confirm whether that is intended.
            cv2.imshow('Video', annotated_img)

            # Broadcast the displayed frame to WebSocket clients on the
            # background loop (thread-safe scheduling).
            if loop.is_running():
                asyncio.run_coroutine_threadsafe(send_frame_to_clients(annotated_img), loop)

            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    finally:
        cv2.destroyAllWindows()
        # Stop the background asyncio loop and wait for its thread to exit.
        loop.call_soon_threadsafe(loop.stop)
        asyncio_thread.join()


if __name__ == "__main__":
    main()