# -*- coding: utf-8 -*-
import asyncio
import time
import cv2
import argparse

import numpy as np

from server.ws_server import start_ws_server, broadcast_json, get_clients, attach_manager, send_video_frame_to_subscribers, get_subscribers
from vision.mediapipe_runner import MediaPipeRunner
from vision.draw_pose import draw_landmarks
from recognition.manager import ActionRecognitionManager
from common.protocol import make_actions_msg
from common.config import ACTION_THRESHOLD, TOP_K, HOST, PORT

# Action modules (each carries its own history state)
from recognition.actions.squat import SquatAction
from recognition.actions.wave import WaveAction
from recognition.actions.jump import JumpAction
from recognition.actions.swipe import Swipe

# Maps CLI action names (as accepted by --actions) to their recognizer classes.
# Classes are instantiated in main(); per L16-L20 each action module carries its
# own history state, so a fresh instance is created per run.
ACTION_REGISTRY = {
    "squat": SquatAction,
    "wave": WaveAction,
    "jump": JumpAction,
    "swipe": Swipe,
}

def parse_args():
    """Parse command-line options for the recognition server.

    Returns:
        argparse.Namespace with attributes: actions (comma-separated string),
        camera_index (int), host (str), port (int).
    """
    parser = argparse.ArgumentParser(description="MediaPipe + WebSocket Action Recognition")

    available = ", ".join(sorted(ACTION_REGISTRY.keys()))
    parser.add_argument(
        "--actions",
        type=str,
        # Default mirrors the historical behavior: register only Swipe.
        default="swipe",
        help=(
            "Comma-separated action names to register. "
            "Use 'all' for all actions, 'none' for no actions. "
            f"Available: {available}"
        ),
    )
    parser.add_argument(
        "--camera-index",
        type=int,
        default=0,
        help="Camera index for MediaPipeRunner (default: 0)",
    )
    parser.add_argument(
        "--host",
        type=str,
        default=HOST,
        help=f"WebSocket host (default: {HOST})",
    )
    parser.add_argument(
        "--port",
        type=int,
        default=PORT,
        help=f"WebSocket port (default: {PORT})",
    )

    return parser.parse_args()


def resolve_actions_arg(arg_value: str):
    """Translate the --actions CLI value into a list of action constructors.

    Special values: "all" selects every registered action, "none" (or an
    empty string) selects nothing. Otherwise the value is treated as a
    comma-separated, case-insensitive list of names; unknown names are
    reported with a warning and skipped.
    """
    if not arg_value:
        return []

    value = arg_value.strip().lower()
    if value == "all":
        return list(ACTION_REGISTRY.values())
    if value == "none":
        return []

    names = [part.strip().lower() for part in value.split(",") if part.strip()]

    # Partition into known constructors vs. unknown names.
    unknown = [name for name in names if name not in ACTION_REGISTRY]
    if unknown:
        print(f"[WARN] Unknown actions ignored: {', '.join(unknown)}. "
              f"Available: {', '.join(sorted(ACTION_REGISTRY.keys()))}")

    return [ACTION_REGISTRY[name] for name in names if name in ACTION_REGISTRY]

async def vision_loop(mp_runner, manager, host=HOST, port=PORT):
    """Consume camera frames, recognize actions, and broadcast results.

    For every frame produced by `mp_runner`: update the manager's latest
    poses, run recognition per person, broadcast top actions to connected
    WebSocket clients, stream the (annotated) frame to video subscribers,
    and show a local preview window. Exits on ESC or when the runner stops.

    Args:
        mp_runner: MediaPipeRunner yielding frame results (persons + BGR frame).
        manager: ActionRecognitionManager holding registered actions and pose state.
        host: Host shown in the on-screen overlay (defaults to config HOST).
        port: Port shown in the on-screen overlay (defaults to config PORT).
              Pass the actual bind address here when it differs from the config
              defaults, so the overlay matches reality.
    """
    async for frame_result in mp_runner.run():
        ts = time.time()
        persons = frame_result.persons
        frame = frame_result.frame_bgr  # current BGR frame, annotated in place below

        manager.update_latest_poses(persons)

        # Recognize actions per person and broadcast the top candidates.
        latest = manager.get_all_latest_poses()
        for person_id, lms in latest.items():
            actions = manager.recognize_actions(lms, person_id)
            top_actions = manager.select_top_actions(actions, threshold=ACTION_THRESHOLD, topk=TOP_K)
            if top_actions:
                msg = make_actions_msg(ts, person_id, top_actions)
                print(msg)
                if get_clients():  # skip the broadcast entirely when nobody is connected
                    await broadcast_json(msg)

            # Draw this person's keypoints/skeleton on the preview frame.
            draw_landmarks(frame, lms)

        # Overlay status text.
        info = f"WS ws://{host}:{port} | Persons={len(persons)} | ESC to quit"
        cv2.putText(frame, info, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.55, (0, 255, 0), 2)

        if get_subscribers():
            # BGR -> RGB8 (3 channels); subscribers receive raw RGB bytes.
            frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # uint8, shape (H, W, 3)
            h, w, c = frame_rgb.shape
            if not frame_rgb.flags["C_CONTIGUOUS"]:
                frame_rgb = np.ascontiguousarray(frame_rgb)  # tobytes() needs a contiguous buffer
            payload = frame_rgb.tobytes()
            await send_video_frame_to_subscribers(w, h, c, 1, payload)  # fmt: 0 = RGBA8, 1 = RGB8

        cv2.imshow("Pose Preview", frame)
        # NOTE: OpenCV GUI and asyncio don't mix perfectly; waitKey(1) keeps the
        # window responsive without blocking the event loop for long.
        if cv2.waitKey(1) & 0xFF == 27:  # ESC
            break

    # Clean up preview windows on exit.
    cv2.destroyAllWindows()

async def main():
    """Entry point: start the WS server, wire up recognition, run the vision loop."""
    args = parse_args()

    # Start the WebSocket server.
    server = await start_ws_server(host=args.host, port=args.port)
    print(f"[WS] running at ws://{args.host}:{args.port}")

    # Initialize the MediaPipe capture and the recognition manager.
    mp_runner = MediaPipeRunner(camera_index=args.camera_index)
    manager = ActionRecognitionManager()

    action_ctors = resolve_actions_arg(args.actions)
    if action_ctors:
        for ctor in action_ctors:
            manager.register_action(ctor())
        # Derive the registered names from the resolved constructors. The old
        # code intersected registry keys with the raw CLI tokens, which printed
        # an empty list for "--actions all".
        registered = sorted(
            name for name, ctor in ACTION_REGISTRY.items() if ctor in action_ctors
        )
        print(f"[INFO] Registered actions: {', '.join(registered)}")
    else:
        print("[INFO] No actions registered.")

    # Expose the manager to the WS layer so clients can use the "joint query" protocol.
    attach_manager(manager)

    try:
        await vision_loop(mp_runner, manager)
    finally:
        # wait_closed() only returns after close() has been called; without the
        # close() the old code blocked forever and the process never exited.
        server.close()
        await server.wait_closed()


if __name__ == "__main__":
    asyncio.run(main())
