import cv2
import numpy as np
import requests
import json
import time
import argparse
import mediapipe as mp
from collections import deque
import sys
import locale

# Force UTF-8 console output so the Chinese status messages below render
# correctly on Windows (whose default console codepage is often GBK/cp936).
if sys.platform == 'win32':
    # Re-wrap stdout with a UTF-8 encoder (Python 3.7+).
    sys.stdout.reconfigure(encoding='utf-8')
    # Report the system's default encoding for debugging encoding issues.
    system_encoding = locale.getpreferredencoding()
    print(f"系统默认编码: {system_encoding}")
    print("已将控制台输出编码设置为UTF-8")

# Module-level MediaPipe aliases shared by the extraction/drawing helpers.
mp_holistic = mp.solutions.holistic
mp_drawing = mp.solutions.drawing_utils

def get_sys_parameter(server_url):
    """Fetch the recognition system's parameters from the server.

    Args:
        server_url: Base URL of the server, e.g. "http://localhost:60504".

    Returns:
        dict: Parsed JSON response on success; on any request failure,
        ``{"sucess": False}``.  (The misspelled key "sucess" matches the
        server's response schema used throughout this client.)
    """
    url = f"{server_url}/getSysParameter"
    try:
        # Bound the wait so a hung/unreachable server cannot block the
        # client forever (the original call had no timeout).
        response = requests.get(url, timeout=10)
        return response.json()
    except (requests.exceptions.RequestException, ValueError):
        # RequestException covers ConnectionError, Timeout, etc.;
        # ValueError covers a non-JSON response body.
        print("无法连接到服务器，请确保服务器已启动")
        return {"sucess": False}

def predict_sign_language(server_url, skeleton_data, frame_len, keyframes_num):
    """Send collected skeleton frames to the server for recognition.

    Args:
        server_url: Base URL of the server.
        skeleton_data: numpy array of shape (keyframes_num, frame_len);
            flattened into a single list for transport.
        frame_len: Number of coordinate values per frame.
        keyframes_num: Number of frames in the sequence.

    Returns:
        dict: Parsed JSON response (expected keys: "sucess", "prediction");
        on any request failure, ``{"sucess": False}``.
    """
    url = f"{server_url}/predict"
    data = {
        # The server expects these counts as strings.
        "keyframes_num": str(keyframes_num),
        "frame_len": str(frame_len),
        "skeleton_data": skeleton_data.flatten().tolist()
    }
    try:
        # Inference can be slow, so allow a generous but finite timeout
        # (the original call could hang indefinitely).
        response = requests.post(url, data=json.dumps(data), timeout=30)
        return response.json()
    except (requests.exceptions.RequestException, ValueError):
        # RequestException covers ConnectionError, Timeout, etc.;
        # ValueError covers a non-JSON response body.
        print("无法连接到服务器，请确保服务器已启动")
        return {"sucess": False}

def extract_skeleton_from_frame(frame, holistic):
    """Run MediaPipe Holistic on one BGR frame and collect 2-D keypoints.

    Args:
        frame: BGR image (as delivered by OpenCV).
        holistic: An active ``mp_holistic.Holistic`` instance.

    Returns:
        tuple: (annotated BGR image with detected landmarks drawn,
        1-D numpy array of interleaved x/y coordinates for the pose,
        left-hand and right-hand landmarks — parts that were not
        detected contribute nothing, so the length varies per frame).
    """
    # MediaPipe consumes RGB; flag the buffer read-only during inference
    # as a MediaPipe performance hint, then restore writability so we can
    # draw on it afterwards.
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    rgb.flags.writeable = False
    results = holistic.process(rgb)
    rgb.flags.writeable = True
    annotated = cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR)

    # Fixed extraction/draw order: pose, then left hand, then right hand —
    # the server-side model depends on this ordering.
    parts = (
        (results.pose_landmarks, mp_holistic.POSE_CONNECTIONS),
        (results.left_hand_landmarks, mp_holistic.HAND_CONNECTIONS),
        (results.right_hand_landmarks, mp_holistic.HAND_CONNECTIONS),
    )

    coords = []
    for landmarks, connections in parts:
        if landmarks:
            for point in landmarks.landmark:
                coords.extend((point.x, point.y))
            mp_drawing.draw_landmarks(annotated, landmarks, connections)

    return annotated, np.array(coords)

def normalize_skeleton_data(skeleton_data, target_length):
    """Force a 1-D skeleton vector to exactly ``target_length`` elements.

    Longer inputs are truncated; shorter inputs are zero-padded at the
    end (missing landmarks simply read as zeros downstream).

    Args:
        skeleton_data: 1-D numpy array of keypoint coordinates.
        target_length: Required element count.

    Returns:
        numpy array of length ``target_length``.
    """
    current = len(skeleton_data)

    # Exact fit: hand the array back untouched.
    if current == target_length:
        return skeleton_data

    # Too long: keep only the leading target_length values.
    if current > target_length:
        return skeleton_data[:target_length]

    # Too short: append zeros up to the target size.
    pad = np.zeros(target_length - current)
    return np.concatenate([skeleton_data, pad])

def camera_based_recognition(server_url, keyframes_num, frame_len):
    """Interactive webcam loop: record skeleton sequences and send them
    to the server for sign-language recognition.

    Controls (in the OpenCV window): 'r' starts recording a sequence of
    ``keyframes_num`` frames, 'q' quits.

    Args:
        server_url: Base URL of the recognition server.
        keyframes_num: Number of frames collected per recognition request.
        frame_len: Number of coordinate values each frame is normalized to.
    """
    cap = cv2.VideoCapture(0)  # open the default camera (device index 0)
    
    if not cap.isOpened():
        print("无法打开摄像头，请检查摄像头连接")
        return
    
    # Initialize MediaPipe Holistic; the context manager releases its
    # internal resources when the loop exits.
    with mp_holistic.Holistic(
        min_detection_confidence=0.5,
        min_tracking_confidence=0.5) as holistic:
        
        # Rolling buffer of the most recent skeleton frames; maxlen caps
        # it at exactly one recognition sequence.
        frames_buffer = deque(maxlen=keyframes_num)
        
        # Loop state.
        is_recording = False
        prediction = "Waiting..."  # English text avoids cv2 font/encoding issues
        countdown = 0
        
        # cv2's built-in Hershey fonts cannot render Chinese; a CJK-capable
        # font (e.g. SimHei via PIL) would be needed for Chinese overlays.
        font = cv2.FONT_HERSHEY_SIMPLEX
        
        while True:
            ret, frame = cap.read()
            if not ret:
                print("无法获取视频帧")
                break
            
            # Downscale for faster MediaPipe processing.
            frame = cv2.resize(frame, (640, 480))
            
            # Detect and draw the skeleton on this frame.
            processed_frame, skeleton = extract_skeleton_from_frame(frame, holistic)
            
            # Pad/truncate to the fixed per-frame length the model expects.
            normalized_skeleton = normalize_skeleton_data(skeleton, frame_len)
            
            # While recording, accumulate frames into the buffer.
            if is_recording:
                frames_buffer.append(normalized_skeleton)
                
                # Draw a green progress bar (up to 600 px wide at 100%).
                progress = len(frames_buffer) / keyframes_num * 100
                cv2.rectangle(processed_frame, (20, 20), (int(20 + progress * 6), 40), (0, 255, 0), -1)
                
                # Once a full sequence is collected, request recognition.
                if len(frames_buffer) == keyframes_num:
                    # Stack buffered frames into a (keyframes_num, frame_len) array.
                    skeleton_data = np.array(list(frames_buffer))
                    
                    # Blocking HTTP call to the server.
                    print("发送识别请求...")
                    result = predict_sign_language(server_url, skeleton_data, frame_len, keyframes_num)
                    
                    # "sucess" spelling matches the server's response schema.
                    if result.get("sucess"):
                        prediction = result.get("prediction", "Unknown")
                        print(f"识别结果: {prediction}")
                    else:
                        prediction = "Failed"
                        print("识别失败")
                    
                    # Stop recording and show the result for ~60 frames.
                    is_recording = False
                    countdown = 60  # frames to keep displaying the result
            
            # Overlay the latest result (English/ASCII only — see font note).
            cv2.putText(processed_frame, f"Result: {prediction}", (20, 60), 
                        font, 0.6, (0, 255, 0), 2)
            
            if countdown > 0:
                countdown -= 1
            
            # On-screen key help.
            cv2.putText(processed_frame, "Press 'R' to Record, 'Q' to Quit", (20, 450), 
                        font, 0.5, (255, 255, 255), 1)
            
            # Show the annotated frame.
            cv2.imshow('Sign Language Recognition', processed_frame)
            
            # Keyboard handling (waitKey also drives the GUI event loop).
            key = cv2.waitKey(1) & 0xFF
            if key == ord('q'):
                break
            elif key == ord('r') and not is_recording:
                is_recording = True
                frames_buffer.clear()
                prediction = "Recording..."
                print("开始录制...")
        
        # Release the camera and close all OpenCV windows.
        cap.release()
        cv2.destroyAllWindows()

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="基于摄像头的手语识别客户端")
    parser.add_argument("--server_url", type=str, default="http://localhost:60504", help="服务器URL")
    args = parser.parse_args()
    
    # Ask the server for its model configuration before opening the camera.
    print("正在获取系统参数...")
    sys_params = get_sys_parameter(args.server_url)
    print(f"系统参数: {sys_params}")
    
    # "sucess" spelling matches the server's response schema.
    if not sys_params.get("sucess"):
        print("获取系统参数失败，请确保服务器已启动")
        # sys.exit is the proper way to terminate a script; the plain
        # exit() builtin is a site-module convenience for the REPL.
        sys.exit(1)
    
    # Sequence length and per-frame vector size, with defaults matching
    # the server's training configuration.
    keyframes_num = int(sys_params.get("keyframes_num", 36))
    frame_len = int(sys_params.get("frame_len", 24))
    
    # Print mixed Chinese/English text to verify console encoding works.
    print("=====================")
    print("编码测试/Encoding Test")
    print("中文显示测试")
    print("English Display Test")
    print("=====================")
    
    # Enter the interactive camera recognition loop.
    print("启动摄像头识别...")
    camera_based_recognition(args.server_url, keyframes_num, frame_len)
