import numpy as np
import tensorflow as tf
import joblib
import os
import cv2
import mediapipe as mp
import time  


# Paths to the trained Keras classifier and the fitted feature scaler.
# NOTE: relative paths ("模型" = "models" directory) — the script must be
# run from the project root for these to resolve.
model_path = "模型/pull_up_classifier.h5"
scaler_path = "模型/scaler.pkl"

# Abort early with a hint if the training artifacts are missing.
if not os.path.exists(model_path) or not os.path.exists(scaler_path):
    print(f"错误: 模型文件或标准化器文件不存在")
    print(f"请先运行 pull_up_classifier.py 训练模型")
    exit(1)

model = tf.keras.models.load_model(model_path)
scaler = joblib.load(scaler_path)

# MediaPipe Pose in static-image mode (each image is processed independently);
# model_complexity=2 selects the heaviest, most accurate pose model.
mp_pose = mp.solutions.pose
pose = mp_pose.Pose(static_image_mode=True, model_complexity=2, min_detection_confidence=0.5)
mp_drawing = mp.solutions.drawing_utils

# MediaPipe Pose landmark indices for the hand end-points. These are the
# pose model's pinky/index/thumb landmarks — coarse hand points, not the
# fine-grained fingertips of the dedicated hand-tracking solution.
HAND_TIPS_INDICES = [
    17,  # LEFT_PINKY
    19,  # LEFT_INDEX
    21,  # LEFT_THUMB
    18,  # RIGHT_PINKY
    20,  # RIGHT_INDEX
    22,  # RIGHT_THUMB
]

# Landmark indices used to estimate body height for normalization.
HEAD_TOP_INDEX = 0  # NOSE — used here as a proxy for the top of the head
FOOT_INDEX = 31  # LEFT_FOOT_INDEX (toe tip; 32 is the right foot) — note: NOT the ankle (27/28)

def extract_landmarks_from_image(image_path):
    """Extract MediaPipe pose landmarks from one image and normalize them.

    Normalization: translate all landmark coordinates so the centroid of
    the six hand end-point landmarks sits at the origin, then scale so
    that the vertical head-to-foot span maps to 200 units. The visibility
    channel is passed through unchanged.

    :param image_path: path to the input image
    :return: flat list of 33 * 4 values (x, y, z, visibility per landmark),
             or None if the file is missing/unreadable or no pose is found
    """
    # Guard clauses: missing file, unreadable image.
    if not os.path.exists(image_path):
        print(f"错误: 图片文件不存在: {image_path}")
        return None

    image = cv2.imread(image_path)
    if image is None:
        print(f"警告: 无法读取图片 {image_path}")
        return None

    height, width, channels = image.shape
    print(f"图片尺寸: {width}x{height}, 通道数: {channels}")

    # MediaPipe expects RGB; OpenCV loads images as BGR.
    results = pose.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
    if not results.pose_landmarks:
        print(f"警告: 在图片 {image_path} 中未检测到姿态关键点")
        return None

    # 33 landmarks x (x, y, z, visibility) as a single (33, 4) array.
    raw = np.array(
        [[lm.x, lm.y, lm.z, lm.visibility] for lm in results.pose_landmarks.landmark]
    )

    # Translation origin: centroid of the hand end-points (xyz only).
    hand_center = raw[HAND_TIPS_INDICES, :3].mean(axis=0)

    # Scale: map the head-to-foot vertical span to 200 units; fall back
    # to 1.0 if the span is degenerate (zero height difference).
    body_span = abs(raw[FOOT_INDEX, 1] - raw[HEAD_TOP_INDEX, 1])
    scale = 200.0 / body_span if body_span > 0 else 1.0

    normalized = np.empty_like(raw)
    normalized[:, :3] = (raw[:, :3] - hand_center) * scale
    normalized[:, 3] = raw[:, 3]  # visibility is copied through, not scaled

    return normalized.flatten().tolist()

def predict_pull_up(image_path):
    """Classify whether the person in the image completed a pull-up.

    :param image_path: path to the image to classify
    :return: tuple (prediction, probability) — prediction is 1 for success,
             0 otherwise; (None, None) if landmarks could not be extracted
    """
    features = extract_landmarks_from_image(image_path)
    if features is None:
        return None, None

    # Shape (1, n_features): scaler and model both expect a batch dimension.
    batch = scaler.transform(np.array([features]))

    # Sigmoid output of the binary classifier, thresholded at 0.5.
    probability = model.predict(batch)[0][0]
    return (1 if probability >= 0.5 else 0), probability

def visualize_prediction(image_path, prediction, probability):
    """Draw the detected pose skeleton and the prediction onto the image.

    The annotated image is written to the "预测结果" output directory with
    a timestamped filename so repeated runs never clobber earlier results.

    :param image_path: path to the original image
    :param prediction: predicted class (1 = success, 0 = failure)
    :param probability: model output probability for the positive class
    :return: path of the saved annotated image, or None if the image
             could not be read
    """
    output_dir = "预测结果"
    os.makedirs(output_dir, exist_ok=True)

    image = cv2.imread(image_path)
    if image is None:
        return None

    # Re-run pose detection so the skeleton can be drawn on the output
    # (the landmarks from the prediction pass are not retained).
    image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    results = pose.process(image_rgb)

    annotated_image = image.copy()
    if results.pose_landmarks:
        mp_drawing.draw_landmarks(
            annotated_image,
            results.pose_landmarks,
            mp_pose.POSE_CONNECTIONS,
            mp_drawing.DrawingSpec(color=(0, 255, 0), thickness=2, circle_radius=2),
            mp_drawing.DrawingSpec(color=(0, 0, 255), thickness=2)
        )

    # BUGFIX: cv2.putText only supports the built-in Hershey fonts, which
    # have no CJK glyphs — the previous Chinese label ("成功"/"未成功")
    # rendered as "??" on the saved image. Use an ASCII label instead.
    result_text = "Success" if prediction == 1 else "Fail"
    color = (0, 255, 0) if prediction == 1 else (0, 0, 255)  # green = success, red = fail

    cv2.putText(
        annotated_image,
        f"Prediction: {result_text} ({probability:.2f})",
        (10, 30),
        cv2.FONT_HERSHEY_SIMPLEX,
        1,
        color,
        2
    )

    # Timestamped output filename derived from the source image name.
    timestamp = time.strftime("%Y%m%d_%H%M%S")
    original_filename = os.path.basename(image_path)
    filename_without_ext = os.path.splitext(original_filename)[0]
    output_filename = f"{filename_without_ext}_predicted_{timestamp}.jpg"

    output_path = os.path.join(output_dir, output_filename)
    cv2.imwrite(output_path, annotated_image)
    print(f"预测结果图片已保存到: {output_path}")

    return output_path

def main():
    """Interactive entry point: prompt for an image path, classify it,
    print the result, and save an annotated visualization."""
    print("引体向上姿势识别模型预测")
    print("=" * 40)

    # Strip surrounding quotes that users often include when pasting paths.
    test_image_path = input("请输入要预测的图片路径: ").strip().strip('"').strip("'")

    if not os.path.exists(test_image_path):
        print(f"错误: 图片文件不存在: {test_image_path}")
        return

    prediction, probability = predict_pull_up(test_image_path)

    if prediction is None:
        print("无法进行预测，请检查图片是否包含可识别的人体姿态")
        return

    result_text = "成功" if prediction == 1 else "未成功"
    print(f"\n预测结果: 引体向上{result_text}")
    print(f"预测概率: {probability:.4f}")

    # BUGFIX: visualize_prediction returns None when the image cannot be
    # re-read; previously this unconditionally printed "…保存到: None".
    output_path = visualize_prediction(test_image_path, prediction, probability)
    if output_path is not None:
        print(f"可视化结果已保存到: {output_path}")

if __name__ == "__main__":
    main()