import cv2
import mediapipe as mp
import os
import csv
import numpy as np


mp_pose = mp.solutions.pose
# Static-image mode processes each picture independently (no video tracking);
# model_complexity=2 selects the heaviest, most accurate pose model.
pose = mp_pose.Pose(static_image_mode=True, model_complexity=2, min_detection_confidence=0.5)

# Relative image root (assumes the script is run from the project root,
# i.e. the "引体向上检测软件" folder).
image_base_dir = "image"


if not os.path.exists(image_base_dir):
    print(f"错误: 图片基础目录 '{image_base_dir}' 不存在。请确保该目录存在并包含所需的子文件夹。")
    print(f"当前工作目录: {os.getcwd()}")
    exit(1)


# Class sub-folders: "成功引体向上" = successful pull-ups, "没做到引体向上" = failed ones.
successful_pullup_dir = os.path.join(image_base_dir, "成功引体向上")
failed_pullup_dir = os.path.join(image_base_dir, "没做到引体向上")


if not os.path.exists(successful_pullup_dir):
    print(f"错误: '成功引体向上' 文件夹不存在: {successful_pullup_dir}")
    exit(1)
if not os.path.exists(failed_pullup_dir):
    print(f"错误: '没做到引体向上' 文件夹不存在: {failed_pullup_dir}")
    exit(1)


# Output location of the generated CSV dataset ("数据集" = dataset).
dataset_dir = "数据集" 
os.makedirs(dataset_dir, exist_ok=True)  
dataset_file_path = os.path.join(dataset_dir, "pull_up_landmarks.csv")


# MediaPipe Pose landmark indices for the six hand/finger-tip landmarks.
# Their mean position is used as the coordinate origin during normalization.
HAND_TIPS_INDICES = [
    17,  # LEFT_PINKY (left pinky tip)
    19,  # LEFT_INDEX (left index-finger tip)
    21,  # LEFT_THUMB (left thumb tip)
    18,  # RIGHT_PINKY (right pinky tip)
    20,  # RIGHT_INDEX (right index-finger tip)
    22,  # RIGHT_THUMB (right thumb tip)
]


# NOTE(review): MediaPipe Pose index 0 is the NOSE landmark, used here as a
# proxy for the top of the head; index 31 is LEFT_FOOT_INDEX (left foot only,
# the right foot is ignored) — confirm this is intentional.
HEAD_TOP_INDEX = 0 
FOOT_INDEX = 31  


SCALE_FACTORS = [1.0, 1.1, 1.2]  # scale multipliers used for data augmentation

def extract_landmarks_from_image(image_path):
    """
    Extract pose landmarks from a single image and normalize them.

    Coordinates are re-centered on the mean of the six hand-tip landmarks
    and rescaled so the head-to-foot vertical span maps to 200 units.

    :param image_path: path to the image file
    :return: flat list of 33*4 normalized values (x, y, z, visibility per
             landmark), or None when the file is invalid or no pose is found
    """
    # Guard: the file must exist on disk.
    if not os.path.exists(image_path):
        print(f"错误: 图片文件不存在: {image_path}")
        return None

    # Guard: skip hidden files (e.g. .DS_Store).
    if os.path.basename(image_path).startswith('.'):
        print(f"跳过隐藏文件: {image_path}")
        return None

    # Guard: only accept common raster-image extensions.
    file_ext = os.path.splitext(image_path)[1].lower()
    if file_ext not in ('.jpg', '.jpeg', '.png', '.bmp'):
        print(f"跳过非图片文件: {image_path} (扩展名: {file_ext})")
        return None

    frame = cv2.imread(image_path)
    if frame is None:
        print(f"警告: 无法读取图片 {image_path}")
        print(f"  - 请检查图片格式是否正确")
        print(f"  - 请检查图片是否已损坏")
        print(f"  - 请检查是否有足够的权限访问该文件")
        return None

    img_h, img_w, img_c = frame.shape
    print(f"  图片尺寸: {img_w}x{img_h}, 通道数: {img_c}")

    # MediaPipe expects RGB; OpenCV loads BGR.
    results = pose.process(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
    if not results.pose_landmarks:
        print(f"警告: 在图片 {image_path} 中未检测到姿态关键点")
        return None

    # 33 landmarks x (x, y, z, visibility).
    pts = np.array([[lm.x, lm.y, lm.z, lm.visibility]
                    for lm in results.pose_landmarks.landmark])

    # Origin: mean of the six hand-tip landmarks (x, y, z only).
    grip_center = np.mean(pts[HAND_TIPS_INDICES, :3], axis=0)

    # Scale so the vertical head-to-foot distance becomes 200 units;
    # fall back to 1.0 if the span is degenerate.
    body_span = abs(pts[FOOT_INDEX, 1] - pts[HEAD_TOP_INDEX, 1])
    scale = 200.0 / body_span if body_span > 0 else 1.0

    normalized = np.zeros_like(pts)
    normalized[:, :3] = (pts[:, :3] - grip_center) * scale
    normalized[:, 3] = pts[:, 3]  # visibility passes through unchanged

    return normalized.flatten().tolist()

def apply_scale_augmentation(landmarks_data, scale_factors=None):
    """
    Generate augmented copies of a landmark vector by rescaling x/y axes.

    Every pairwise combination of the scale factors — except the identity
    (1.0, 1.0) pair, which would duplicate the original — produces one extra
    sample. The untouched original vector is always the first element.

    :param landmarks_data: flat list of 33*4 landmark values
                           (x, y, z, visibility per landmark), or None
    :param scale_factors: iterable of multipliers applied independently to
                          the x and y axes; defaults to the module-level
                          SCALE_FACTORS (backward-compatible generalization)
    :return: list of flat landmark lists (original first, then augmented);
             empty list when landmarks_data is None
    """
    if landmarks_data is None:
        return []

    if scale_factors is None:
        scale_factors = SCALE_FACTORS

    # Reshape the flat vector into 33 landmarks x (x, y, z, visibility).
    landmarks = np.array(landmarks_data).reshape(33, 4)

    # The original sample always comes first.
    augmented_data = [landmarks_data]

    for x_scale in scale_factors:
        for y_scale in scale_factors:
            # Skip the identity pair — it would just repeat the original.
            if x_scale == 1.0 and y_scale == 1.0:
                continue

            scaled = landmarks.copy()
            scaled[:, 0] *= x_scale  # x coordinates
            scaled[:, 1] *= y_scale  # y coordinates
            # z and visibility are left untouched.
            augmented_data.append(scaled.flatten().tolist())

    return augmented_data

def get_processed_image_paths(csv_path=None):
    """
    Estimate how many data rows the dataset CSV already contains.

    The CSV stores only landmark values, not source image paths, so the
    original paths cannot be recovered. Each existing data row is therefore
    represented by a unique placeholder so that len() of the returned set
    reflects the number of rows already present.

    :param csv_path: dataset CSV to inspect; defaults to the module-level
                     dataset_file_path (backward-compatible parameter)
    :return: set with one unique placeholder per existing data row; empty
             set when the file is absent or unreadable
    """
    if csv_path is None:
        csv_path = dataset_file_path

    processed_images = set()

    if not os.path.exists(csv_path):
        return processed_images

    try:
        with open(csv_path, 'r', newline='') as csvfile:
            reader = csv.reader(csvfile)
            # Skip the header; the None default avoids StopIteration on an
            # empty file (the old bare next() raised inside the try).
            next(reader, None)

            # BUG FIX: the previous code added the same "dummy" string for
            # every row, so the set never grew past one element and the
            # reported processed count was always ~1. Use a unique marker
            # per row instead.
            for row_index, _ in enumerate(reader):
                processed_images.add(f"row_{row_index}")
    except Exception as e:
        print(f"读取已有数据集时出错: {e}")

    return processed_images

def create_dataset():
    """
    Create (or append to) the CSV dataset of pose landmarks and labels.

    Each row holds 33 landmarks x (x, y, z, visibility) followed by a label
    column: 1 = successful pull-up, 0 = failed pull-up. If the CSV already
    exists, new rows are appended after the existing ones.

    NOTE(review): processed_images is fetched but never used to skip files,
    so re-running this script re-processes every image and appends duplicate
    rows — confirm whether that is intended.
    """
    # Build the CSV header: 4 columns per landmark plus the label.
    header = []
    for i in range(33): 
        header.extend([f'landmark_{i}_x', f'landmark_{i}_y', f'landmark_{i}_z', f'landmark_{i}_visibility'])
    header.append('label') # last column: 0 = failed pull-up, 1 = successful pull-up

    # NOTE(review): duplicates the extension list in extract_landmarks_from_image.
    valid_extensions = ['.jpg', '.jpeg', '.png', '.bmp']
    
    def is_valid_image(file_path):
        # Accept only regular, non-hidden files with a known image extension.
        if not os.path.isfile(file_path):
            return False
        if os.path.basename(file_path).startswith('.'):
            return False
        file_ext = os.path.splitext(file_path)[1].lower()
        return file_ext in valid_extensions
    
    successful_images = [os.path.join(successful_pullup_dir, f) for f in os.listdir(successful_pullup_dir) 
                         if is_valid_image(os.path.join(successful_pullup_dir, f))]
    
    failed_images = [os.path.join(failed_pullup_dir, f) for f in os.listdir(failed_pullup_dir) 
                     if is_valid_image(os.path.join(failed_pullup_dir, f))]

 
    print(f"找到 {len(successful_images)} 张有效的 '成功引体向上' 图片")
    print(f"找到 {len(failed_images)} 张有效的 '没做到引体向上' 图片")

    if not successful_images and not failed_images:
        print("警告: '成功引体向上' 和 '没做到引体向上' 文件夹中都没有有效图片。请添加图片后再运行。")
        if not os.path.exists(dataset_file_path):
            # No data at all: still create a header-only CSV so downstream
            # tooling has a file to read.
            with open(dataset_file_path, 'w', newline='') as csvfile:
                writer = csv.writer(csvfile)
                writer.writerow(header) 
            print(f"已创建空的CSV文件: {dataset_file_path} (仅包含表头)")
        else:
            print(f"数据集文件已存在: {dataset_file_path}，无需创建新文件")
        return
    elif not successful_images:
        print("警告: '成功引体向上' 文件夹中没有有效图片。")
    elif not failed_images:
        print("警告: '没做到引体向上' 文件夹中没有有效图片。")


    file_exists = os.path.exists(dataset_file_path)
    
    # Rough count of rows already in the CSV — informational only; it does
    # not prevent duplicate processing (see the NOTE in the docstring).
    processed_images = get_processed_image_paths()
    processed_count = len(processed_images)
    
    if file_exists:
        print(f"数据集文件已存在: {dataset_file_path}")
        print(f"已处理过约 {processed_count} 张图片")
        print("将追加新数据到现有数据集...")
        # Append mode: existing rows are preserved.
        mode = 'a'
    else:
        print(f"数据集文件不存在，将创建新文件: {dataset_file_path}")
        # Write mode: fresh file; the header is written below.
        mode = 'w'

    with open(dataset_file_path, mode, newline='') as csvfile:
        writer = csv.writer(csvfile)
        
        # Only a brand-new file gets the header row.
        if mode == 'w':
            writer.writerow(header)


        print(f"正在处理 '成功引体向上' 文件夹中的图片...")
        for image_path in successful_images:
            print(f"  处理图片: {os.path.basename(image_path)}")
            landmarks = extract_landmarks_from_image(image_path)
            if landmarks:
                # Data augmentation: the original plus rescaled copies.
                augmented_landmarks_list = apply_scale_augmentation(landmarks)
                print(f"    生成了 {len(augmented_landmarks_list)} 组增强数据")
                
                # Write every augmented sample with label 1 (successful).
                for aug_landmarks in augmented_landmarks_list:
                    writer.writerow(aug_landmarks + [1])  

        print(f"正在处理 '没做到引体向上' 文件夹中的图片...")
        for image_path in failed_images:
            print(f"  处理图片: {os.path.basename(image_path)}")
            landmarks = extract_landmarks_from_image(image_path)
            if landmarks:
                # Same augmentation for the failed class.
                augmented_landmarks_list = apply_scale_augmentation(landmarks)
                print(f"    生成了 {len(augmented_landmarks_list)} 组增强数据")
                
                # Write every augmented sample with label 0 (failed).
                for aug_landmarks in augmented_landmarks_list:
                    writer.writerow(aug_landmarks + [0]) 
    
    print(f"数据集已成功{'更新' if file_exists else '创建'}并保存到: {dataset_file_path}")

if __name__ == "__main__":
    # Print path diagnostics up front so a wrong working directory is easy
    # to spot before any image processing starts.
    print(f"当前工作目录: {os.getcwd()}")
    print(f"图片基础目录的绝对路径: {os.path.abspath(image_base_dir)}")
    print(f"'成功引体向上'文件夹的绝对路径: {os.path.abspath(successful_pullup_dir)}")
    print(f"'没做到引体向上'文件夹的绝对路径: {os.path.abspath(failed_pullup_dir)}")
    
    create_dataset()