from ultralytics import YOLO
import cv2
import os
import json
from pathlib import Path
import numpy as np
import shutil
import random

def calculate_bbox_from_keypoints(keypoints, img_width, img_height, padding=0.1):
    """Compute a YOLO-format bounding box from a flat keypoint list.

    Args:
        keypoints: Flat list [x1, y1, v1, x2, y2, v2, ...] where x/y are
            normalized to [0, 1] and v is the visibility/confidence value.
        img_width: Image width in pixels.
        img_height: Image height in pixels.
        padding: Fractional margin added around the tight keypoint box.

    Returns:
        [x_center, y_center, width, height] normalized to [0, 1], or
        [0, 0, 0, 0] when no keypoint is visible.
    """
    # Pixel coordinates of every visible keypoint (v > 0 only).
    xs = [keypoints[i] * img_width
          for i in range(0, len(keypoints), 3) if keypoints[i + 2] > 0]
    ys = [keypoints[i + 1] * img_height
          for i in range(0, len(keypoints), 3) if keypoints[i + 2] > 0]

    # No visible keypoints -> degenerate zero box.
    if not xs or not ys:
        return [0, 0, 0, 0]

    x_lo, x_hi = min(xs), max(xs)
    y_lo, y_hi = min(ys), max(ys)

    # Expand the tight box by `padding` of its own size, clamped to the image.
    pad_x = (x_hi - x_lo) * padding
    pad_y = (y_hi - y_lo) * padding
    x_lo = max(0, x_lo - pad_x)
    y_lo = max(0, y_lo - pad_y)
    x_hi = min(img_width, x_hi + pad_x)
    y_hi = min(img_height, y_hi + pad_y)

    # Convert to the normalized YOLO center/size representation.
    return [
        (x_lo + x_hi) / 2 / img_width,
        (y_lo + y_hi) / 2 / img_height,
        (x_hi - x_lo) / img_width,
        (y_hi - y_lo) / img_height,
    ]

def process_frame(frame, frame_count, model, output_dir, is_train=True):
    """Run pose detection on one frame and save image + YOLO pose labels.

    Fix over the original: persons whose keypoints are all invisible
    produced an invalid zero-area bbox line (``0 0 0 0 0 ...``) in the
    label file, and image files were written even when no usable label
    existed. Labels are now built first; degenerate boxes are skipped and
    nothing is written unless at least one valid annotation remains.

    Args:
        frame: BGR image (numpy array) as read by OpenCV.
        frame_count: Frame index, used to name output files.
        model: Loaded ultralytics YOLO pose model.
        output_dir: Dataset root Path; images/labels/visualization
            subdirectories are created under it.
        is_train: If True the sample goes to the train split, else val.

    Returns:
        True when at least one person with a valid bbox was saved,
        False otherwise (no files are written in that case).
    """
    # Run inference.
    results = model(frame)

    keypoints = results[0].keypoints.data
    if len(keypoints) == 0:  # no person detected, skip this frame
        return False

    h, w = frame.shape[:2]

    # Build label lines first so files are only written for frames that
    # yield at least one usable annotation.
    label_lines = []
    for person_keypoints in keypoints:
        # Normalize keypoints to [0, 1] image coordinates.
        keypoints_normalized = []
        for kp in person_keypoints:
            x, y = kp[0].item() / w, kp[1].item() / h
            conf = kp[2].item() if len(kp) > 2 else 1.0
            keypoints_normalized.extend([x, y, conf])

        bbox = calculate_bbox_from_keypoints(keypoints_normalized, w, h)
        # Skip persons with no (or a single) visible keypoint: their bbox
        # degenerates to zero area, which is an invalid YOLO training label.
        if bbox[2] <= 0 or bbox[3] <= 0:
            continue

        # YOLO pose format: class x_center y_center width height kp1_x kp1_y kp1_conf ...
        label_lines.append("0 " + " ".join(map(str, bbox + keypoints_normalized)))

    if not label_lines:
        return False

    # Create output directories for the chosen split.
    split = "train" if is_train else "val"
    images_dir = output_dir / "images" / split
    labels_dir = output_dir / "labels" / split
    visualization_dir = output_dir / "visualization" / split
    for d in (images_dir, labels_dir, visualization_dir):
        d.mkdir(parents=True, exist_ok=True)

    # Save the raw frame.
    image_path = images_dir / f"frame_{frame_count:06d}.jpg"
    cv2.imwrite(str(image_path), frame)

    # Save the annotated frame for manual review.
    vis_path = visualization_dir / f"frame_{frame_count:06d}.jpg"
    annotated_frame = results[0].plot()
    cv2.imwrite(str(vis_path), annotated_frame)

    # Write the YOLO label file, one line per kept person.
    label_path = labels_dir / f"frame_{frame_count:06d}.txt"
    with open(label_path, 'w') as f:
        f.write("\n".join(label_lines) + "\n")

    print(f"Saved frame {frame_count} with {len(label_lines)} person(s) to {split} set")
    return True

def main(video_path="match.mp4", output_dir="dataset", model_path="yolo11x-pose.pt",
         frame_interval=10, train_ratio=0.8):
    """Extract pose-annotated frames from a video into a YOLO pose dataset.

    Improvements over the original: the hard-coded paths and sampling
    constants are now keyword parameters (defaults preserve the old
    behavior), ``import yaml`` is no longer buried inside the file-write
    ``with`` block, and the capture device is released via try/finally
    even if processing raises.

    Args:
        video_path: Input video file.
        output_dir: Root directory of the generated dataset.
        model_path: Weights file for the ultralytics pose model.
        frame_interval: Process every Nth frame.
        train_ratio: Probability that a processed frame goes to the
            train split (the rest go to val).
    """
    import yaml  # third-party; kept function-local as in the original script

    output_dir = Path(output_dir)

    # Load the pose-estimation model.
    model = YOLO(model_path)

    # Open the input video.
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        print(f"Error: Could not open video file {video_path}")
        return

    fps = cap.get(cv2.CAP_PROP_FPS)
    frame_count = 0
    processed_count = 0

    print(f"Video FPS: {fps}")
    print("Processing video...")

    try:
        while cap.isOpened():
            success, frame = cap.read()
            if not success:
                break

            # Sample every `frame_interval`-th frame.
            if frame_count % frame_interval == 0:
                # Randomly assign the frame to the train or val split.
                is_train = random.random() < train_ratio
                if process_frame(frame, frame_count, model, output_dir, is_train):
                    processed_count += 1

            frame_count += 1

            # Progress report.
            if frame_count % 100 == 0:
                print(f"Processed {frame_count} frames, saved {processed_count} frames")
    finally:
        # Always release the capture, even if processing raised.
        cap.release()

    # Write the ultralytics dataset configuration file.
    dataset_config = {
        "path": str(output_dir),
        "train": str(output_dir / "images" / "train"),
        "val": str(output_dir / "images" / "val"),
        "nc": 1,  # number of classes
        "names": ["person"],  # class names
        "kpt_shape": [17, 3],  # 17 keypoints x (x, y, visibility) -- COCO layout, TODO confirm for this model
        "visualization_dir": str(output_dir / "visualization")
    }

    with open(output_dir / "dataset.yaml", 'w') as f:
        yaml.dump(dataset_config, f, default_flow_style=False)

    print("\nProcessing completed!")
    print(f"Total frames processed: {frame_count}")
    print(f"Frames saved: {processed_count}")
    print("Dataset configuration file created!")
    print("\nYou can now:")
    print("1. Check the visualization images in dataset/visualization/")
    print("2. Manually review and remove any low-quality samples")
    print("3. Use the dataset to train yolo11n-pose.pt")

# Run only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()