import csv
import os
from collections import deque

import cv2
import mediapipe as mp
import numpy as np

# Initialize MediaPipe pose-estimation helpers.
mp_pose = mp.solutions.pose
mp_drawing = mp.solutions.drawing_utils

# Input video path (0 would select the default camera instead of a file).
video_path = '/mnt2/lth/pose_track/jiangshiwu.mp4'  # replace with your own video path
output_video_path = 'jiangshiwu_result.mp4'
output_csv_path = 'jiangshiwu_landmarks_data.csv'

# Open the video file (or camera).
cap = cv2.VideoCapture(video_path)

# Abort early if the source could not be opened.
if not cap.isOpened():
    raise ValueError("无法打开视频文件或摄像头！")

# Query frame rate and dimensions so the output writer matches the input.
fps = cap.get(cv2.CAP_PROP_FPS)
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

# Create the annotated-output video writer (mp4v codec).
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
out = cv2.VideoWriter(output_video_path, fourcc, fps, (width, height))

# Create the CSV file and write the header row.  `fieldnames` stays in
# module scope and is reused when appending rows inside the main loop.
with open(output_csv_path, 'w', newline='') as csvfile:
    fieldnames = ['frame_index', 'person_id', 'pose_index', 'x', 'y', 'z', 'visibility']
    writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
    writer.writeheader()

# Person-tracking state.
next_person_id = 0
max_missing_frames = 10  # drop a track after this many consecutive missed frames
tracked_persons = []  # list of currently tracked TrackedPerson objects

class TrackedPerson:
    """One tracked person: latest landmarks, bounding box, and liveness state."""

    # Number of recent landmark sets retained (for interpolation/smoothing).
    HISTORY_LEN = 5

    def __init__(self, person_id, landmarks, bbox):
        self.person_id = person_id
        self.landmarks = landmarks  # most recent MediaPipe landmark set
        self.bbox = bbox  # (x, y, w, h) in pixel coordinates
        self.missing_frames = 0  # consecutive frames without a match
        # deque(maxlen=...) evicts the oldest entry in O(1); the previous
        # list.append + list.pop(0) approach paid O(n) per trim.
        self.history = deque(maxlen=self.HISTORY_LEN)

    def update(self, landmarks, bbox):
        """Record a fresh detection and reset the missing-frame counter."""
        self.landmarks = landmarks
        self.bbox = bbox
        self.missing_frames = 0
        # maxlen handles the "keep only the most recent 5 frames" rule.
        self.history.append(landmarks)

    def mark_missing(self):
        """Count one more frame in which this person was not detected."""
        self.missing_frames += 1

    def is_expired(self):
        """True once missing longer than the module-level `max_missing_frames`."""
        return self.missing_frames > max_missing_frames

# 计算两个边界框的IOU
def calculate_iou(bbox1, bbox2):
    """Return the intersection-over-union of two (x, y, w, h) boxes."""
    ax, ay, aw, ah = bbox1
    bx, by, bw, bh = bbox2

    # Overlap rectangle dimensions, clamped to zero for disjoint boxes.
    overlap_w = max(0, min(ax + aw, bx + bw) - max(ax, bx))
    overlap_h = max(0, min(ay + ah, by + bh) - max(ay, by))
    intersection = overlap_w * overlap_h

    # Union = sum of the two areas minus the shared overlap.
    union = aw * ah + bw * bh - intersection

    # Tiny epsilon guards against division by zero for degenerate boxes.
    return intersection / float(union + 1e-6)

# 从关键点估计边界框
def get_bbox_from_landmarks(landmarks, width, height):
    x_coords = [lmk.x for lmk in landmarks.landmark]
    y_coords = [lmk.y for lmk in landmarks.landmark]
    
    x_min = max(0, int(min(x_coords) * width))
    y_min = max(0, int(min(y_coords) * height))
    x_max = min(width, int(max(x_coords) * width))
    y_max = min(height, int(max(y_coords) * height))
    
    return (x_min, y_min, x_max - x_min, y_max - y_min)

# Initialize the Pose model.  Segmentation is enabled to obtain better
# bounding-box estimates; model_complexity=2 selects the most accurate
# (and slowest) model variant.
with mp_pose.Pose(static_image_mode=False,
                  model_complexity=2,
                  enable_segmentation=True,
                  min_detection_confidence=0.5,
                  min_tracking_confidence=0.5) as pose:

    frame_index = 0

    while True:
        # Read one frame from the video.
        ret, frame = cap.read()

        # A failed read means the video has ended.
        if not ret:
            break

        frame_index += 1
        print(f"处理第 {frame_index}/{total_frames} 帧...")

        # OpenCV frames are BGR; MediaPipe expects RGB.
        rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        # Run pose estimation on this frame.
        results = pose.process(rgb_frame)

        # Draw onto a copy so the raw frame is preserved.
        annotated_frame = frame.copy()

        # Persons detected in the current frame.
        current_persons = []

        # Process the detected pose, if any.
        if results.pose_landmarks:
            # Wrap the single result in a list so the loop below is written
            # for the multi-person case.  NOTE(review): mp_pose.Pose returns
            # at most ONE pose per frame, so this list always has one element
            # and only one person can actually be tracked at a time.
            all_landmarks = [results.pose_landmarks]

            # Build a bounding box for each detection and associate it with
            # an existing track via IOU matching.
            for landmarks in all_landmarks:
                bbox = get_bbox_from_landmarks(landmarks, width, height)

                # Find the best-overlapping existing track.
                best_match_idx = -1
                best_iou = 0.3  # minimum IOU required to accept a match

                for i, tracked_person in enumerate(tracked_persons):
                    iou = calculate_iou(bbox, tracked_person.bbox)
                    if iou > best_iou:
                        best_iou = iou
                        best_match_idx = i

                # Update the matched track, or start a new one.
                if best_match_idx != -1:
                    tracked_persons[best_match_idx].update(landmarks, bbox)
                    current_persons.append(tracked_persons[best_match_idx])
                else:
                    # No sufficiently-overlapping track: create a new person.
                    new_person = TrackedPerson(next_person_id, landmarks, bbox)
                    next_person_id += 1
                    current_persons.append(new_person)

            # Any track not matched this frame counts as missing.
            for tracked_person in tracked_persons:
                if tracked_person not in current_persons:
                    tracked_person.mark_missing()

            # Drop tracks that have been missing for too long.
            tracked_persons = [p for p in tracked_persons if not p.is_expired()]

            # Register newly created persons as active tracks.
            for person in current_persons:
                if person not in tracked_persons:
                    tracked_persons.append(person)
        else:
            # Nothing detected: every existing track is missing this frame.
            for tracked_person in tracked_persons:
                tracked_person.mark_missing()

            # Drop tracks that have been missing for too long.
            tracked_persons = [p for p in tracked_persons if not p.is_expired()]

        # Append this frame's landmark rows to the CSV and draw the overlay.
        # NOTE(review): the CSV is reopened in append mode on every frame;
        # holding one handle open for the whole run would avoid the
        # per-frame open/close cost.
        with open(output_csv_path, 'a', newline='') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)

            # Draw each person's skeleton, bounding box, and ID.
            for person in current_persons:
                person_id = person.person_id
                landmarks = person.landmarks

                # Draw the skeleton connections.
                mp_drawing.draw_landmarks(
                    annotated_frame,
                    landmarks,
                    mp_pose.POSE_CONNECTIONS,
                    mp_drawing.DrawingSpec(color=(0, 255, 0), thickness=2, circle_radius=3),
                    mp_drawing.DrawingSpec(color=(0, 0, 255), thickness=2))

                # Bounding-box position in pixels.
                x, y, w, h = person.bbox

                # Draw the box and the person ID just above it.
                cv2.rectangle(annotated_frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
                cv2.putText(annotated_frame, f"Person {person_id}", (x, y - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.9, (255, 0, 0), 2)

                # Label each landmark with its index and write its CSV row.
                for pose_index, landmark in enumerate(landmarks.landmark):
                    # Convert normalized coordinates to pixel coordinates.
                    cx, cy = int(landmark.x * width), int(landmark.y * height)

                    # Draw the landmark index next to the point.
                    cv2.putText(annotated_frame, str(pose_index), (cx + 10, cy),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)

                    # Persist the normalized landmark data (x/y/z are the
                    # raw MediaPipe normalized values, not pixels).
                    writer.writerow({
                        'frame_index': frame_index,
                        'person_id': person_id,
                        'pose_index': pose_index,
                        'x': landmark.x,
                        'y': landmark.y,
                        'z': landmark.z,
                        'visibility': landmark.visibility
                    })

            # Emit a sentinel row when no person was detected this frame.
            if not current_persons:
                writer.writerow({
                    'frame_index': frame_index,
                    'person_id': -1,  # -1 marks "no person detected"
                    'pose_index': -1,
                    'x': None,
                    'y': None,
                    'z': None,
                    'visibility': None
                })

        # Write the annotated frame to the output video.
        out.write(annotated_frame)



# Release video resources.
cap.release()
out.release()
cv2.destroyAllWindows()

print(f"已保存带有关键点索引的视频: {output_video_path}")
print(f"已保存关键点数据到CSV文件: {output_csv_path}")
print(f"共跟踪到 {next_person_id} 个人物")