{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 第六章：卷积神经网络\n",
    "湖北理工学院《机器学习》课程资料\n",
    "\n",
    "作者：李辉楚吴\n",
    "\n",
    "笔记内容概述: 基于YOLO的行人检测，姿态估计"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Import required libraries\n",
    "import torch\n",
    "from ultralytics import YOLO\n",
    "import cv2\n",
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "from PIL import Image\n",
    "\n",
    "# Load pretrained YOLO weights from the local Data/ directory\n",
    "model = YOLO('Data/yolov8n.pt')  # YOLOv8 nano detection model (pedestrian detection)\n",
    "pose_model = YOLO('Data/yolov8n-pose.pt')  # YOLOv8 nano pose model (COCO 17-keypoint format)\n",
    "\n",
    "# Pick the compute device. NOTE(review): `device` is only printed here -- the\n",
    "# models are never explicitly moved to it; confirm whether .to(device) was intended.\n",
    "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
    "print(f\"Using device: {device}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "准备数据，测试检测模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the test image (PIL opens it in RGB channel order)\n",
    "image_path = 'Data/pose1.jpg'\n",
    "image = Image.open(image_path)\n",
    "\n",
    "# Run pose estimation on the image\n",
    "results = pose_model(image)\n",
    "\n",
    "# results[0].plot() returns the annotated image as a BGR (OpenCV-order) array,\n",
    "# so flip the channels to RGB before handing it to matplotlib.\n",
    "plt.figure(figsize=(12, 8))\n",
    "plt.imshow(cv2.cvtColor(results[0].plot(), cv2.COLOR_BGR2RGB))\n",
    "plt.axis('off')\n",
    "plt.title('Pose Estimation Results')\n",
    "plt.show()\n",
    "\n",
    "# Keypoints array has shape (num_persons, 17, 3); each keypoint row is\n",
    "# (x, y, confidence). The 17 COCO keypoints are ordered as:\n",
    "#  0: nose\n",
    "#  1: left_eye        2: right_eye\n",
    "#  3: left_ear        4: right_ear\n",
    "#  5: left_shoulder   6: right_shoulder\n",
    "#  7: left_elbow      8: right_elbow\n",
    "#  9: left_wrist     10: right_wrist\n",
    "# 11: left_hip       12: right_hip\n",
    "# 13: left_knee      14: right_knee\n",
    "# 15: left_ankle     16: right_ankle\n",
    "keypoints = results[0].keypoints.data.cpu().numpy()\n",
    "if len(keypoints) > 0:\n",
    "    # Draw on an RGB copy of the original image. A PIL image converts to an\n",
    "    # RGB numpy array directly, so no BGR<->RGB round-trip is needed.\n",
    "    img_rgb = np.array(image).copy()\n",
    "\n",
    "    # Draw every confident keypoint as a small green dot\n",
    "    for kps in keypoints:\n",
    "        for x, y, conf in kps:\n",
    "            if conf > 0.5:  # only draw high-confidence keypoints\n",
    "                cv2.circle(img_rgb, (int(x), int(y)), 2, (0, 255, 0), -1)\n",
    "\n",
    "    # Display the image with keypoints\n",
    "    plt.figure(figsize=(12, 8))\n",
    "    plt.imshow(img_rgb)\n",
    "    plt.axis('off')\n",
    "    plt.title('Detected Keypoints')\n",
    "    plt.show()\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "视频中的姿态估计"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# COCO skeleton edges (pairs of keypoint indices). Defined once, outside\n",
    "# the frame loop -- the topology never changes per keypoint/frame.\n",
    "SKELETON_CONNECTIONS = [\n",
    "    (5, 7), (7, 9),      # left arm\n",
    "    (6, 8), (8, 10),     # right arm\n",
    "    (11, 13), (13, 15),  # left leg\n",
    "    (12, 14), (14, 16),  # right leg\n",
    "    (5, 11), (6, 12),    # shoulders to hips\n",
    "    (11, 12), (5, 6),    # hip line and shoulder line\n",
    "]\n",
    "CONF_THRESHOLD = 0.5  # minimum keypoint confidence to draw\n",
    "\n",
    "# Open the input video\n",
    "video_path = 'Data/pose_video.mp4'\n",
    "cap = cv2.VideoCapture(video_path)\n",
    "\n",
    "# Video properties for the output writer. CAP_PROP_FPS can report 0 for some\n",
    "# containers, which would break the writer -- fall back to 30 fps.\n",
    "frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n",
    "frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n",
    "fps = int(cap.get(cv2.CAP_PROP_FPS)) or 30\n",
    "\n",
    "# Create the output writer\n",
    "output_path = 'Data/pose_output.mp4'\n",
    "out = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (frame_width, frame_height))\n",
    "\n",
    "try:\n",
    "    while cap.isOpened():\n",
    "        ret, frame = cap.read()\n",
    "        if not ret:\n",
    "            break\n",
    "\n",
    "        # ultralytics expects numpy frames in OpenCV's BGR order, so the raw\n",
    "        # frame is passed directly (the previous BGR->RGB conversion fed the\n",
    "        # model channel-swapped input).\n",
    "        results = pose_model(frame)\n",
    "\n",
    "        for result in results:\n",
    "            keypoints = result.keypoints.data.cpu().numpy()\n",
    "\n",
    "            # Per person: draw confident keypoints, then skeleton edges whose\n",
    "            # endpoints are both confident.\n",
    "            for kps in keypoints:\n",
    "                for x, y, conf in kps:\n",
    "                    if conf > CONF_THRESHOLD:\n",
    "                        cv2.circle(frame, (int(x), int(y)), 3, (0, 255, 0), -1)\n",
    "\n",
    "                for start_idx, end_idx in SKELETON_CONNECTIONS:\n",
    "                    if kps[start_idx][2] > CONF_THRESHOLD and kps[end_idx][2] > CONF_THRESHOLD:\n",
    "                        start_point = (int(kps[start_idx][0]), int(kps[start_idx][1]))\n",
    "                        end_point = (int(kps[end_idx][0]), int(kps[end_idx][1]))\n",
    "                        cv2.line(frame, start_point, end_point, (0, 255, 0), 2)\n",
    "\n",
    "        # Write the annotated frame to the output video\n",
    "        out.write(frame)\n",
    "\n",
    "        # Live preview (optional; cv2.imshow may not work in headless/notebook\n",
    "        # environments -- remove these two statements if the kernel hangs here)\n",
    "        cv2.imshow('Pose Estimation', frame)\n",
    "        if cv2.waitKey(1) & 0xFF == ord('q'):\n",
    "            break\n",
    "finally:\n",
    "    # Always release the capture/writer handles, even if inference raises\n",
    "    cap.release()\n",
    "    out.release()\n",
    "    cv2.destroyAllWindows()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "machinelearning",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
