#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
实时人体关键点检测Flask服务器
Real-time Human Pose Estimation Flask Server

支持实时视频流处理和WebSocket通信
"""

from flask import Flask, request, jsonify, Response
from flask_cors import CORS, cross_origin
import cv2
import numpy as np
import base64
import os
import json
import threading
import time
from collections import defaultdict
import torch
from ultralytics import YOLO
from ultralytics.utils.plotting import Annotator, colors

app = Flask(__name__)
# Allow cross-origin requests from the local dev frontends only.
CORS(app, origins=['http://localhost:5173', 'http://localhost:3000', 'http://127.0.0.1:5173'])

# Global state shared between the Flask handlers and the worker thread.
model = None                              # YOLO pose model, set by init_model()
device = None                             # torch device, set by init_model()
is_processing = False                     # True while a video is being processed
current_video_path = None                 # path of the video currently being processed
track_history = defaultdict(lambda: [])   # track_id -> list of (x, y) box centers
window_created = False                    # whether the OpenCV preview window exists
# Frames published for the SSE stream endpoint. Fix: /api/pose/realtime/stream
# reads this global, but it was never defined anywhere in the file (NameError).
frame_queue = []

def init_model():
    """Load the YOLO11 pose model onto GPU if available, CPU otherwise."""
    global model, device
    use_cuda = torch.cuda.is_available()
    device = torch.device('cuda:0' if use_cuda else 'cpu')
    print(f'Using device: {device}')

    # Load the YOLO11 pose weights and move them to the selected device.
    model = YOLO('yolo11n-pose.pt').to(device)
    print("✅ YOLO11 pose模型加载完成")

def cleanup_windows():
    """Destroy every OpenCV window this server may have opened.

    Closes the two window titles used historically (a Chinese one and the
    current English one) before falling back to destroyAllWindows(), then
    resets the ``window_created`` flag.
    """
    global window_created
    try:
        # Close known titles first; destroying a window that was never
        # opened raises cv2.error, which we deliberately ignore. The
        # original bare ``except:`` also swallowed KeyboardInterrupt and
        # SystemExit — narrowed to the expected exception type.
        for title in ('多目标人体关键点检测和跟踪', 'Pose Detection'):
            try:
                cv2.destroyWindow(title)
            except cv2.error:
                pass
        # Then close anything that remains.
        cv2.destroyAllWindows()
        window_created = False
        print("✅ 已清理所有OpenCV窗口")
    except Exception as e:
        print(f"清理窗口时出错: {e}")
def create_display_window():
    """(Re)create the OpenCV preview window used for live display.

    Existing windows are destroyed first so that a stale window (e.g. one
    with a garbled title) never lingers next to the new one.
    """
    global window_created
    try:
        # Tear down any previous windows first.
        cleanup_windows()
        # Give the GUI backend a moment to actually close them.
        # (Fix: dropped the redundant function-local ``import time`` —
        # ``time`` is already imported at module level.)
        time.sleep(0.3)
        # English title avoids encoding problems on some platforms.
        cv2.namedWindow('Pose Detection', cv2.WINDOW_AUTOSIZE)
        window_created = True
        print("✅ 视频窗口已创建，按 'q' 键退出")
    except Exception as e:
        print(f"创建窗口时出错: {e}")

# Skeleton edges for the COCO 17-keypoint layout, given as 1-based keypoint
# index pairs (converted to 0-based indices when drawing).
skeleton: list[list[int]] = [
    [16, 14], [14, 12], [17, 15], [15, 13], [12, 13],
    [6, 12], [7, 13], [6, 7], [6, 8], [7, 9],
    [8, 10], [9, 11], [2, 3], [1, 2], [1, 3],
    [2, 4], [3, 5], [4, 6], [5, 7]
]

# COCO keypoint names, index-aligned with the model's keypoint output.
keypoint_names: list[str] = [
    'nose', 'left_eye', 'right_eye', 'left_ear', 'right_ear',
    'left_shoulder', 'right_shoulder', 'left_elbow', 'right_elbow',
    'left_wrist', 'right_wrist', 'left_hip', 'right_hip',
    'left_knee', 'right_knee', 'left_ankle', 'right_ankle'
]

def process_video_realtime(video_path, conf=0.5, iou=0.7, show_window=True):
    """Run YOLO pose detection/tracking over a video, yielding per-frame data.

    Generator. For every frame it runs the tracker, draws bounding boxes,
    keypoints, skeleton edges and per-person motion trails onto the frame,
    optionally shows it in an OpenCV window, and yields a dict containing
    the annotated frame as a base64 JPEG plus structured detection data.

    Args:
        video_path: Path of the video file to process.
        conf: Detection confidence threshold (coerced to float; 0.5 if None).
        iou: NMS IoU threshold (coerced to float; 0.7 if None).
        show_window: When True, display each frame in an OpenCV window;
            pressing 'q' there stops processing.

    Yields:
        dict with keys 'frame' (1-based index), 'total_frames', 'image'
        (base64-encoded JPEG), 'detections' (per-person dicts) and
        'video_info' ({'width', 'height', 'fps'}).
    """
    global is_processing, track_history, window_created
    
    # Coerce thresholds defensively (they may arrive as strings or None).
    conf = float(conf) if conf is not None else 0.5
    iou = float(iou) if iou is not None else 0.7
    
    print(f"开始实时处理视频: {video_path}, conf={conf}, iou={iou}")
    
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        print(f"无法打开视频文件: {video_path}")
        return None
    
    # Video properties; fps falls back to 30 when the container reports 0.
    fps = int(cap.get(cv2.CAP_PROP_FPS)) or 30
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    
    print(f"视频信息: {width}x{height} @ {fps}fps, 总帧数: {total_frames}")
    
    frame_count = 0
    is_processing = True
    
    # Open the preview window if requested.
    if show_window:
        create_display_window()
    
    try:
        while is_processing:
            ret, frame = cap.read()
            if not ret:
                break
            
            frame_count += 1
            
            # Pose detection with ID tracking; fall back to plain
            # prediction (no persistent IDs) if the tracker errors out.
            try:
                results = model.track(frame, persist=True, conf=conf, iou=iou)
            except Exception as e:
                print(f"YOLO track错误: {e}")
                # Tracker failed; use predict() instead.
                results = model.predict(frame, conf=conf, iou=iou)
            
            # Annotator draws the labelled bounding boxes onto `frame`.
            annotator = Annotator(frame, line_width=2, font_size=1)
            
            detections = []
            
            # Process detections (requires both boxes and keypoints).
            if results[0].boxes is not None and results[0].keypoints is not None:
                boxes = results[0].boxes.xyxy.cpu().numpy()
                track_ids = results[0].boxes.id.int().cpu().numpy() if results[0].boxes.id is not None else None
                keypoints = results[0].keypoints.xy.cpu().numpy()
                confidences = results[0].keypoints.conf.cpu().numpy()
                
                # Draw keypoints and skeleton for each detected person.
                for i, (box, kpts, kpt_conf) in enumerate(zip(boxes, keypoints, confidences)):
                    # Use the tracker ID when available, else the list index.
                    track_id = track_ids[i] if track_ids is not None else i
                    
                    # Bounding box, colored per track ID.
                    x1, y1, x2, y2 = map(int, box)
                    color = colors(track_id, True)
                    annotator.box_label(box, f'Person {track_id}', color=color)
                    
                    # Keypoints: only draw/report the confident ones.
                    person_keypoints = []
                    for j, (kp, kp_conf) in enumerate(zip(kpts, kpt_conf)):
                        if kp_conf > 0.5:  # skip low-confidence keypoints
                            x, y = int(kp[0]), int(kp[1])
                            cv2.circle(frame, (x, y), 4, color, -1)
                            cv2.putText(frame, keypoint_names[j], (x+5, y-5), 
                                      cv2.FONT_HERSHEY_SIMPLEX, 0.3, color, 1)
                            
                            person_keypoints.append({
                                'name': keypoint_names[j],
                                'x': float(x),
                                'y': float(y),
                                'confidence': float(kp_conf)
                            })
                    
                    # Skeleton edges (1-based index pairs -> 0-based).
                    for connection in skeleton:
                        pt1_idx, pt2_idx = connection[0] - 1, connection[1] - 1
                        if (pt1_idx < len(kpts) and pt2_idx < len(kpts) and 
                            kpt_conf[pt1_idx] > 0.5 and kpt_conf[pt2_idx] > 0.5):
                            pt1 = tuple(map(int, kpts[pt1_idx]))
                            pt2 = tuple(map(int, kpts[pt2_idx]))
                            cv2.line(frame, pt1, pt2, color, 2)
                    
                    # Track history (box centers) used to draw motion trails.
                    center = ((x1 + x2) // 2, (y1 + y2) // 2)
                    track_history[track_id].append(center)
                    
                    # Cap the trail length.
                    if len(track_history[track_id]) > 30:
                        track_history[track_id].pop(0)
                    
                    # Draw the trail.
                    if len(track_history[track_id]) > 1:
                        points = np.array(track_history[track_id], dtype=np.int32)
                        cv2.polylines(frame, [points], False, color, 2)
                    
                    detections.append({
                        'track_id': int(track_id),
                        'bbox': [float(x1), float(y1), float(x2), float(y2)],
                        'keypoints': person_keypoints,
                        'confidence': float(np.mean(kpt_conf))
                    })
            
            # Overlay the frame/person counters.
            info_text = f"Frame: {frame_count} | Persons: {len(detections)}"
            cv2.putText(frame, info_text, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
            
            # Live preview; 'q' aborts processing.
            if show_window and frame is not None and frame.size > 0:
                cv2.imshow('Pose Detection', frame)
                # Check whether the user pressed 'q' to quit.
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    print("用户按下 'q' 键，停止处理")
                    is_processing = False
                    break
            
            # Encode the annotated frame as base64 JPEG for transport.
            _, buffer = cv2.imencode('.jpg', frame)
            img_base64 = base64.b64encode(buffer).decode('utf-8')
            
            # NOTE(review): the SSE endpoint reads a global 'frame_queue',
            # but nothing here publishes to it — confirm how these yielded
            # frames are meant to reach /api/pose/realtime/stream.
            yield {
                'frame': frame_count,
                'total_frames': total_frames,
                'image': img_base64,
                'detections': detections,
                'video_info': {
                    'width': width,
                    'height': height,
                    'fps': fps
                }
            }
            
            # Naive pacing: sleep one frame interval (ignores per-frame
            # processing time, so playback runs slower than real time).
            time.sleep(1.0 / fps)
            
    finally:
        cap.release()
        if show_window:
            cleanup_windows()
        is_processing = False
        track_history.clear()

@app.route('/api/pose/realtime/start', methods=['POST'])
def start_realtime_processing():
    """Upload a video and start pose processing in a background thread.

    Form fields: 'file' (the video upload) plus optional 'conf' and 'iou'
    thresholds. Returns 400 when a job is already running or no file was
    provided, 500 on unexpected errors.
    """
    global is_processing, current_video_path

    if is_processing:
        return jsonify({'error': '正在处理中，请先停止当前处理'}), 400

    try:
        if 'file' not in request.files:
            return jsonify({'error': '没有上传文件'}), 400

        file = request.files['file']
        if file.filename == '':
            return jsonify({'error': '没有选择文件'}), 400

        # Detection thresholds, with server-side defaults.
        conf = float(request.form.get('conf', 0.5))
        iou = float(request.form.get('iou', 0.7))

        # Security fix: basename() strips any client-supplied directory
        # components (e.g. '../../etc/x') so the upload cannot escape the
        # uploads/ directory (path traversal).
        video_filename = f"realtime_{os.path.basename(file.filename)}"
        video_path = os.path.join('uploads', video_filename)

        os.makedirs('uploads', exist_ok=True)
        file.save(video_path)

        current_video_path = video_path

        def process_thread():
            """Consume the frame generator; delete the temp file when done."""
            global is_processing
            is_processing = True
            try:
                print(f"开始处理线程，参数: conf={conf}, iou={iou}")
                for frame_data in process_video_realtime(video_path, conf, iou):
                    # TODO: forward frame_data to the frontend (SSE/WebSocket).
                    if frame_data:
                        print(f"处理帧 {frame_data.get('frame', 0)}")
            except Exception as e:
                print(f"处理线程错误: {e}")
                import traceback
                traceback.print_exc()
            finally:
                is_processing = False
                # Remove the temporary upload.
                if os.path.exists(video_path):
                    os.remove(video_path)
                print("处理线程结束")

        thread = threading.Thread(target=process_thread)
        thread.daemon = True
        thread.start()

        return jsonify({
            'success': True,
            'message': '开始实时处理',
            'video_info': {
                'filename': video_filename,
                'conf': conf,
                'iou': iou
            }
        })

    except Exception as e:
        return jsonify({'error': str(e)}), 500

@app.route('/api/pose/realtime/stop', methods=['POST'])
def stop_realtime_processing():
    """Signal the worker loop to stop and tear down any OpenCV windows."""
    global is_processing

    is_processing = False
    # Close every preview window the worker may have opened.
    cleanup_windows()

    return jsonify({'success': True, 'message': '已停止处理'})

@app.route('/api/pose/realtime/status', methods=['GET'])
def get_realtime_status():
    """Report whether a job is running and which video it is working on."""
    status = {
        'is_processing': is_processing,
        'current_video': current_video_path,
    }
    return jsonify(status)

@app.route('/api/pose/realtime/stream', methods=['GET'])
@cross_origin()
def stream_realtime_processing():
    """Stream processed frames to the client as Server-Sent Events.

    Bug fix: the original referenced a module global ``frame_queue`` that
    is never defined in this file, so the generator died with a NameError
    as soon as a job was running. The queue is now created on demand
    (empty) so the stream degrades to idle polling instead of crashing.
    NOTE(review): a producer still has to append frame dicts to
    ``frame_queue`` for data to actually reach the client — confirm the
    intended transport with the processing thread.
    """
    def generate():
        global is_processing, current_video_path

        if not is_processing:
            yield f"data: {json.dumps({'error': '没有正在处理的视频'})}\n\n"
            return

        # Create the shared queue if no producer has defined it yet.
        queue = globals().setdefault('frame_queue', [])

        print("开始SSE流式传输...")
        last_frame_sent = 0

        try:
            while is_processing:
                # Send any frames that arrived since the last iteration.
                if len(queue) > last_frame_sent:
                    frame_data = queue[last_frame_sent]
                    last_frame_sent += 1

                    yield f"data: {json.dumps(frame_data)}\n\n"
                    print(f"发送帧数据: {frame_data.get('frame', 0)}")
                else:
                    # No new frame yet; avoid a busy loop.
                    time.sleep(0.1)

        except Exception as e:
            print(f"SSE流错误: {e}")
            yield f"data: {json.dumps({'error': str(e)})}\n\n"

    response = Response(generate(), mimetype='text/event-stream')
    response.headers['Cache-Control'] = 'no-cache'
    response.headers['Connection'] = 'keep-alive'
    response.headers['Access-Control-Allow-Origin'] = '*'
    response.headers['Access-Control-Allow-Headers'] = 'Cache-Control'
    return response

@app.route('/api/pose/status', methods=['GET'])
def get_status():
    """Return basic server and model information."""
    try:
        info = {
            'status': 'running',
            'device': str(device) if device else 'unknown',
            'model': 'YOLO11n-pose',
            'keypoints': len(keypoint_names),
            'keypoint_names': keypoint_names,
            'is_processing': is_processing,
        }
        return jsonify(info)
    except Exception as e:
        return jsonify({'error': str(e)}), 500

@app.route('/api/pose/cleanup', methods=['POST'])
def cleanup_all_windows():
    """Force-close every OpenCV window the server may have opened."""
    try:
        cleanup_windows()
    except Exception as e:
        return jsonify({'error': str(e)}), 500
    return jsonify({'success': True, 'message': '已清理所有窗口'})

@app.route('/api/pose/realtime/show', methods=['POST'])
def start_realtime_show():
    """Like /api/pose/realtime/start, but also opens a local preview window.

    Form fields: 'file' (the video upload) plus optional 'conf' and 'iou'
    thresholds. Returns 400 when a job is already running or no file was
    provided, 500 on unexpected errors.
    """
    global is_processing, current_video_path

    if is_processing:
        return jsonify({'error': '正在处理中，请先停止当前处理'}), 400

    try:
        if 'file' not in request.files:
            return jsonify({'error': '没有上传文件'}), 400

        file = request.files['file']
        if file.filename == '':
            return jsonify({'error': '没有选择文件'}), 400

        # Detection thresholds, with server-side defaults.
        conf = float(request.form.get('conf', 0.5))
        iou = float(request.form.get('iou', 0.7))

        # Security fix: basename() strips any client-supplied directory
        # components so the upload cannot escape the uploads/ directory
        # (path traversal).
        video_filename = f"realtime_{os.path.basename(file.filename)}"
        video_path = os.path.join('uploads', video_filename)

        os.makedirs('uploads', exist_ok=True)
        file.save(video_path)

        current_video_path = video_path

        def process_thread():
            """Consume the frame generator with the preview window enabled."""
            global is_processing
            is_processing = True
            try:
                print(f"开始处理线程（显示窗口），参数: conf={conf}, iou={iou}")
                for frame_data in process_video_realtime(video_path, conf, iou, show_window=True):
                    if frame_data and is_processing:
                        print(f"处理帧 {frame_data.get('frame', 0)}")
            except Exception as e:
                print(f"处理线程错误: {e}")
                import traceback
                traceback.print_exc()
            finally:
                is_processing = False
                # Remove the temporary upload.
                if os.path.exists(video_path):
                    os.remove(video_path)
                print("处理线程结束")

        thread = threading.Thread(target=process_thread)
        thread.daemon = True
        thread.start()

        return jsonify({
            'success': True,
            'message': '开始实时处理并显示视频窗口',
            'video_info': {
                'filename': video_filename,
                'conf': conf,
                'iou': iou
            }
        })

    except Exception as e:
        return jsonify({'error': str(e)}), 500

@app.route('/', methods=['GET'])
def index():
    """Landing page: describe the API and list its endpoints."""
    endpoints = [
        'POST /api/pose/realtime/start - 开始实时处理',
        'POST /api/pose/realtime/stop - 停止实时处理',
        'POST /api/pose/realtime/show - 开始实时处理并显示视频窗口',
        'GET /api/pose/realtime/status - 获取处理状态',
        'GET /api/pose/realtime/stream - 实时流式数据',
        'GET /api/pose/status - 获取服务器状态',
    ]
    return jsonify({
        'message': '实时人体关键点检测API服务器',
        'version': '2.0.0',
        'endpoints': endpoints,
    })

if __name__ == '__main__':
    print("🚀 启动实时人体关键点检测API服务器...")
    print("📡 服务器地址: http://localhost:5000")

    # Load the model once before serving requests.
    init_model()

    # use_reloader=False: with debug=True the Werkzeug reloader re-imports
    # this module in a child process, which would load the heavy YOLO model
    # a second time and spawn duplicate worker threads.
    app.run(host='0.0.0.0', port=5000, debug=True, threaded=True, use_reloader=False)
