import os
import sys
import numpy as np
import cv2
import torch
import base64
import json
from datetime import datetime
import uvicorn
from fastapi import FastAPI, WebSocket, Request, Form, UploadFile, File, BackgroundTasks
from fastapi.responses import HTMLResponse, StreamingResponse
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates
from starlette.websockets import WebSocketDisconnect
import asyncio
import io
import argparse  # 添加导入
from typing import Optional, List
import os.path as osp  # 添加导入
from mocap_utils.timer import Timer  # 添加导入

# Create the required directories (relative to the current working
# directory) before the FastAPI app is created below.
os.makedirs("static", exist_ok=True)
os.makedirs("templates", exist_ok=True)

# FrankMocap 相关导入
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from demo.demo_options import DemoOptions
from bodymocap.body_mocap_api import BodyMocap
from handmocap.hand_mocap_api import HandMocap
from bodymocap.body_bbox_detector import BodyPoseEstimator
from handmocap.hand_bbox_detector import HandBboxDetector
from integration.copy_and_paste import integration_copy_paste
import mocap_utils.demo_utils as demo_utils
import mocap_utils.general_utils as gnu  # 添加导入

# Resolve template/static directories relative to this file so the app
# works regardless of the current working directory.
current_dir = os.path.dirname(os.path.abspath(__file__))
templates_dir = os.path.join(current_dir, "templates")
static_dir = os.path.join(current_dir, "static")

# Create the required directories (absolute paths this time).
os.makedirs(static_dir, exist_ok=True)
os.makedirs(templates_dir, exist_ok=True)

# 添加姿态平滑功能
class PoseSmoothing:
    """Temporal smoother for per-frame pose predictions.

    Keeps a sliding window of recent joint predictions and blends the
    current frame with the average of the previous frames to reduce
    jitter in live/video streams.
    """

    def __init__(self, window_size=5, smoothing_factor=0.5):
        # window_size: number of frames kept in history (incl. current).
        # smoothing_factor: weight given to history; 0 disables smoothing.
        self.window_size = window_size
        self.smoothing_factor = smoothing_factor
        self.pose_history = []

    def smooth_pose(self, pose_data):
        """Smooth the joint positions of one prediction dict in place.

        Args:
            pose_data: prediction dict; when it contains
                'pred_joints_img' (ndarray of joint coordinates) that
                entry is smoothed against the history.

        Returns:
            The same dict, with 'pred_joints_img' replaced by the
            smoothed array when history is available.
        """
        if 'pred_joints_img' in pose_data:
            current = pose_data['pred_joints_img'].copy()
            self.pose_history.append(current)

            # Drop the oldest frame once the window is full.
            if len(self.pose_history) > self.window_size:
                self.pose_history.pop(0)

            if len(self.pose_history) > 1:
                # Convex blend: (1 - s) * current + s * mean(past).
                # The weights sum to exactly 1. The previous formula
                # re-multiplied the accumulator by (1 - s) once per
                # history frame, so the total weight fell below 1 for
                # 2+ past frames and joints drifted toward the origin.
                past = self.pose_history[:-1]
                past_mean = sum(past) / len(past)
                smoothed = (current * (1 - self.smoothing_factor)
                            + past_mean * self.smoothing_factor)
                pose_data['pred_joints_img'] = smoothed

        # Mesh vertices ('pred_vertices_img') are intentionally left
        # unsmoothed for now.
        if 'pred_vertices_img' in pose_data:
            pass

        return pose_data

# Global pose smoother shared by all requests/frames.
pose_smoother = PoseSmoothing(window_size=3, smoothing_factor=0.3)

# Create the app using absolute paths for static files and templates.
app = FastAPI()
app.mount("/static", StaticFiles(directory=static_dir), name="static")
templates = Jinja2Templates(directory=templates_dir)

# Globals holding the loaded models (populated by initialize_models()).
device = None
body_pose_estimator = None  # 2D body pose estimator
hand_bbox_detector = None
body_mocap = None
hand_mocap = None

# Webcam capture state.
camera = None
camera_active = False
camera_task = None

# Renderer instance (stays None in headless environments).
visualizer = None

def initialize_models(args=None):
    """Load detectors, body/hand mocap models and (optionally) a renderer.

    Populates the module-level globals used by the request handlers.

    Args:
        args: optional parsed DemoOptions namespace; when None, default
            options are parsed from an empty argument list.

    Raises:
        FileNotFoundError: when the SMPL model directory does not exist.
    """
    global device, body_pose_estimator, hand_bbox_detector, body_mocap, hand_mocap, visualizer
    
    # Absolute path of the project root (parent of this file's directory).
    root_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
    
    # Parse default options when the caller did not supply any.
    if args is None:
        parser = DemoOptions().parser
        args = parser.parse_args([])  # do not read from the command line
    
    # Default to SMPL-X when the option is absent.
    if not hasattr(args, 'use_smplx'):
        args.use_smplx = True
    
    # Force SMPL-X and point all checkpoints/models at absolute paths.
    args.use_smplx = True
    args.checkpoint_body_smplx = os.path.join(root_dir, 'extra_data/body_module/pretrained_weights/2020_05_31-00_50_43-best-51.749683916568756.pt')
    args.checkpoint_hand = os.path.join(root_dir, 'extra_data/hand_module/pretrained_weights/pose_shape_best.pth')
    args.smpl_dir = os.path.join(root_dir, 'extra_data/smpl')
    
    # Verify the SMPL model directory before loading anything heavy.
    print(f"SMPL目录路径: {args.smpl_dir}")
    print(f"该路径是否存在: {os.path.exists(args.smpl_dir)}")
    
    if not os.path.exists(args.smpl_dir):
        raise FileNotFoundError(f"SMPL模型目录不存在: {args.smpl_dir}")
    
    # Prefer CUDA when available.
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    print(f"使用设备: {device}")
    
    # Instantiate detectors and regression models.
    body_pose_estimator = BodyPoseEstimator()
    hand_bbox_detector = HandBboxDetector('third_view', device)
    body_mocap = BodyMocap(args.checkpoint_body_smplx, args.smpl_dir, device=device, use_smplx=True)
    hand_mocap = HandMocap(args.checkpoint_hand, args.smpl_dir, device=device)
    
    # Renderer setup: skipped entirely when headless; a fallback renderer
    # is then chosen lazily at render time (see render_with_fallback).
    try:
        # Heuristic GUI check: DISPLAY env var (X11) or running on Windows.
        has_display = 'DISPLAY' in os.environ or sys.platform == 'win32'
        
        # Try to initialize the requested renderer (default: opengl).
        renderer_type = args.renderer_type if hasattr(args, 'renderer_type') else 'opengl'
        if has_display:
            try:
                if renderer_type in ['pytorch3d', 'opendr']:
                    from renderer.screen_free_visualizer import Visualizer as VisualizerClass
                else:
                    from renderer.visualizer import Visualizer as VisualizerClass
                visualizer = VisualizerClass(renderer_type)
                print(f"成功初始化渲染器: {renderer_type}")
            except Exception as e:
                print(f"初始化标准渲染器失败: {str(e)}，尝试使用简单渲染器...")
                # Defer renderer selection to render time.
                visualizer = None
        else:
            print("检测到无图形界面环境，跳过渲染器初始化。将在渲染时使用备选方法。")
            visualizer = None
    except Exception as e:
        print(f"初始化渲染器时出错: {str(e)}，将在渲染时尝试使用备选方法。")
        visualizer = None
    
    print("模型初始化成功")

@app.on_event("startup")
async def startup_event():
    # 创建必要的目录
    os.makedirs("static", exist_ok=True)
    os.makedirs("templates", exist_ok=True)
    
    # 初始化模型
    initialize_models()

@app.on_event("shutdown")
async def shutdown_event():
    # 关闭摄像头
    global camera, camera_active
    if camera is not None:
        camera_active = False
        camera.release()

@app.get("/", response_class=HTMLResponse)
async def index(request: Request):
    # 渲染主页
    return templates.TemplateResponse("index.html", {"request": request})

@app.post("/upload", response_class=HTMLResponse)
async def upload_file(request: Request, file: UploadFile = File(...), mode: str = Form("frank")):
    # 处理上传的图片或视频文件
    contents = await file.read()
    
    # 确定文件类型
    file_ext = os.path.splitext(file.filename)[1].lower()
    
    # 创建结果目录
    results_dir = os.path.join(static_dir, "results")
    os.makedirs(results_dir, exist_ok=True)
    
    if file_ext in ['.jpg', '.jpeg', '.png', '.bmp']:
        # 处理图片
        img_bytes = np.asarray(bytearray(contents), dtype=np.uint8)
        img = cv2.imdecode(img_bytes, cv2.IMREAD_COLOR)
        
        # 处理图像
        result_img, keypoints = process_image(img, mode)
        
        # 保存结果
        result_path = os.path.join(results_dir, f"result_{datetime.now().strftime('%Y%m%d_%H%M%S')}.jpg")
        cv2.imwrite(result_path, result_img)
        
        # 返回结果页面
        return templates.TemplateResponse(
            "result.html", 
            {
                "request": request, 
                "result_image": '/'.join(result_path.split(os.sep)[-3:])
            }
        )
    elif file_ext in ['.mp4', '.avi', '.mov', '.mkv']:
        # 处理视频文件
        # 这里只是保存视频，实际处理过于复杂，留给前端WebSocket处理
        video_path = os.path.join(results_dir, f"uploaded_{datetime.now().strftime('%Y%m%d_%H%M%S')}{file_ext}")
        with open(video_path, "wb") as f:
            f.write(contents)
            
        return templates.TemplateResponse(
            "video.html", 
            {
                "request": request, 
                "video_path": '/'.join(video_path.split(os.sep)[-3:]),
                "mode": mode
            }
        )
    else:
        return templates.TemplateResponse(
            "index.html", 
            {
                "request": request, 
                "error": "不支持的文件类型"
            }
        )

@app.get("/camera", response_class=HTMLResponse)
async def camera_page(request: Request):
    return templates.TemplateResponse("camera.html", {"request": request})

async def generate_camera_frames(mode: str = "frank"):
    """Yield multipart MJPEG frames from the default webcam.

    Each frame is run through process_image(mode) before encoding.

    Args:
        mode: processing mode passed to process_image ('frank' default).

    Yields:
        bytes: one multipart/x-mixed-replace JPEG frame per iteration.

    Raises:
        RuntimeError: when the webcam cannot be opened.
    """
    global camera, camera_active

    # Lazily open the webcam on first use.
    if camera is None:
        camera = cv2.VideoCapture(0)
        if not camera.isOpened():
            # Release the failed capture and reset the global; otherwise
            # the dead VideoCapture object stays cached and every later
            # request skips the re-open and streams nothing.
            camera.release()
            camera = None
            raise RuntimeError("无法打开摄像头")

    camera_active = True

    try:
        while camera_active:
            success, frame = camera.read()
            if not success:
                break

            # Run mocap on the frame and encode the rendering as JPEG.
            result_img, _ = process_image(frame, mode)
            _, jpeg = cv2.imencode('.jpg', result_img)

            # multipart/x-mixed-replace frame boundary.
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + jpeg.tobytes() + b'\r\n')

            # Throttle to roughly 30 fps.
            await asyncio.sleep(0.03)
    except Exception as e:
        print(f"Camera streaming error: {e}")
    finally:
        camera_active = False

@app.get("/video_feed")
async def video_feed(mode: str = "frank"):
    return StreamingResponse(
        generate_camera_frames(mode),
        media_type="multipart/x-mixed-replace; boundary=frame"
    )

@app.get("/stop_camera")
async def stop_camera():
    global camera_active
    camera_active = False
    return {"status": "camera stopped"}

@app.websocket("/ws")
async def websocket_endpoint(websocket: WebSocket):
    await websocket.accept()
    print("WebSocket 连接已建立")
    try:
        while True:
            print("等待客户端消息...")
            data = await websocket.receive_text()  # 只保留一次接收
            print("收到客户端数据")
            json_data = json.loads(data)
            
            # 解析选择的模式
            mode = json_data.get('mode', 'frank')
            
            # 解析图像
            img_data = json_data.get('image').split(',')[1]
            img_bytes = base64.b64decode(img_data)
            np_arr = np.frombuffer(img_bytes, np.uint8)
            img_original_bgr = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
            
            # 处理图像
            result_img, keypoints = process_image(img_original_bgr, mode)
            
            # 将结果图像编码为base64
            _, buffer = cv2.imencode('.jpg', result_img)
            result_base64 = base64.b64encode(buffer).decode('utf-8')
            
            # 发送结果给客户端
            await websocket.send_json({
                'image': f"data:image/jpeg;base64,{result_base64}",
                'keypoints': keypoints.tolist() if keypoints is not None else None
            })
            
    except WebSocketDisconnect:
        print("Client disconnected")

def process_image(img, mode):
    """Run the mocap pipeline on one BGR frame.

    Args:
        img: BGR image (numpy array) as read by OpenCV.
        mode: 'hand' (hands only), 'body' (body only), or anything else
            for the full 'frank' body+hand mode.

    Returns:
        (rendered_image, keypoints): the rendered result and the first
        person's 'pred_joints_img' array; (img, None) when no person is
        detected, no valid hand box exists (hand mode), or on error.
    """
    try:
        # Detect 2D body pose and per-person body bounding boxes.
        body_pose_list, body_bbox_list = body_pose_estimator.detect_body_pose(img.copy())
        
        if len(body_bbox_list) < 1:
            return img, None
            
        # Branch per requested mode.
        if mode == 'hand':
            # Hands only: derive hand boxes from the 2D body pose.
            hand_bbox_list = hand_bbox_detector.detect_hand_bbox_from_body_pose(
                body_pose_list, img.shape[:2])
            # Drop malformed / tiny hand boxes.
            hand_bbox_list = validate_hand_bboxes(hand_bbox_list)
            
            # Regress hands only when at least one valid box remains.
            if any(bbox_pair is not None and any(bbox is not None for bbox in bbox_pair) 
                   for bbox_pair in hand_bbox_list):
                pred_output_list = hand_mocap.regress(img, hand_bbox_list, add_margin=True)
            else:
                # No valid hand boxes: return the frame untouched.
                return img, None
            
        elif mode == 'body':
            # Body only.
            pred_output_list = body_mocap.regress(img, body_bbox_list)
            hand_bbox_list = [None, ] * len(body_bbox_list)
            
        else:  # frank - full body + hand mode (default)
            # Regress the body first.
            pred_output_list = body_mocap.regress(img, body_bbox_list)
            
            # Derive hand boxes from the predicted body.
            hand_bbox_list = body_mocap.get_hand_bboxes(pred_output_list, img.shape[:2])
            
            # Drop malformed / tiny hand boxes.
            hand_bbox_list = validate_hand_bboxes(hand_bbox_list)
            
            # Integrate hand results only when valid hand boxes exist.
            if any(bbox_pair is not None and any(bbox is not None for bbox in bbox_pair) 
                   for bbox_pair in hand_bbox_list):
                # Regress the hands.
                pred_hand_list = hand_mocap.regress(img, hand_bbox_list, add_margin=True)
                
                # Copy-paste the hand results into the body prediction.
                pred_output_list = integration_copy_paste(
                    pred_output_list, pred_hand_list, body_mocap.smpl, img.shape)
        
        # Temporal smoothing of joint positions.
        # NOTE(review): one global smoother is shared across all detected
        # people and all modes — confirm this is intended for
        # multi-person frames.
        for i in range(len(pred_output_list)):
            pred_output_list[i] = pose_smoother.smooth_pose(pred_output_list[i])
            
        # Extract renderable meshes from the predictions.
        pred_mesh_list = demo_utils.extract_mesh_from_output(pred_output_list)
        
        # Render, falling back through the available renderers.
        res_img = render_with_fallback(img, pred_mesh_list, body_bbox_list, hand_bbox_list)
            
        # Keypoints of the first detected person, if any.
        keypoints = None
        if len(pred_output_list) > 0:
            keypoints = pred_output_list[0]['pred_joints_img']
            
        return res_img, keypoints
        
    except Exception as e:
        # Best-effort: log the failure and return the unprocessed frame.
        print(f"Error processing image: {str(e)}")
        import traceback
        traceback.print_exc()
        return img, None

def render_with_fallback(img, pred_mesh_list, body_bbox_list, hand_bbox_list):
    """Render meshes onto the frame, trying renderers in preference order.

    Tries the pre-initialized global visualizer first, then pytorch3d /
    opendr (/ opengl when a display is available), caching the first one
    that works. As a last resort, draws plain bounding boxes with OpenCV.
    """
    global visualizer  # cache the first renderer that works
    
    # Fast path: a renderer was already initialized successfully.
    if visualizer is not None:
        try:
            res_img = visualizer.visualize(
                img,
                pred_mesh_list=pred_mesh_list,
                body_bbox_list=body_bbox_list,
                hand_bbox_list=hand_bbox_list)
            return res_img
        except Exception as e:
            print(f"使用预初始化渲染器失败: {e}")
            # Fall through to the other renderers.
    
    # Candidate renderers, restricted when running headless.
    renderers_to_try = []
    
    # Heuristic GUI check: DISPLAY env var (X11) or running on Windows.
    has_display = 'DISPLAY' in os.environ or sys.platform == 'win32'
    
    if has_display:
        # With a display, all renderers are candidates.
        renderers_to_try = ['pytorch3d', 'opendr', 'opengl']
    else:
        # Headless: skip renderers that require OpenGL.
        renderers_to_try = ['pytorch3d', 'opendr']
    
    for renderer_type in renderers_to_try:
        try:
            if renderer_type in ['pytorch3d', 'opendr']:
                from renderer.screen_free_visualizer import Visualizer
            else:
                from renderer.visualizer import Visualizer
                
            temp_visualizer = Visualizer(renderer_type)
            res_img = temp_visualizer.visualize(
                img,
                pred_mesh_list=pred_mesh_list,
                body_bbox_list=body_bbox_list,
                hand_bbox_list=hand_bbox_list)
                
            # Success: keep this renderer for subsequent frames.
            visualizer = temp_visualizer
            print(f"成功使用渲染器: {renderer_type}")
            return res_img
        except Exception as e:
            print(f"{renderer_type} 渲染器失败: {e}")
            continue
    
    # Last resort: draw bounding boxes only (no mesh rendering).
    print("所有渲染器都失败，使用基本绘图")
    result_img = img.copy()
    
    # Body boxes in green.
    if body_bbox_list:
        for bbox in body_bbox_list:
            if bbox is not None:
                x1, y1, x2, y2 = [int(coord) for coord in bbox]
                cv2.rectangle(result_img, (x1, y1), (x2, y2), (0, 255, 0), 2)
    
    # Hand boxes: index 0 (left) green, index 1 (right) blue.
    if hand_bbox_list:
        for hand_pair in hand_bbox_list:
            if hand_pair is not None:
                for i, bbox in enumerate(hand_pair):
                    if bbox is not None:
                        x1, y1, x2, y2 = [int(coord) for coord in bbox]
                        # BGR: (0,255,0) is green, (255,0,0) is blue.
                        color = (0, 255, 0) if i == 0 else (255, 0, 0)
                        cv2.rectangle(result_img, (x1, y1), (x2, y2), color, 2)
    
    return result_img
def validate_hand_bboxes(hand_bbox_list):
    """Filter per-person hand bounding-box pairs, dropping invalid boxes.

    Each entry of hand_bbox_list is either None or an indexable pair of
    boxes [left, right]. Boxes that are malformed, have fewer than four
    values, lie outside the image origin, are inverted, or are 10px or
    smaller in either dimension are replaced by None.

    Returns:
        A list of the same length where each entry is None or a
        [left, right] pair of [min_x, min_y, max_x, max_y] float lists.
    """
    validated = []

    for hand_bboxes in hand_bbox_list:
        if hand_bboxes is None:
            validated.append(None)
            continue

        checked_pair = [None, None]  # [left hand, right hand]

        for hand_type, bbox in enumerate(hand_bboxes):
            # Skip absent boxes outright.
            if bbox is None:
                continue

            try:
                # The bbox must be a sequence of at least four numbers.
                if not isinstance(bbox, (list, tuple, np.ndarray)):
                    print(f"跳过非数组格式的边界框: {bbox}")
                    continue
                if len(bbox) < 4:
                    print(f"跳过格式错误的边界框 (值太少): {bbox}")
                    continue

                # Use the first four values as [min_x, min_y, max_x, max_y],
                # coerced to float so the arithmetic below is well-defined.
                min_x, min_y, max_x, max_y = (float(v) for v in bbox[:4])

                width = max_x - min_x
                height = max_y - min_y

                # Reject negative origins and inverted/empty boxes.
                if min_x < 0 or min_y < 0 or max_x <= min_x or max_y <= min_y:
                    print(f"跳过坐标无效的边界框: [{min_x}, {min_y}, {max_x}, {max_y}]")
                    continue

                if width > 10 and height > 10:
                    # Store in normalized [min_x, min_y, max_x, max_y] form.
                    checked_pair[hand_type] = [min_x, min_y, max_x, max_y]
                else:
                    print(f"跳过尺寸过小的手部边界框: [{min_x}, {min_y}, {max_x}, {max_y}], 宽度={width}, 高度={height}")
            except Exception as e:
                print(f"处理边界框时出错: {bbox}, 错误: {str(e)}")
                continue

        validated.append(checked_pair)

    return validated

# 新增：处理命令行运行模式
def run_mocap_on_input(args, mode='frank'):
    """Run the mocap pipeline over the input described by args (CLI mode).

    Reads frames from an image directory, a video file, or the webcam
    (per demo_utils.setup_input), processes each with process_image,
    optionally displays and saves the results, and finally assembles an
    output video for video/webcam inputs.

    Args:
        args: parsed DemoOptions namespace (input_path, out_dir,
            start_frame/end_frame, save_frame, no_display, no_video_out,
            seq_name, ...).
        mode: 'frank', 'hand', or 'body' — forwarded to process_image.
    """
    global body_pose_estimator, hand_bbox_detector, body_mocap, hand_mocap, visualizer

    # Lazily initialize the models on first use.
    if body_pose_estimator is None:
        initialize_models(args)

    # Resolve the input source (image_dir / video / webcam).
    input_type, input_data = demo_utils.setup_input(args)
    
    # Ensure the output directory layout exists.
    if args.out_dir is not None:
        os.makedirs(args.out_dir, exist_ok=True)
        frames_dir = os.path.join(args.out_dir, "frames")
        results_dir = os.path.join(args.out_dir, "results")
        os.makedirs(frames_dir, exist_ok=True)
        os.makedirs(results_dir, exist_ok=True)
    
    cur_frame = args.start_frame
    video_frame = 0  # frames consumed from a video/webcam source
    timer = Timer()
    
    # Main frame loop.
    while True:
        timer.tic()
        
        # Load the next frame according to the input type.
        if input_type == 'image_dir':
            if cur_frame < len(input_data):
                image_path = input_data[cur_frame]
                img_original_bgr = cv2.imread(image_path)
            else:
                img_original_bgr = None
                
        elif input_type == 'video':
            _, img_original_bgr = input_data.read()
            # Skip frames until the requested start frame.
            if video_frame < cur_frame:
                video_frame += 1
                continue
            
            # Optionally persist the raw video frame.
            image_path = osp.join(args.out_dir, "frames", f"{cur_frame:05d}.jpg")
            if img_original_bgr is not None:
                video_frame += 1
                if args.save_frame:
                    gnu.make_subdir(image_path)
                    cv2.imwrite(image_path, img_original_bgr)
                    
        elif input_type == 'webcam':
            _, img_original_bgr = input_data.read()
            # Skip frames until the requested start frame.
            if video_frame < cur_frame:
                video_frame += 1
                continue
            
            # Optionally persist the raw webcam frame.
            image_path = osp.join(args.out_dir, "frames", f"scene_{cur_frame:05d}.jpg")
            if img_original_bgr is not None:
                video_frame += 1
                if args.save_frame:
                    gnu.make_subdir(image_path)
                    cv2.imwrite(image_path, img_original_bgr)
                    
        else:
            assert False, "未知的输入类型"
            
        cur_frame += 1
        # Stop at end of input or past the requested end frame.
        if img_original_bgr is None or cur_frame > args.end_frame:
            break
            
        print("--------------------------------------")
        
        # Run the mocap pipeline on this frame.
        result_img, keypoints = process_image(img_original_bgr, mode)
        
        # Optionally display the rendering (ESC exits).
        if not args.no_display:
            result_img = result_img.astype(np.uint8)
            cv2.imshow('FrankMocap Result', result_img)
            key = cv2.waitKey(1)
            if key == 27:  # ESC key
                break
        
        # Save the rendered result next to the frames.
        if args.out_dir is not None:
            result_path = osp.join(args.out_dir, "results", f"{os.path.basename(image_path)}")
            gnu.make_subdir(result_path)
            cv2.imwrite(result_path, result_img)
        
        timer.toc(bPrint=True, title="Time")
        print(f"Processed: {image_path}")
    
    # Assemble the per-frame results into a video.
    if not args.no_video_out and input_type in ['video', 'webcam']:
        demo_utils.gen_video_out(args.out_dir, args.seq_name)
    
    # Release capture devices and windows.
    if input_type in ['video', 'webcam'] and input_data is not None:
        input_data.release()
    cv2.destroyAllWindows()

def run_frank_mocap(args):
    """Run the full FrankMocap pipeline (body + hands) on the input."""
    return run_mocap_on_input(args, mode='frank')

def run_hand_mocap(args):
    """Run the hand-only capture pipeline on the input."""
    return run_mocap_on_input(args, mode='hand')

def run_body_mocap(args):
    """Run the body-only capture pipeline on the input."""
    return run_mocap_on_input(args, mode='body')

if __name__ == "__main__":
    # 检查是否有命令行参数
    if len(sys.argv) > 1:
        # 命令行模式
        parser = DemoOptions().parser
        args = parser.parse_args()
        
        # 设置默认值
        if not hasattr(args, 'seq_name') or args.seq_name is None:
            args.seq_name = os.path.basename(args.input_path).split('.')[0] if args.input_path != 'webcam' else 'webcam'
        
        # 根据脚本名称或选项决定运行模式
        script_name = sys.argv[0].lower()
        if 'hand' in script_name or args.mode == 'hand':
            print("运行手部捕捉模式")
            run_hand_mocap(args)
        elif 'body' in script_name or args.mode == 'body':
            print("运行身体捕捉模式")
            run_body_mocap(args)
        else:
            print("运行完整的FrankMocap模式")
            run_frank_mocap(args)
    else:
        # Web应用模式
        # 创建静态文件夹和模板文件夹
        os.makedirs("static", exist_ok=True)
        os.makedirs("templates", exist_ok=True)
        
        # 启动服务器
        uvicorn.run(app, host="0.0.0.0", port=8000)