# Copyright (c) Facebook, Inc. and its affiliates.

import os
import sys
import os.path as osp
import torch
import numpy as np
import cv2
import argparse
from datetime import datetime

# Import the demo pipeline modules
from demo.demo_options import DemoOptions
from demo.demo_bodymocap import run_body_mocap
from demo.demo_handmocap import run_hand_mocap
from demo.demo_frankmocap import run_frank_mocap
from demo.demo_visualize_prediction import visualize_prediction, __get_data_type, __get_smpl_model

from bodymocap.body_mocap_api import BodyMocap
from handmocap.hand_mocap_api import HandMocap
from bodymocap.body_bbox_detector import BodyPoseEstimator
from handmocap.hand_bbox_detector import HandBboxDetector

import mocap_utils.demo_utils as demo_utils
import mocap_utils.general_utils as gnu
from mocap_utils.timer import Timer

def print_menu():
    """Display the interactive top-level menu and return the user's raw choice.

    Returns:
        The string typed by the user (expected to be '0'-'4', unvalidated).
    """
    menu_lines = (
        "\n===== FrankMocap 交互式系统 =====",
        "1. 身体动作捕捉 (Body Mocap)",
        "2. 手部动作捕捉 (Hand Mocap)",
        "3. 完整动作捕捉 (Frank Mocap)",
        "4. 可视化之前的预测结果",
        "0. 退出程序",
        "================================",
    )
    for line in menu_lines:
        print(line)
    return input("请选择功能 (0-4): ")

def setup_body_mocap(args):
    """Build the body-mocap pipeline: body bbox detector, regressor, visualizer.

    Args:
        args: parsed demo options (use_smplx, checkpoint paths, smpl_dir,
            renderer_type).

    Returns:
        (body_bbox_detector, body_mocap, visualizer) tuple.

    Raises:
        AssertionError: if no CUDA device is available (GPU-only version).
    """
    # GPU is mandatory for this version: fail fast before building anything.
    # (The original computed a CPU fallback device that could never be used,
    # since this assert immediately rejected the CPU case.)
    assert torch.cuda.is_available(), "当前版本仅支持GPU"
    device = torch.device('cuda')

    # Body bounding-box / 2D pose detector.
    body_bbox_detector = BodyPoseEstimator()

    # Body-pose regressor; checkpoint depends on the chosen body model.
    use_smplx = args.use_smplx
    checkpoint_path = args.checkpoint_body_smplx if use_smplx else args.checkpoint_body_smpl
    print("use_smplx", use_smplx)
    body_mocap = BodyMocap(checkpoint_path, args.smpl_dir, device, use_smplx)

    # Renderer backend is imported lazily based on the CLI option.
    if args.renderer_type in ['pytorch3d', 'opendr']:
        from renderer.screen_free_visualizer import Visualizer
    else:
        from renderer.visualizer import Visualizer
    visualizer = Visualizer(args.renderer_type)

    return body_bbox_detector, body_mocap, visualizer

def setup_hand_mocap(args):
    """Build the hand-mocap pipeline: hand bbox detector, regressor, visualizer.

    Args:
        args: parsed demo options (view_type, checkpoint_hand, smpl_dir,
            renderer_type).

    Returns:
        (bbox_detector, hand_mocap, visualizer) tuple.

    Raises:
        AssertionError: if no CUDA device is available (GPU-only version).
    """
    # GPU is mandatory for this version: fail fast before building anything.
    # (The original computed a CPU fallback device that could never be used,
    # since this assert immediately rejected the CPU case.)
    assert torch.cuda.is_available(), "当前版本仅支持GPU"
    device = torch.device('cuda')

    # Hand bounding-box detector; view_type selects ego/third-person model.
    bbox_detector = HandBboxDetector(args.view_type, device)

    # Hand-pose regressor.
    hand_mocap = HandMocap(args.checkpoint_hand, args.smpl_dir, device=device)

    # Renderer backend is imported lazily based on the CLI option.
    if args.renderer_type in ['pytorch3d', 'opendr']:
        from renderer.screen_free_visualizer import Visualizer
    else:
        from renderer.visualizer import Visualizer
    visualizer = Visualizer(args.renderer_type)

    return bbox_detector, hand_mocap, visualizer

def setup_frank_mocap(args):
    """Build the full (body + hand) mocap pipeline for the Frank Mocap demo.

    Args:
        args: parsed demo options (checkpoint paths, smpl_dir, renderer_type).

    Returns:
        (hand_bbox_detector, body_mocap, hand_mocap, visualizer) tuple.

    Raises:
        AssertionError: if no CUDA device is available (GPU-only version).
    """
    # GPU is mandatory for this version: fail fast before building anything.
    # (The original computed a CPU fallback device that could never be used,
    # since this assert immediately rejected the CPU case.)
    assert torch.cuda.is_available(), "当前版本仅支持GPU"
    device = torch.device('cuda')

    # Frank Mocap always detects hands from a third-person view.
    hand_bbox_detector = HandBboxDetector('third_view', device)

    # Regressors: the combined pipeline always uses the SMPL-X body model.
    body_mocap = BodyMocap(args.checkpoint_body_smplx, args.smpl_dir, device=device, use_smplx=True)
    hand_mocap = HandMocap(args.checkpoint_hand, args.smpl_dir, device=device)

    # Renderer backend is imported lazily based on the CLI option.
    if args.renderer_type in ['pytorch3d', 'opendr']:
        from renderer.screen_free_visualizer import Visualizer
    else:
        from renderer.visualizer import Visualizer
    visualizer = Visualizer(args.renderer_type)

    return hand_bbox_detector, body_mocap, hand_mocap, visualizer

def create_default_output_dir():
    """Create and return a timestamped default output directory.

    The directory is ./output/<YYYYmmdd-HHMMSS> under the current working
    directory; it is created if it does not already exist.

    Returns:
        Absolute path of the created directory.
    """
    current_time = datetime.now().strftime("%Y%m%d-%H%M%S")
    output_dir = os.path.join(os.getcwd(), "output", current_time)
    # exist_ok avoids the race between an existence check and creation.
    os.makedirs(output_dir, exist_ok=True)
    return output_dir

def setup_args_for_option(option):
    """Parse the demo CLI arguments and validate/augment them for a menu option.

    Args:
        option: menu choice as a string ('1'-'4').

    Returns:
        The parsed options namespace, or None when validation fails
        (missing input path or missing --pkl_dir for option '4').
    """
    args = DemoOptions().parse()

    # Fall back to a timestamped default when no output directory was given.
    if not args.out_dir:
        args.out_dir = create_default_output_dir()
        print(f"未指定输出目录。使用: {args.out_dir}")

    # Ensure the output directory exists (exist_ok avoids a check/create race).
    os.makedirs(args.out_dir, exist_ok=True)

    # Frank Mocap (option 3) always runs with the SMPL-X body model.
    if option == '3':
        args.use_smplx = True

    # Validate the input path when one was provided.
    if args.input_path and not os.path.exists(args.input_path):
        print(f"错误: 输入路径 '{args.input_path}' 不存在")
        return None

    # Option 4 (visualization) requires a directory of saved predictions.
    if option == '4' and not args.pkl_dir:
        print("错误: 请使用--pkl_dir指定保存预测结果的目录")
        return None

    return args

def body_mocap_option():
    """Menu option 1: run body motion capture on the configured input."""
    print("\n== 身体动作捕捉 ==")

    options = setup_args_for_option('1')
    if options is None:
        return
    if options.input_path is None:
        print("请指定输入路径 (--input_path)")
        return

    detector, regressor, renderer = setup_body_mocap(options)
    run_body_mocap(options, detector, regressor, renderer)
    print(f"处理完成。结果保存在: {options.out_dir}")

def hand_mocap_option():
    """Menu option 2: run hand motion capture on the configured input."""
    print("\n== 手部动作捕捉 ==")

    options = setup_args_for_option('2')
    if options is None:
        return
    if options.input_path is None:
        print("请指定输入路径 (--input_path)")
        return

    detector, regressor, renderer = setup_hand_mocap(options)
    run_hand_mocap(options, detector, regressor, renderer)
    print(f"处理完成。结果保存在: {options.out_dir}")

def frank_mocap_option():
    """Menu option 3: run the combined body + hand (Frank) motion capture."""
    print("\n== 完整动作捕捉 ==")

    options = setup_args_for_option('3')
    if options is None:
        return
    if options.input_path is None:
        print("请指定输入路径 (--input_path)")
        return

    hand_detector, body_regressor, hand_regressor, renderer = setup_frank_mocap(options)
    run_frank_mocap(options, hand_detector, body_regressor, hand_regressor, renderer)
    print(f"处理完成。结果保存在: {options.out_dir}")

def visualize_prediction_option():
    """Menu option 4: re-render previously saved .pkl prediction results."""
    print("\n== 可视化预测结果 ==")

    options = setup_args_for_option('4')
    if options is None:
        return

    # The prediction directory must already exist.
    if not os.path.exists(options.pkl_dir):
        print(f"错误: pkl目录 '{options.pkl_dir}' 不存在")
        return

    # Collect every saved prediction file.
    prediction_files = gnu.get_all_files(options.pkl_dir, ".pkl", "full")
    if not prediction_files:
        print(f"错误: 在 '{options.pkl_dir}' 找不到任何 .pkl 文件")
        return

    # Infer the demo type and SMPL variant from the saved predictions,
    # then load the matching body model.
    demo_type, smpl_type = __get_data_type(prediction_files)
    smpl_model = __get_smpl_model(demo_type, smpl_type)

    # Only the screen-free renderer backends support this mode.
    assert options.renderer_type in ['pytorch3d', 'opendr'], \
        f"{options.renderer_type} 尚未实现。"
    from renderer.screen_free_visualizer import Visualizer
    renderer = Visualizer(options.renderer_type)

    visualize_prediction(options, demo_type, smpl_type, smpl_model, prediction_files, renderer)
    print(f"处理完成。结果保存在: {options.out_dir}")

def main():
    """Interactive entry point: loop over the menu until the user quits."""
    # Dispatch table: menu choice -> handler function.
    handlers = {
        '1': body_mocap_option,
        '2': hand_mocap_option,
        '3': frank_mocap_option,
        '4': visualize_prediction_option,
    }

    while True:
        choice = print_menu()

        if choice == '0':
            print("退出程序...")
            break

        handler = handlers.get(choice)
        if handler is not None:
            handler()
        else:
            print("无效选择，请重试")

        # Pause so the user can read the output before the menu redraws.
        input("\n按回车键继续...")

# Script entry point: start the interactive menu loop.
if __name__ == '__main__':
    main()