# Copyright (c) Facebook, Inc. and its affiliates.
import sys
import os
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import os
import sys
import os.path as osp
import torch
from torchvision.transforms import Normalize
import numpy as np
import cv2
import argparse
import json
import pickle
from mocap_utils.smoothing import PoseSmoothing  # 导入刚才创建的平滑处理类
import time  # 添加time模块导入
import tkinter as tk
from tkinter import filedialog
############# input parameters  #############
from demo.demo_options import DemoOptions
from bodymocap.body_mocap_api import BodyMocap
from handmocap.hand_mocap_api import HandMocap
import mocap_utils.demo_utils as demo_utils
import mocap_utils.general_utils as gnu
from mocap_utils.timer import Timer
from datetime import datetime

from bodymocap.body_bbox_detector import BodyPoseEstimator
from handmocap.hand_bbox_detector import HandBboxDetector
from integration.copy_and_paste import integration_copy_paste

import renderer.image_utils as imu
from renderer.viewer2D import ImShow


def __filter_bbox_list(body_bbox_list, hand_bbox_list, single_person):
    """Order body/hand bbox pairs by body-bbox area, largest first.

    Sorting by area keeps person ordering roughly consistent across frames
    without running a tracker. When ``single_person`` is truthy, only the
    largest person's bboxes are kept.
    """
    # Area = width * height of each body bbox (bbox layout: x, y, w, h).
    areas = [bbox[2] * bbox[3] for bbox in body_bbox_list]
    # np.argsort (not a stable Python sort) is kept so tie-breaking matches
    # the rest of the pipeline; reverse for descending area.
    order = np.argsort(areas)[::-1]
    sorted_bodies = [body_bbox_list[idx] for idx in order]
    sorted_hands = [hand_bbox_list[idx] for idx in order]

    if single_person and sorted_bodies:
        sorted_bodies = sorted_bodies[:1]
        sorted_hands = sorted_hands[:1]

    return sorted_bodies, sorted_hands


def run_regress(
    args, img_original_bgr, 
    body_bbox_list, hand_bbox_list, bbox_detector,
    body_mocap, hand_mocap
):
    """Regress body and hand pose for one BGR frame and integrate the results.

    Two paths:
      * If bboxes were pre-computed (or fast mode is off), optionally run
        joint hand/body bbox detection, then regress hands and body.
      * Otherwise (fast mode, no pre-computed bboxes), detect body bboxes
        first, regress the body, derive hand bboxes from the body
        prediction, then regress the hands.

    Returns:
        Tuple ``(body_bbox_list, hand_bbox_list, integral_output_list)``;
        three empty lists when no body is detected.
    """
    # cond1: caller supplied both body and hand bboxes for this frame.
    cond1 = len(body_bbox_list) > 0 and len(hand_bbox_list) > 0
    # cond2: slow mode, where full hand-bbox detection is allowed.
    cond2 = not args.frankmocap_fast_mode

    # use pre-computed bbox or use slow detection mode
    if cond1 or cond2:
        if not cond1 and cond2:
            # run detection only when bbox is not available
            body_pose_list, body_bbox_list, hand_bbox_list, _ = \
                bbox_detector.detect_hand_bbox(img_original_bgr.copy())
        else:
            print("Use pre-computed bounding boxes")
        # Body and hand lists must stay aligned one-to-one per person.
        assert len(body_bbox_list) == len(hand_bbox_list)

        if len(body_bbox_list) < 1: 
            return list(), list(), list()

        # sort the bbox using bbox size 
        # only keep on bbox if args.single_person is set
        body_bbox_list, hand_bbox_list = __filter_bbox_list(
            body_bbox_list, hand_bbox_list, args.single_person)

        # hand & body pose regression
        pred_hand_list = hand_mocap.regress(
            img_original_bgr, hand_bbox_list, add_margin=True)
        pred_body_list = body_mocap.regress(img_original_bgr, body_bbox_list)
        assert len(hand_bbox_list) == len(pred_hand_list)
        assert len(pred_hand_list) == len(pred_body_list)

    else:
        # Fast mode with no pre-computed bboxes: detect bodies only.
        _, body_bbox_list = bbox_detector.detect_body_bbox(img_original_bgr.copy())

        if len(body_bbox_list) < 1: 
            return list(), list(), list()

        # sort the bbox using bbox size 
        # only keep on bbox if args.single_person is set
        # Placeholder hand bboxes keep the two lists the same length for
        # __filter_bbox_list; real hand bboxes are derived from the body below.
        hand_bbox_list = [None, ] * len(body_bbox_list)
        body_bbox_list, _ = __filter_bbox_list(
            body_bbox_list, hand_bbox_list, args.single_person)

        # body regression first 
        pred_body_list = body_mocap.regress(img_original_bgr, body_bbox_list)
        assert len(body_bbox_list) == len(pred_body_list)

        # get hand bbox from body
        hand_bbox_list = body_mocap.get_hand_bboxes(pred_body_list, img_original_bgr.shape[:2])
        assert len(pred_body_list) == len(hand_bbox_list)

        # hand regression
        pred_hand_list = hand_mocap.regress(
            img_original_bgr, hand_bbox_list, add_margin=True)
        assert len(hand_bbox_list) == len(pred_hand_list) 

    # integration by copy-and-paste
    integral_output_list = integration_copy_paste(
        pred_body_list, pred_hand_list, body_mocap.smpl, img_original_bgr.shape)
    
    return body_bbox_list, hand_bbox_list, integral_output_list


# demo_frankmocap.py — the input-file selection logic was modified below to
# optionally pick the input video through a Tk file dialog.
def run_frank_mocap(args, bbox_detector, body_mocap, hand_mocap, visualizer):
    """Run the whole-body (body + hand) mocap demo over the configured input.

    Handles input selection (optional Tk file dialog), default output
    directory / sequence-name setup, per-frame regression with temporal
    pose smoothing, on-screen display with fps pacing, and optional
    saving of frames, bboxes, result images, pkl predictions, and an
    output video.

    Note: user-facing messages printed here are Chinese string literals;
    they are runtime output and are left unchanged.
    """
    # Optionally let the user pick the input video through a Tk file dialog.
    if hasattr(args, 'use_file_dialog') and args.use_file_dialog:
        root = tk.Tk()
        root.withdraw()  # hide the empty root Tk window
        
        # Open the file-selection dialog (video formats first, then all files).
        video_file = filedialog.askopenfilename(
            title="选择视频文件",
            filetypes=[
                ("视频文件", "*.mp4 *.avi *.mov *.mkv"),
                ("所有文件", "*.*")
            ]
        )
        
        # If the user picked a file, use it as the input path.
        if video_file:
            args.input_path = video_file
            print(f"已选择视频文件: {args.input_path}")
        else:
            print("未选择任何文件，将使用示例视频")
            # Fall back to a bundled sample video.
            args.input_path = './sample_data/han_hand_short.mp4'
            
            # Abort if the fallback video does not exist either.
            if not os.path.exists(args.input_path):
                print(f"错误: 默认视频文件 {args.input_path} 不存在")
                print("请指定一个有效的视频文件路径")
                sys.exit(1)
            
            print(f"使用默认视频: {args.input_path}")
    #Setup input data to handle different types of inputs
    # NOTE(review): for 'video'/'webcam', input_data appears to be a
    # cv2.VideoCapture (it supports .read()/.get()/.release()); for
    # 'image_dir'/'bbox_dir' it is indexable per frame — confirm in
    # demo_utils.setup_input.
    input_type, input_data = demo_utils.setup_input(args)
    
    # Default output directory when none was given.
    if args.out_dir is None:
        args.out_dir = os.path.join(os.getcwd(), 'mocap_output')
        os.makedirs(args.out_dir, exist_ok=True)
        print(f"未指定输出目录。使用默认目录: {args.out_dir}")
    
    # Default sequence name when none was given.
    if not hasattr(args, 'seq_name') or args.seq_name is None:
        # Derive the sequence name from the input path, or use a timestamp.
        if hasattr(args, 'input_path') and args.input_path != 'webcam':
            # Use the input file's base name (without extension).
            seq_name = os.path.basename(args.input_path).split('.')[0]
        else:
            # Use the current timestamp as the default sequence name.
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            seq_name = f"mocap_sequence_{timestamp}"
        
        # Store the derived name back on args for downstream consumers.
        args.seq_name = seq_name
        print(f"未指定序列名称。使用默认名称: {args.seq_name}")
     
    # Temporal pose smoother applied to each frame's predictions.
    pose_smoother = PoseSmoothing(window_size=5, smoothing_factor=0.7)
    
    # Frame-rate pacing for on-screen display.
    if not hasattr(args, 'target_fps'):
        args.target_fps = 30  # default display frame rate
    frame_time = 1.0 / args.target_fps
    
    # Query the source fps when the input is a video file.
    original_fps = None
    if input_type == 'video' and input_data is not None:
        original_fps = input_data.get(cv2.CAP_PROP_FPS)
        if original_fps and original_fps > 0:
            # Match the source fps, capped at 30 fps.
            args.target_fps = min(original_fps, 30)  # cap at 30 fps
            frame_time = 1.0 / args.target_fps
            print(f"原始视频帧率: {original_fps}, 目标显示帧率: {args.target_fps}")
    
    cur_frame = args.start_frame
    video_frame = 0
    
    # Main per-frame processing loop.
    while True:
        start_time = time.time()  # start-of-frame timestamp for fps pacing
        
        # load data
        load_bbox = False

        if input_type =='image_dir':
            if cur_frame < len(input_data):
                image_path = input_data[cur_frame]
                img_original_bgr  = cv2.imread(image_path)
            else:
                img_original_bgr = None

        elif input_type == 'bbox_dir':
            if cur_frame < len(input_data):
                image_path = input_data[cur_frame]['image_path']
                hand_bbox_list = input_data[cur_frame]['hand_bbox_list']
                body_bbox_list = input_data[cur_frame]['body_bbox_list']
                img_original_bgr  = cv2.imread(image_path)
                load_bbox = True
            else:
                img_original_bgr = None

        elif input_type == 'video':      
            # Frames before start_frame are read and discarded to seek forward.
            _, img_original_bgr = input_data.read()
            if video_frame < cur_frame:
                video_frame += 1
                continue
            # save the obtained video frames
            image_path = osp.join(args.out_dir, "frames", f"{cur_frame:05d}.jpg")
            if img_original_bgr is not None:
                video_frame += 1
                if args.save_frame:
                    gnu.make_subdir(image_path)
                    cv2.imwrite(image_path, img_original_bgr)
        
        elif input_type == 'webcam':
            _, img_original_bgr = input_data.read()

            if video_frame < cur_frame:
                video_frame += 1
                continue
            # save the obtained video frames
            image_path = osp.join(args.out_dir, "frames", f"scene_{cur_frame:05d}.jpg")
            if img_original_bgr is not None:
                video_frame += 1
                if args.save_frame:
                    gnu.make_subdir(image_path)
                    cv2.imwrite(image_path, img_original_bgr)
        else:
            assert False, "Unknown input_type"

        cur_frame +=1
        # Stop at end of stream or once past the requested end frame.
        if img_original_bgr is None or cur_frame > args.end_frame:
            break   
        print("--------------------------------------")
        
        # Resize the frame when a resize_factor other than 1.0 was requested.
        if img_original_bgr is not None and hasattr(args, 'resize_factor') and args.resize_factor != 1.0:
            h, w = img_original_bgr.shape[:2]
            new_h, new_w = int(h * args.resize_factor), int(w * args.resize_factor)
            img_original_bgr = cv2.resize(img_original_bgr, (new_w, new_h))
        
        # bbox detection
        # Without pre-loaded bboxes, start empty; run_regress will detect.
        if not load_bbox:
            body_bbox_list, hand_bbox_list = list(), list()
        
        # regression (includes integration)
        body_bbox_list, hand_bbox_list, pred_output_list = run_regress(
            args, img_original_bgr, 
            body_bbox_list, hand_bbox_list, bbox_detector,
            body_mocap, hand_mocap)

        # Apply temporal smoothing to every person's prediction in place.
        if len(pred_output_list) > 0:
            for i in range(len(pred_output_list)):
                pred_output_list[i] = pose_smoother.smooth_pose(pred_output_list[i])

        # save the obtained body & hand bbox to json file
        if args.save_bbox_output: 
            demo_utils.save_info_to_json(args, image_path, body_bbox_list, hand_bbox_list)

        if len(body_bbox_list) < 1: 
            # NOTE(review): message typo "deteced" kept — it is a runtime string.
            print(f"No body deteced: {image_path}")
            continue

        pred_mesh_list = demo_utils.extract_mesh_from_output(pred_output_list)

        # visualization
        res_img = visualizer.visualize(
            img_original_bgr,
            pred_mesh_list = pred_mesh_list,
            body_bbox_list = body_bbox_list,
            hand_bbox_list = hand_bbox_list)
        
        # show result in the screen
        if not args.no_display:
            res_img = res_img.astype(np.uint8)
            ImShow(res_img)

        # save result image
        if args.out_dir is not None:
            demo_utils.save_res_img(args.out_dir, image_path, res_img)

        # save predictions to pkl
        if args.save_pred_pkl:
            demo_type = 'frank'
            demo_utils.save_pred_to_pkl(
                args, demo_type, image_path, body_bbox_list, hand_bbox_list, pred_output_list)

        # FPS pacing: wait the remaining slice of the frame budget, if any.
        processing_time = time.time() - start_time
        if processing_time < frame_time:
            wait_time = int((frame_time - processing_time) * 1000)
            # For on-screen display, cv2.waitKey doubles as the pacing sleep.
            if not args.no_display:
                key = cv2.waitKey(wait_time)
                if key == 27:  # ESC key quits
                    break
        else:
            # Processing is slower than the frame budget; no extra wait.
            if not args.no_display:
                key = cv2.waitKey(1)
                if key == 27:  # ESC key quits
                    break
        
        if processing_time > frame_time:
            print(f"警告: 处理速度跟不上目标帧率 (处理时间: {processing_time:.3f}s, 目标: {frame_time:.3f}s)")
            
        print(f"Processed : {image_path}")

    # save images as a video
    if not args.no_video_out and input_type in ['video', 'webcam']:
        demo_utils.gen_video_out(args.out_dir, args.seq_name)

    # Release capture resources and close any OpenCV windows.
    # NOTE(review): the 'video' capture is never released here — only
    # 'webcam' is; presumably harmless at process exit, but worth confirming.
    if input_type =='webcam' and input_data is not None:
        input_data.release()
    cv2.destroyAllWindows()
def main():
    """Entry point: parse options, build detectors/regressors, run the demo.

    Raises:
        RuntimeError: if CUDA is not available (the pipeline is GPU-only).
    """
    args = DemoOptions().parse()
    args.use_smplx = True

    # The pipeline is GPU-only. Fail fast with an explicit exception instead
    # of `assert` (asserts are stripped under `python -O`), and drop the dead
    # CPU fallback the old device-selection ternary implied.
    if not torch.cuda.is_available():
        raise RuntimeError("Current version only supports GPU")
    device = torch.device('cuda')

    hand_bbox_detector = HandBboxDetector('third_view', device)

    # Set Mocap regressors (SMPL-X body model + hand model).
    body_mocap = BodyMocap(args.checkpoint_body_smplx, args.smpl_dir, device=device, use_smplx=True)
    hand_mocap = HandMocap(args.checkpoint_hand, args.smpl_dir, device=device)

    # Set Visualizer: screen-free renderers vs. interactive OpenGL viewer.
    if args.renderer_type in ['pytorch3d', 'opendr']:
        from renderer.screen_free_visualizer import Visualizer
    else:
        from renderer.visualizer import Visualizer
    visualizer = Visualizer(args.renderer_type)

    run_frank_mocap(args, hand_bbox_detector, body_mocap, hand_mocap, visualizer)


if __name__ == '__main__':
    main()