#!/usr/bin/env python
"""
FrankMoCap demo without visualization - only data extraction
"""

import os
import os.path as osp
import torch
import numpy as np
import cv2

import sys
sys.path.append('.')

from demo.demo_options import DemoOptions
import mocap_utils.general_utils as gnu
from mocap_utils import demo_utils

from bodymocap.body_mocap_api import BodyMocap
from handmocap.hand_bbox_detector import HandBboxDetector
from handmocap.hand_mocap_api import HandMocap

from integration.copy_and_paste import integration_copy_paste

def __filter_bbox_list(body_bbox_list, hand_bbox_list, single_person):
    """Optionally reduce the detections to the single largest person.

    Args:
        body_bbox_list: list of body bboxes as [x, y, w, h].
        hand_bbox_list: hand bboxes parallel to body_bbox_list, or None when
            only body bboxes are being filtered (body-only fast mode).
        single_person: when True and more than one body was detected, keep
            only the body (and matching hand entry) with the largest area.

    Returns:
        (body_bbox_list, hand_bbox_list) — possibly reduced to one entry each;
        hand_bbox_list is returned as None if it was passed as None.
    """
    if single_person and len(body_bbox_list) > 1:
        # pick the bbox with the biggest area (w * h)
        bbox_size = [(x[2] * x[3]) for x in body_bbox_list]
        idx_big = bbox_size.index(max(bbox_size))

        body_bbox_list = [body_bbox_list[idx_big]]
        # hand_bbox_list may be None (body-only path); only index it when present
        if hand_bbox_list is not None:
            hand_bbox_list = [hand_bbox_list[idx_big]]

    return body_bbox_list, hand_bbox_list

def run_regress(args, img_original_bgr, body_bbox_list, hand_bbox_list, bbox_detector, body_mocap, hand_mocap):
    """Regress body (and, when possible, hand) pose for one BGR frame.

    Args:
        args: parsed demo options; reads `frankmocap_fast_mode` and `single_person`.
        img_original_bgr: input frame as an OpenCV BGR image array.
        body_bbox_list: pre-computed body bboxes ([x, y, w, h]); may be empty.
        hand_bbox_list: pre-computed hand bboxes parallel to body_bbox_list; may be empty.
        bbox_detector: detector exposing detect_hand_bbox / detect_body_bbox.
        body_mocap: body pose regressor (also provides the SMPL model for integration).
        hand_mocap: hand pose regressor.

    Returns:
        (body_bbox_list, hand_bbox_list, pred_output_list); three empty lists
        when no body is detected in the frame.
    """
    has_precomputed_bbox = len(body_bbox_list) > 0 and len(hand_bbox_list) > 0
    slow_mode = not args.frankmocap_fast_mode

    # use pre-computed bbox or use slow detection mode
    if has_precomputed_bbox or slow_mode:
        if not has_precomputed_bbox and slow_mode:
            # run detection only when bbox is not available;
            # the detector's body-pose output is not needed here
            _, body_bbox_list, hand_bbox_list, _ = \
                bbox_detector.detect_hand_bbox(img_original_bgr.copy())
        else:
            print("Use pre-computed bounding boxes")

        if len(body_bbox_list) < 1:
            return list(), list(), list()

        # keep only the largest-bbox person if args.single_person is set
        body_bbox_list, hand_bbox_list = __filter_bbox_list(
            body_bbox_list, hand_bbox_list, args.single_person)

        # hand & body pose regression
        pred_hand_list = hand_mocap.regress(
            img_original_bgr, hand_bbox_list, add_margin=True)
        pred_body_list = body_mocap.regress(img_original_bgr, body_bbox_list)

    else:
        # fast mode without pre-computed bboxes: body-only detection
        _, body_bbox_list = bbox_detector.detect_body_bbox(img_original_bgr.copy())

        if len(body_bbox_list) < 1:
            return list(), list(), list()

        # keep only the largest-bbox person if args.single_person is set
        body_bbox_list, _ = __filter_bbox_list(body_bbox_list, None, args.single_person)

        # body pose regression only; hands are left undetected in fast mode
        pred_body_list = body_mocap.regress(img_original_bgr, body_bbox_list)
        pred_hand_list = None
        hand_bbox_list = [None, ] * len(body_bbox_list)

    # integration by copy and paste
    pred_output_list = integration_copy_paste(
        pred_body_list, pred_hand_list, body_mocap.smpl, img_original_bgr.shape)

    return body_bbox_list, hand_bbox_list, pred_output_list

def main():
    """Run FrankMoCap over a video and save per-frame predictions (no rendering)."""
    args = DemoOptions().parse()
    args.use_smplx = True

    # This demo requires CUDA; fail fast before loading any model.
    # (The CPU fallback was dead code: the assert fired first anyway.)
    assert torch.cuda.is_available(), "Current version only supports GPU"
    device = torch.device('cuda')

    # Initialize detectors and models
    hand_bbox_detector = HandBboxDetector('third_view', device)
    body_mocap = BodyMocap(args.checkpoint_body_smplx, args.smpl_dir, device=device, use_smplx=True)
    hand_mocap = HandMocap(args.checkpoint_hand, args.smpl_dir, device=device)

    # Open the input video
    assert osp.isfile(args.input_path), f"Input video not found: {args.input_path}"
    cap = cv2.VideoCapture(args.input_path)
    assert cap.isOpened(), f"Failed to open video: {args.input_path}"

    # Get video info
    video_frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    fps = cap.get(cv2.CAP_PROP_FPS)
    print(f"Input video: {video_frame_count} frames, {fps} fps")

    # Output setup
    gnu.make_subdir(args.out_dir)
    # splitext keeps dots inside the stem (e.g. "a.b.mp4" -> "a.b"),
    # unlike split('.')[0] which would truncate to "a"
    seq_name = osp.splitext(osp.basename(args.input_path))[0]
    args.seq_name = seq_name

    # Process frames in [args.start_frame, args.end_frame]
    cur_frame = args.start_frame
    video_frame = 0

    while True:
        ret, img_original_bgr = cap.read()
        if not ret or cur_frame > args.end_frame:
            break

        # Skip (already-read) frames that precede start_frame
        if video_frame < cur_frame:
            video_frame += 1
            continue

        print(f"Processing frame {cur_frame}/{args.end_frame}")

        # Save frame if requested
        if args.save_frame:
            image_path = osp.join(args.out_dir, "frames", f"frame_{cur_frame:05d}.jpg")
            gnu.make_subdir(image_path)
            cv2.imwrite(image_path, img_original_bgr)
        else:
            # symbolic name only, used for bookkeeping in the saved outputs
            image_path = f"frame_{cur_frame:05d}.jpg"

        # Detection + regression; no pre-computed bboxes, so pass empty lists
        body_bbox_list, hand_bbox_list, pred_output_list = run_regress(
            args, img_original_bgr, [], [], hand_bbox_detector, body_mocap, hand_mocap)

        # Save bbox if requested
        if args.save_bbox_output:
            demo_utils.save_info_to_json(args, image_path, body_bbox_list, hand_bbox_list)

        if len(pred_output_list) < 1:
            print(f"No detection in frame {cur_frame}")
            cur_frame += 1
            video_frame += 1
            continue

        # Save predictions
        if args.save_pred_pkl:
            demo_type = 'frank'
            demo_utils.save_pred_to_pkl(args, demo_type, image_path, body_bbox_list, hand_bbox_list, pred_output_list)

        print(f"Saved frame {cur_frame} with {len(pred_output_list)} detections")

        cur_frame += 1
        video_frame += 1

    cap.release()
    print("Processing completed!")

    # Show results
    print(f"\nResults saved in: {args.out_dir}")
    mocap_dir = os.path.join(args.out_dir, "mocap")
    if os.path.exists(mocap_dir):
        pkl_files = [f for f in os.listdir(mocap_dir) if f.endswith('.pkl')]
        print(f"Generated {len(pkl_files)} PKL files")

if __name__ == "__main__":
    main()
