import cv2
from ultralytics import YOLO
import json
import time
import copy
import numpy as np
from pose.detect import pose_detect
from matching.match import pose_match
from AlphaPose.scripts.demo_inference import init_parser, load_model
from AlphaPose.detector.apis import get_detector
from utils.save_pairs import save_pairs, save_pairs_face
from utils.save_video import save_video
from utils.generate_frame import generate_frame, add_json
from utils.filter import filter
from projection.projection import projection

import os

# Project directory layout.  Paths deliberately keep a trailing slash so that
# downstream code can concatenate filenames/sub-folders directly.
Root_dir = "/home/airport/Airport_check/"
Data_dir = Root_dir + "data/"
Output_dir = Root_dir + "output/"

# Restrict CUDA to physical GPUs 1 and 3 for all models loaded below.
os.environ['CUDA_VISIBLE_DEVICES'] = '1,3'

# Identifier of the current capture session; used as the output sub-folder name.
current_id = 2023

# Build the AlphaPose argument/config pair once; both are reused for every frame.
args_pose, cfg_pose = init_parser()

# Person detector used by the AlphaPose pipeline (separate from the YOLO models below).
detector_pose = get_detector(args_pose)
# model_track = YOLO(Data_dir + "weights/yolov8x.pt")
# Person/luggage tracker: custom fine-tuned weights (relative path into the repo).
model_track = YOLO("./ultralytics/scripts/demo_5k/yolo100e_m/weights/best.pt")
# Face detector/tracker weights.
model_face = YOLO(Data_dir + "weights/faces.pt")
# Pose model plus its dataset wrapper — NOTE(review): the exact return contract of
# load_model comes from AlphaPose.scripts.demo_inference; verify there if changed.
model_pose, dataset_pose = load_model(args_pose, cfg_pose)


# Two camera views of the same scene, processed frame-by-frame in lockstep.
source_track = Data_dir+ "origin/edited2.mp4"  # body/luggage tracking view
source_face = Data_dir+ "origin/edited.mp4"    # face view

cap_track = cv2.VideoCapture(source_track)
cap_face = cv2.VideoCapture(source_face)

#TODO: align the two video streams (frames are currently assumed to be in sync)
#TODO: simplify results and de-duplicate repeated detections
luggage_classes = [0]  # class ids treated as luggage — TODO confirm against model labels

frame_id = 0        # incremented at the top of the loop, so effectively 1-based
limit_id = 1000     # hard cap on the number of frames processed
final_matches = []  # person<->luggage matches accumulated across all frames

os.makedirs(f"{Output_dir}{current_id}", exist_ok=True)
video_writer = None  # created lazily by save_video() on the first written frame
# Per-frame pipeline: run face tracking, body tracking and pose estimation,
# match poses to tracked boxes, persist the results, and append an annotated
# debug frame to the output video.
while cap_track.isOpened() and cap_face.isOpened() and frame_id < limit_id:
    success_track, frame_track = cap_track.read()
    success_face, frame_face = cap_face.read()

    # Stop as soon as either stream runs out — the two videos advance in lockstep.
    if not success_track or not success_face:
        break
    frame_id += 1
    print(frame_id)
    start_time = time.perf_counter()

    # Face tracking; persist=True keeps tracker identities across frames.
    results_face_tmp = model_face.track(frame_face, persist=True)
    # tojson() returns a fresh str, so the former copy.deepcopy() was a no-op.
    results_face = json.loads(results_face_tmp[0].tojson())
    # NOTE(review): results_face[0] is indexed unconditionally — this raises
    # IndexError on a frame with no face detections; confirm that cannot happen.
    proj_face, project_result = projection(results_face[0]['box'].values(), frame_track)

    # Body/luggage tracking on the second view, same persistent-tracker mode.
    results_track_detect_tmp = model_track.track(frame_track, persist=True)

    results_track_pose = pose_detect(frame_track, Data_dir, current_id, frame_id, args_pose, cfg_pose, model_pose, dataset_pose, detector_pose)
    results_track_detect = json.loads(results_track_detect_tmp[0].tojson())

    # results_track_detect = filter(results_track_detect) # filter the result of human and boxes
    # Associate detected poses with tracked boxes, then persist the pairings.
    results_pairs = pose_match(frame_track, results_track_pose, results_track_detect, current_id, frame_id, Output_dir)

    save_pairs_face(frame_face, results_track_detect, proj_face, f"{Output_dir}{current_id}/")
    final_matches, new_matches = save_pairs(frame_track, results_track_detect, results_pairs, final_matches, f"{Output_dir}{current_id}/")

    # Annotated frames used to build the debug video below.
    annotated_detect = results_track_detect_tmp[0].plot()
    annotated_face = results_face_tmp[0].plot()

    # Dump the per-frame results as JSON (each file is overwritten every iteration).
    save_results = True
    if save_results:
        with open(f"{Output_dir}{current_id}/results_face.json", 'w') as f:
            json.dump(results_face, f, indent=4)
        with open(f"{Output_dir}{current_id}/results_track_detect.json", 'w') as f:
            json.dump(results_track_detect, f, indent=4)
        with open(f"{Output_dir}{current_id}/results_track_pose.json", 'w') as f:
            json.dump(results_track_pose, f, indent=4)
        with open(f"{Output_dir}{current_id}/results_pairs.json", 'w') as f:
            json.dump(results_pairs, f, indent=4)
        with open(f"{Output_dir}{current_id}/results_matches.json", 'w') as f:
            json.dump(final_matches, f, indent=4)

    # Stack the annotated track view over the annotated face view; append the
    # generated match-summary frame (if any) below, resized to the same width.
    concatenated_result = generate_frame(results_track_detect, final_matches, f"{Output_dir}{current_id}")
    concatenated_image = np.concatenate((annotated_detect, add_json(new_matches, annotated_face)), axis=0)
    if concatenated_result is not None:
        concatenated_result = cv2.resize(concatenated_result, (concatenated_image.shape[1], concatenated_result.shape[0]))
        concatenated_image = np.concatenate((concatenated_image, concatenated_result))

    concatenated_image = cv2.resize(concatenated_image, (1600, 1600))
    video_writer = save_video(video_writer, concatenated_image, Output_dir, "video")
    end_time = time.perf_counter()
    print(f"Frame ID: {frame_id} Time consuming: {end_time - start_time}s")

# Release resources.  video_writer stays None when the loop never wrote a frame
# (e.g. a capture failed to open), so guard against AttributeError.
if video_writer is not None:
    video_writer.release()
cap_track.release()
cap_face.release()
