# export https_proxy=http://proxy.329509.xyz:7890
# export NVI_NOTIFY_IGNORE_TASK=1
import os

# NOTE(review): `use` from sympy is never referenced below — this looks like an
# accidental editor auto-import; confirm before removing.
from sympy import use

# Route outbound HTTPS through the proxy (same value as the shell export
# documented at the top of the file).
os.environ["https_proxy"] = "http://proxy.329509.xyz:7890"

import multiprocessing

import shutil
import random
import torch
import cv2
import numpy as np
from pathlib import Path
from time import sleep as time_sleep

import time

# Multi-object trackers evaluated by this script.
from boxmot import BotSort, OcSort, ByteTrack, StrongSort, DeepOcSort, ImprAssocTrack

import tqdm

# GPU auto-selection helper (picks devices by free memory).
from nvitop import select_devices

# Notification helpers; the wildcard import supplies
# group_center_set_user_name and machine_user_message_via_local_nvi_notify.
from group_center.user_tools import *

# Identify this user to the group-center notification service.
group_center_set_user_name("konghaomin")


def generate_params(
    task_index: int,
    dir_path: str,
    det_name: str,
    tracker_name: str,
    reid_path: str = "clip_vehicleid.pt",
    confidence_threshold: float = 0.5,
):
    """Pack the handle_dir arguments into a positional tuple for Pool.starmap."""
    return (task_index, dir_path, det_name, tracker_name, reid_path, confidence_threshold)


def get_torch_device_index(task_index: int = -1) -> int:
    """Pick a GPU index for this task.

    A non-negative *task_index* is mapped deterministically onto one of the
    machine's 8 GPUs (round-robin).  Otherwise this polls nvitop until at
    least one device has >= 4 GiB free and returns the last entry of the
    sorted candidate list.

    Args:
        task_index: Task ordinal; -1 (default) means "auto-select via nvitop".

    Returns:
        A device index usable with ``torch.device``.
    """
    if task_index >= 0:
        # Deterministic round-robin over the 8 local GPUs.
        return task_index % 8

    # Block until nvitop reports at least one device with enough free memory.
    device_list = []
    while not device_list:
        device_list = select_devices(
            min_count=1, max_count=8, min_free_memory="4GiB", sort=True
        )
        if not device_list:
            # Back off briefly before re-querying NVML.
            time_sleep(2)

    # Fixed: was `print(f"device_list:", device_list)` — an f-string with no
    # placeholder passed alongside a second positional argument.
    print("device_list:", device_list)
    return device_list[-1]


def get_torch_device(task_index: int = -1) -> torch.device:
    """Resolve the torch.device for *task_index* (see get_torch_device_index)."""
    return torch.device(get_torch_device_index(task_index=task_index))


def random_sleep(min_time: float = 0, max_time: float = 0.1):
    """Sleep for a random duration (seconds) drawn uniformly from [min_time, max_time]."""
    duration = np.random.uniform(min_time, max_time)
    time_sleep(duration)


def handle_dir(
    task_index: int,
    dir_path: str,
    det_name: str,
    tracker_name: str,
    reid_path: str = "clip_vehicleid.pt",
    confidence_threshold: float = 0.5,
):
    """Run one tracker over a single MOT-style sequence and save its results.

    Reads pre-computed per-frame detections from
    ``<dataset>/Det/<det_name>/<video>/<frame>.txt`` (one
    ``x1 y1 x2 y2 conf cls`` row per line), feeds them to the chosen boxmot
    tracker frame by frame, and writes:

    * MOT result rows to ``<dataset>/SORT_Result/<det>-<tracker>/data/<video>.txt``
    * annotated frames (boxes, IDs, trajectory centers) to
      ``<dataset>/SORT_Result/<det>-<tracker>/<video>/``

    Args:
        task_index: Task ordinal; >= 0 pins the GPU deterministically,
            -1 lets nvitop auto-select one after a random delay.
        dir_path: Sequence directory that contains an ``img1/`` folder of jpgs.
        det_name: Detector name used to locate the detection txt files.
        tracker_name: One of BotSort / OcSort / ByteTrack / StrongSort /
            DeepOcSort / ImprAssocTrack.
        reid_path: ReID weights path (used only by appearance-based trackers).
        confidence_threshold: Detections below this confidence are dropped.

    Raises:
        FileNotFoundError: If a frame has no matching detection txt file.
    """

    def get_reid_weights_device():
        # Stagger GPU auto-selection so concurrent workers don't all pick the
        # same device from an identical free-memory snapshot.
        if task_index < 0:
            random_sleep(2, 10)

        device = get_torch_device(task_index=task_index)

        reid_weights = Path(reid_path)

        return reid_weights, device

    # Instantiate the requested tracker.  Appearance-based trackers need ReID
    # weights and a device; motion-only trackers (OcSort, ByteTrack) do not.
    if tracker_name == "BotSort":
        reid_weights, device = get_reid_weights_device()
        tracker = BotSort(reid_weights=reid_weights, device=device, half=False)
    elif tracker_name == "OcSort":
        tracker = OcSort()
    elif tracker_name == "ByteTrack":
        tracker = ByteTrack()
    elif tracker_name == "StrongSort":
        reid_weights, device = get_reid_weights_device()
        tracker = StrongSort(reid_weights=reid_weights, device=device, half=False)
    elif tracker_name == "DeepOcSort":
        reid_weights, device = get_reid_weights_device()
        tracker = DeepOcSort(reid_weights=reid_weights, device=device, half=False)
    elif tracker_name == "ImprAssocTrack":
        reid_weights, device = get_reid_weights_device()
        tracker = ImprAssocTrack(reid_weights=reid_weights, device=device, half=False)
    # NOTE(review): an unrecognised tracker_name leaves `tracker` unbound and
    # fails later with a NameError at tracker.update() — consider an else/raise.

    model_name = f"{det_name}-{tracker_name}"

    # Output layout: <dataset>/SORT_Result/<det>-<tracker>/data/<video>.txt for
    # MOT rows, plus per-frame annotated images under .../<video>/.
    video_name = os.path.basename(dir_path)
    parent_dir = os.path.dirname(dir_path)
    base_save_dir = os.path.join(os.path.dirname(parent_dir), "SORT_Result", model_name)
    save_dir = os.path.join(base_save_dir, "data")
    frame_save_dir = os.path.join(base_save_dir, video_name)
    os.makedirs(save_dir, exist_ok=True)
    # Start with a clean frame directory for this video on every run.
    if os.path.exists(frame_save_dir):
        shutil.rmtree(frame_save_dir)
    os.makedirs(frame_save_dir, exist_ok=True)
    save_path = os.path.join(save_dir, video_name + ".txt")

    # Detection txt files are expected under <dataset>/Det/<det_name>/<video>/.
    base_det_save_dir = os.path.join(os.path.dirname(parent_dir), "Det", det_name)
    det_video_dir_path = os.path.join(base_det_save_dir, video_name)

    img_dir = os.path.join(dir_path, "img1")
    if not os.path.exists(img_dir):
        # Not a valid MOT sequence directory; nothing to process.
        return

    # Sort frame names with the extension stripped, then re-append ".jpg" so
    # the frame order is stable regardless of listdir order.
    img_list = os.listdir(img_dir)
    img_list = [
        img_name.replace(".jpg", "")
        for img_name in img_list
        if img_name.endswith(".jpg")
    ]
    img_list.sort()
    img_list = [img_name + ".jpg" for img_name in img_list]

    # Accumulates one MOT-format result row per tracked box.
    predict_mot_lines = []

    # Per-track history of box centers, keyed by track id (for trajectories).
    center_point_list = {}

    frame_index = 0
    frame_count = len(img_list)
    for image_name in tqdm.tqdm(img_list):
        image_path = os.path.join(img_dir, image_name)

        # MOT frame numbering is 1-based.
        frame_index += 1
        print(f"Processing frame {frame_index}/{frame_count}")

        frame = cv2.imread(image_path)

        # Filter the detections (e.g., based on confidence threshold)
        # confidence_threshold = 0.5
        dets = []

        # Get Detection Results
        txt_path = os.path.join(det_video_dir_path, image_name.replace(".jpg", ".txt"))

        if not os.path.exists(txt_path):
            raise FileNotFoundError(f"File not found: {txt_path}")

        # Each non-empty line must have exactly 6 whitespace-separated fields:
        # x1 y1 x2 y2 conf cls.
        with open(txt_path, "r") as f:
            lines = f.readlines()
            for line in lines:
                line = line.strip()
                if line:
                    parts = line.split()
                    x1, y1, x2, y2, conf, cls = map(float, parts)
                    cls = int(cls)
                    if conf >= confidence_threshold:
                        dets.append([x1, y1, x2, y2, conf, cls])

        # Convert detections to numpy array (N X (x, y, x, y, conf, cls))
        dets = np.array(dets)

        # Update the tracker
        res = tracker.update(dets, frame)  # --> M X (x, y, x, y, id, conf, cls, ind)

        # tracker.plot_results(frame, show_trajectories=True)

        # Plot Frame Info
        cv2.putText(
            frame,
            f"Frame: {frame_index:03}/{frame_count:03}    Video Name: {video_name}",
            (10, 30),
            cv2.FONT_HERSHEY_SIMPLEX,
            1,
            (0, 255, 0),
            2,
        )

        for re in res:
            # NOTE(review): `id` shadows the builtin within this loop body.
            x1, y1, x2, y2, id, conf, cls, ind = map(float, re)
            id = int(id)

            center_x = (x1 + x2) / 2
            center_y = (y1 + y2) / 2

            if id not in center_point_list.keys():
                center_point_list[id] = []

            center_point_list[id].append((center_x, center_y))

            cv2.rectangle(frame, (int(x1), int(y1)), (int(x2), int(y2)), (0, 0, 255), 2)
            # Draw label
            label = f"{id}({len(center_point_list[id])}): {conf:.2f}"
            cv2.putText(
                frame,
                label,
                (int(x1), int(y1) + 10),
                cv2.FONT_HERSHEY_SIMPLEX,
                0.5,
                (
                    0,
                    255,
                ),
                2,
            )

            # Convert to MOT's (left, top, width, height) box representation.
            x, y, w, h = (int(x1), int(y1), int(x2 - x1), int(y2 - y1))

            # Draw center point list
            for center_point in center_point_list[id]:
                cv2.circle(
                    frame,
                    (int(center_point[0]), int(center_point[1])),
                    2,
                    (0, 255, 0),
                    2,
                )

            # https://motchallenge.net/instructions/
            predict_mot_lines.append(
                f"{frame_index},"
                f"{id},"
                f"{x},"
                f"{y},"
                f"{w},"
                f"{h},"
                # f"1,"
                # f"1,"
                f"{conf:.6f}"
                ","
                "-1,"
                "-1,"
                "-1"
            )

        image_save_path = os.path.join(frame_save_dir, video_name + "_" + image_name)
        # image_save_path = image_save_path.replace("jpg", "png")

        cv2.imwrite(image_save_path, frame)

    # One newline-separated results file per video.
    with open(save_path, "w") as f:
        f.write("\n".join(predict_mot_lines).strip())


if __name__ == "__main__":
    # Workers initialise CUDA, so the start method must be "spawn"
    # (a forked child would inherit an unusable CUDA context).
    multiprocessing.set_start_method("spawn")

    base_dir_path = "/home/konghaomin/Datasets/MaritimeTrack_Full_Same/DanceTrack/val"

    base_dir_path = os.path.abspath(base_dir_path)

    # Collect every sequence directory under the dataset split.
    # (Renamed loop variables: the original shadowed the `dir` builtin.)
    dir_list = []
    for entry_name in os.listdir(base_dir_path):
        entry_path = os.path.join(base_dir_path, entry_name)
        if not os.path.isdir(entry_path):
            continue

        dir_list.append(entry_path)

    tracker_list = [
        "OcSort",
        "ByteTrack",
        "BotSort",
        "StrongSort",
        "DeepOcSort",
        "ImprAssocTrack",
    ]

    params_list = []

    # Shuffle so concurrent workers spread evenly across sequences.
    random.shuffle(dir_list)

    # Build one task per (sequence, tracker) pair; task_index doubles as the
    # round-robin GPU selector inside handle_dir.
    task_index = 0
    for seq_dir_path in dir_list:
        for tracker_name in tracker_list:
            task_index += 1
            params_list.append(
                generate_params(
                    task_index=task_index,
                    dir_path=seq_dir_path,
                    det_name="v11x",
                    tracker_name=tracker_name,
                    reid_path="clip_vehicleid.pt",
                    confidence_threshold=0.1,
                )
            )

    start_time = time.time()

    # Notify run start via the local nvi-notify service.
    machine_user_message_via_local_nvi_notify(
        "Start SORT Infer:" + ",".join(tracker_list)
    )

    # 16 worker processes; each handle_dir call processes one
    # (sequence, tracker) pair end to end.
    with multiprocessing.Pool(16) as pool:
        pool.starmap(handle_dir, params_list)

    end_time = time.time()

    print("done")

    # Wall-clock runtime in minutes, one decimal place.
    used_time = round((end_time - start_time) / 60, 1)

    print(f"Time: {used_time} min")

    machine_user_message_via_local_nvi_notify(
        "SORT Infer Done, Time: " + str(used_time) + " min"
    )
