import glob
import os
import time
import threading
import json
from ced_inference_measure_file import CEDInference

# ───────────────── Inference / Path Configuration ─────────────────────────
task_name = "YOLO"
images_folder = "image_jiaotong"
model_weights = "weights/jiaotong_yolo.pt"
gpu_config = {"cuda:0": 2, "cuda:1": 2}
save_dir = "image_jiaotong_result"
conf_thres = 0.5
batch_size = 3

# ───────────────── Monitoring Configuration ───────────────────────────────
monitor_interval = 1.0  # polling period, in seconds
monitor_log_file = "log/monitor_log_jiaotong1.jsonl"  # JSONL output file
os.makedirs(os.path.dirname(monitor_log_file), exist_ok=True)

# Fixed task metadata, captured once at startup.
meta_info = dict(
    task_id="1",
    type=task_name,
    detail="jiaotong detection",
    original_img_path=os.path.abspath(images_folder),
    result_img_path=os.path.abspath(save_dir),
)

# ───────────────── Create Inference Instance ──────────────────────────────
# Project inference wrapper (imported from ced_inference_measure_file).
# NOTE(review): the dict values in gpu_config presumably mean "executors per
# GPU device" given the parameter name `gpu_executors` — confirm against the
# CEDInference implementation.
inference_instance = CEDInference(
    weight_file=model_weights,        # path to the YOLO .pt weights
    gpu_executors=gpu_config,         # device name -> count mapping (see NOTE above)
    result_save_path=save_dir,        # directory where result images are written
    threshold=conf_thres,             # confidence threshold passed to the model
    batch_size=batch_size,            # images per inference batch
)

# ───────────────── Monitoring Thread ──────────────────────────────────────
stop_monitor = False  # global flag for clean exit; set True by the main flow below


def unified_monitor(inf_obj, interval, meta, log_file):
    """
    Periodically pull per-image information via ``get_image_info()``, compute
    throughput and error statistics, and append two JSON lines (summary and
    detail) to ``log_file``.

    Runs until the module-level ``stop_monitor`` flag becomes True; intended
    to be executed on a background (daemon) thread.

    Args:
        inf_obj: inference object exposing ``get_image_info()`` (cumulative
            list of per-image dicts with at least ``result_img_path`` and
            ``error`` keys) and ``get_throughput()``.
        interval: polling period in seconds.
        meta: fixed task metadata dict (``task_id``, ``type``, ``detail``,
            ``original_img_path``, ``result_img_path``).
        log_file: path of the JSONL file to append to (truncated on start).
    """
    seen_results = set()  # absolute result paths that have been logged
    error_set    = set()  # absolute result paths where error == True

    # Truncate any previous log content.
    with open(log_file, "w", encoding="utf-8"):
        pass

    while not stop_monitor:
        # Latest cumulative list from all workers.
        all_info = inf_obj.get_image_info()

        # Keep only entries not logged in an earlier polling cycle.
        new_info = [
            item for item in all_info
            if item["result_img_path"] not in seen_results
        ]

        # Accumulate every result that reported an error.
        for item in new_info:
            if item["error"]:
                error_set.add(item["result_img_path"])

        # Build JSON objects
        summary_json = {
            "task_id":           meta["task_id"],
            "type":              meta["type"],
            "detail":            meta["detail"],
            "throughput":        inf_obj.get_throughput(),
            "original_img_path": meta["original_img_path"],
            "result_img_path":   meta["result_img_path"],
            "total_img_num":     len(all_info),
            # BUG FIX: was hard-coded 0 — report the accumulated error count.
            "error_img_num":     len(error_set),
        }

        detail_json = {
            "id":       meta["task_id"],
            "img_list": new_info,          # only newly observed images
        }

        # Append to log file (one JSON object per line — JSONL format).
        with open(log_file, "a", encoding="utf-8") as f:
            f.write(json.dumps(summary_json, ensure_ascii=False) + "\n")
            f.write(json.dumps(detail_json,   ensure_ascii=False) + "\n")

        # Optional console debug
        print("[Monitor] summary:", summary_json)
        if new_info:
            print("[Monitor] detail:", detail_json)

        # Mark these results as logged so the next cycle skips them.
        seen_results.update(item["result_img_path"] for item in new_info)

        time.sleep(interval)


# Launch the monitor in the background (daemon=True lets the program exit cleanly)
monitor_thread = threading.Thread(
    target=unified_monitor,
    args=(inference_instance, monitor_interval, meta_info, monitor_log_file),
    daemon=True,
)
monitor_thread.start()

# ───────────────── Inference Workflow ─────────────────────────────────────
# Collect every file in the input folder whose extension looks like an image.
image_exts = {".jpg", ".png", ".jpeg", ".bmp"}
image_files = []
for candidate in glob.glob(os.path.join(images_folder, "*")):
    if os.path.splitext(candidate)[1].lower() in image_exts:
        image_files.append(candidate)

inference_instance.start_inference_executors()

# Feed every image to the executor pool.
for image_path in image_files:
    inference_instance.read_input_image(image_path)

# Block until every queued task has finished.
inference_instance.stop_all_executors()
print("All inference tasks have been completed.")

# Signal the monitor loop to exit, then wait for its final cycle.
stop_monitor = True
monitor_thread.join()

# Final statistics
avg_latency = inference_instance.get_avg_latency()
throughput = inference_instance.get_throughput()
print(f"Final Average Latency: {avg_latency:.4f} s")
print(f"Final Throughput: {throughput:.4f} imgs/s")