import glob
import os
import time
import threading
from ced_inference_measure import CEDInference

# ---- Inference configuration ----
model_weights = "weights/best_0809.pt"   # trained model checkpoint
gpu_config = {"cuda:0": 2, "cuda:1": 2}  # device -> executor count (presumably; confirm against CEDInference)
save_dir = "results"                     # directory the engine writes result images into
conf_thres = 0.5                         # detection confidence threshold
batch_size = 3                           # images per inference batch

# Create the inference engine instance (executors are launched later
# via start_inference_executors()).
inference_instance = CEDInference(
    weight_file=model_weights,
    gpu_executors=gpu_config,
    result_save_path=save_dir,
    threshold=conf_thres,
    batch_size=batch_size
)

# -------- Shared stop flags for the monitor threads --------
# Plain module-level booleans: the monitor loops poll them once per
# iteration, and the main thread rebinds them to True at shutdown.
stop_monitor = False          # controls metrics_monitor
stop_results_monitor = False  # controls results_monitor

# -------- 线程 1：实时吞吐量监控 --------
# -------- Thread 1: live throughput monitor --------
def metrics_monitor(inf_obj, interval=1.0):
    """Periodically print the engine's live throughput.

    Loops until the module-level ``stop_monitor`` flag becomes True; the
    thread exits within one ``interval`` of the flag flipping.

    Args:
        inf_obj: object exposing ``get_throughput()`` returning a float
            (images per second).
        interval: seconds to sleep between samples. Defaults to 1.0,
            matching the previously hard-coded rate.
    """
    while not stop_monitor:
        thrpt = inf_obj.get_throughput()
        print(f"[Metrics Monitor] Throughput: {thrpt:.4f} imgs/s")
        time.sleep(interval)

# -------- 线程 2：结果目录增量扫描 --------
# -------- Thread 2: incremental result-directory scanner --------
def results_monitor(directory, exts, interval=1.0):
    """Poll *directory* and print each newly appearing image exactly once.

    Loops until the module-level ``stop_results_monitor`` flag becomes
    True; the thread exits within one ``interval`` of the flag flipping.

    Args:
        directory: directory to scan (non-recursive).
        exts: iterable of file extensions to accept, e.g. ``{".jpg"}``.
            Matching is case-insensitive regardless of how the caller
            spells the extensions.
        interval: seconds between scans. Defaults to 1.0, matching the
            previously hard-coded rate.
    """
    # Normalize once so membership tests below are case-insensitive even
    # if the caller passed mixed-case extensions.
    exts = {e.lower() for e in exts}
    seen = set()
    while not stop_results_monitor:
        current = {
            os.path.abspath(p) for p in glob.glob(os.path.join(directory, "*"))
            if os.path.splitext(p)[1].lower() in exts
        }
        # Report only files not observed in a previous scan.
        new_files = current - seen
        for img_path in sorted(new_files):
            print(f"[Results Monitor] {img_path}")
        seen |= new_files
        time.sleep(interval)

# -------- 启动两个监控线程 --------
# -------- Start the two monitor threads --------
# daemon=True: if the main thread dies unexpectedly (e.g. an exception
# during inference, before the stop flags are flipped), non-daemon
# monitor threads would keep the process alive forever. In the normal
# path both threads are still stopped via the flags and join()ed.
monitor_thread = threading.Thread(
    target=metrics_monitor,
    args=(inference_instance,),
    name="metrics-monitor",
    daemon=True,
)
results_thread = threading.Thread(
    target=results_monitor,
    args=(save_dir, {".jpg", ".png", ".jpeg", ".bmp"}),
    name="results-monitor",
    daemon=True,
)
monitor_thread.start()
results_thread.start()

# -------- Run inference --------
inference_instance.start_inference_executors()

images_folder = "img_all"
exts = {".jpg", ".png", ".jpeg", ".bmp"}
# Collect every image in the folder (non-recursive), filtered by
# extension case-insensitively.
all_imgs = [
    f for f in glob.glob(os.path.join(images_folder, "*"))
    if os.path.splitext(f)[1].lower() in exts
]

# all_imgs is already extension-filtered above, so each file is fed
# straight to the engine without a redundant per-item re-check.
for img_file in all_imgs:
    inference_instance.read_input_image(img_file)

# Wait for all queued inference tasks to finish and the executors to
# shut down.
inference_instance.stop_all_executors()
print("All inference tasks have been completed.")

# -------- 通知线程停止并等待退出 --------
# -------- Signal the monitor threads to stop and wait for them --------
# Rebinding the module-level flags is what the monitor loops poll; each
# thread exits within roughly one sleep interval of the flag flipping,
# so the join() calls below return promptly.
stop_monitor = True
stop_results_monitor = True
monitor_thread.join()
results_thread.join()

# -------- Print final statistics --------
avg_latency = inference_instance.get_avg_latency()
throughput = inference_instance.get_throughput()
print(f"Final Average Latency: {avg_latency:.4f} s")
print(f"Final Throughput: {throughput:.4f} imgs/s")
