import glob
import os
import time
import threading
import xml.etree.ElementTree as ET
from ced_inference_measure import CEDInference

# Inference job configuration.
# NOTE(review): `task` is not referenced anywhere else in this file — confirm
# whether it is still needed.
task = "dianwang"
images_folder = "img_all"                # directory containing the input images
model_weights = "weights/best_0809.pt"   # trained model checkpoint
gpu_config = {"cuda:0": 2, "cuda:1": 2}  # presumably executors per CUDA device — confirm against CEDInference
save_dir = "results"                     # directory the detector writes results (images + XML) into
conf_thres = 0.5                         # detection confidence threshold
batch_size = 3                           # images per inference batch

# Create the inference engine (project-local CEDInference).
inference_instance = CEDInference(
    weight_file=model_weights,
    gpu_executors=gpu_config,
    result_save_path=save_dir,
    threshold=conf_thres,
    batch_size=batch_size
)

# -------- Shared stop flags for the monitor threads --------
# Plain module-level booleans: only the main thread writes them, and the
# monitor threads merely poll them once per second before exiting.
stop_monitor = False          # controls metrics_monitor
stop_results_monitor = False  # controls results_monitor

# -------- Thread 1: live throughput monitor --------
def metrics_monitor(inf_obj):
    """Print the live throughput of *inf_obj* once per second.

    Polls the module-level ``stop_monitor`` flag and returns once it
    becomes true (so shutdown can lag by up to one second).
    """
    while True:
        if stop_monitor:
            break
        current = inf_obj.get_throughput()
        print(f"[Metrics Monitor] Throughput: {current:.4f} imgs/s")
        time.sleep(1)

# -------- Thread 2: incremental scan of the results directory --------
def results_monitor(directory, exts):
    """Poll *directory* once per second and report detection results.

    Each pass does three things:
      1) classifies newly appeared images as ``[[abs_path, 0/1], ...]``
         (0 = normal, 1 = abnormal),
      2) prints the total number of detected images so far,
      3) prints the running count of abnormal images.

    An image counts as abnormal (1) unless a sibling ``<base>.xml``
    exists, parses cleanly, and contains no ``<object>`` element.
    Runs until the module-level ``stop_results_monitor`` flag is set.
    """
    seen = set()            # absolute paths already classified
    abnormal_paths = set()  # absolute paths ever flagged abnormal

    while not stop_results_monitor:
        # Every result image currently present (absolute paths).
        found = set()
        for path in glob.glob(os.path.join(directory, "*")):
            if os.path.splitext(path)[1].lower() in exts:
                found.add(os.path.abspath(path))

        report = []
        for image in sorted(found - seen):
            stem = os.path.splitext(os.path.basename(image))[0]
            xml_file = os.path.join(directory, stem + ".xml")

            flag = 1  # assume abnormal until the XML proves otherwise
            if os.path.exists(xml_file):
                try:
                    tree_root = ET.parse(xml_file).getroot()
                except ET.ParseError:
                    pass  # unparsable XML keeps flag = 1
                else:
                    if not tree_root.findall(".//object"):
                        flag = 0

            if flag:
                abnormal_paths.add(image)  # remember the absolute path
            report.append([image, flag])

        # Per-pass statistics.
        print(f"[Results Monitor] Detected images: {len(found)}, "
              f"Abnormal images: {len(abnormal_paths)}")

        # Only print the detail list when something new showed up.
        if report:
            print(f"[Results Monitor] {report}")

        seen.update(found)
        time.sleep(1)


# -------- Start both monitor threads --------
# One shared extension set: the original duplicated this literal here and
# again below, which invites drift between the two copies.
exts = {".jpg", ".png", ".jpeg", ".bmp"}

# daemon=True so that if the inference section below raises, these
# endless-loop threads cannot keep the process alive forever.
monitor_thread = threading.Thread(
    target=metrics_monitor, args=(inference_instance,), daemon=True
)
results_thread = threading.Thread(
    target=results_monitor, args=(save_dir, exts), daemon=True
)
monitor_thread.start()
results_thread.start()

# -------- Run inference --------
inference_instance.start_inference_executors()

# Collect every image in the input folder (case-insensitive extension match).
all_imgs = [
    f for f in glob.glob(os.path.join(images_folder, "*"))
    if os.path.splitext(f)[1].lower() in exts
]

# Feed each image to the pipeline; all_imgs is already extension-filtered,
# so no second check is needed per file.
for img_file in all_imgs:
    inference_instance.read_input_image(img_file)

# Wait for every inference task to finish.
inference_instance.stop_all_executors()
print("All inference tasks have been completed.")

# -------- Signal the monitor threads to stop and wait for them --------
stop_monitor = True
stop_results_monitor = True
monitor_thread.join()
results_thread.join()

# -------- Final statistics --------
avg_latency = inference_instance.get_avg_latency()
throughput = inference_instance.get_throughput()
print(f"Final Average Latency: {avg_latency:.4f} s")
print(f"Final Throughput: {throughput:.4f} imgs/s")
