import glob
import os
import time
import threading
import xml.etree.ElementTree as ET
import json
from ced_inference_measure import CEDInference

# ──────────────── Inference & path configuration ────────────────
task_name      = "YOLO"                      # task label embedded in monitor records
images_folder  = "image_dianli"              # directory of input images to run inference on
model_weights  = "weights/dianli_yolo.pt"    # YOLO checkpoint file
gpu_config     = {"cuda:0": 2, "cuda:1": 2}  # executors per CUDA device (presumably — confirm against CEDInference)
save_dir       = "image_dianli_result"       # annotated result images / XMLs are written here
conf_thres     = 0.5                         # detection confidence threshold
batch_size     = 3                           # images per inference batch

# ──────────────── Monitoring parameters ────────────────
monitor_interval = 1.0                # monitor polling period (seconds)
monitor_log_file = "log/monitor_log.jsonl"  # JSONL output file
os.makedirs(os.path.dirname(monitor_log_file), exist_ok=True)  # create log dir if missing

# Fixed task metadata, set manually once; copied into every summary record
# written by the monitor thread.
meta_info = {
    "task_id": "1",
    "type": task_name,
    "detail": "电网设备巡检",
    "original_img_path": os.path.abspath(images_folder),
    "result_img_path": os.path.abspath(save_dir),
}

# ──────────────── Inference instance ────────────────
# Project-local class; constructor arguments mirror the configuration above.
# NOTE(review): internal behavior (executor model, result format) is not
# visible from this file — see ced_inference_measure for details.
inference_instance = CEDInference(
    weight_file=model_weights,
    gpu_executors=gpu_config,
    result_save_path=save_dir,
    threshold=conf_thres,
    batch_size=batch_size
)

# ──────────────── Monitoring thread ────────────────
stop_monitor = False  # cooperative exit flag, read by the monitor loop

def unified_monitor(inf_obj, directory, exts, interval, meta, log_file):
    """
    Unified monitoring loop:
      * polls ``directory`` for newly produced result images,
      * queries ``inf_obj.get_throughput()`` each cycle,
      * classifies each new result image as normal/abnormal from its XML,
      * appends one summary JSON line and one detail JSON line per cycle.

    Runs until the module-level ``stop_monitor`` flag becomes truthy.
    One FINAL scan is always performed after the flag is raised, so results
    finished between the last cycle and shutdown are not lost (the previous
    top-of-loop check silently dropped them).

    Parameters
    ----------
    inf_obj : object exposing ``get_throughput()`` (e.g. CEDInference).
    directory : str — result directory to scan.
    exts : set[str] — lower-case image extensions to accept (e.g. ".jpg").
    interval : float — seconds to sleep between scan cycles.
    meta : dict — fixed task metadata (task_id, type, detail, paths).
    log_file : str — JSONL path; truncated on start, 2 lines appended/cycle.
    """
    seen_imgs = set()      # result images already reported in a detail record
    abnormal_set = set()   # cumulative set of abnormal result images

    def _is_abnormal(xml_path):
        """A result is *normal* only if its XML parses and has no <object>."""
        # EAFP: the XML may be missing, deleted, or half-written while
        # inference is still running — any failure counts as abnormal,
        # matching the previous exists()+parse behavior without the race.
        try:
            root = ET.parse(xml_path).getroot()
        except (ET.ParseError, OSError):
            return True
        return bool(root.findall(".//object"))

    def _scan_once():
        """One monitoring cycle: scan, classify, log, and print."""
        # ---- directory scan ----
        current_imgs = {
            os.path.abspath(p) for p in glob.glob(os.path.join(directory, "*"))
            if os.path.splitext(p)[1].lower() in exts
        }
        new_imgs = current_imgs - seen_imgs

        # ---- per-image details for newly appeared results ----
        img_list = []
        for res_img_path in sorted(new_imgs):
            base, ext = os.path.splitext(os.path.basename(res_img_path))
            xml_path = os.path.join(directory, base + ".xml")
            # Original image is assumed to share the result's basename
            # inside meta["original_img_path"].
            orig_img_path = os.path.abspath(
                os.path.join(meta["original_img_path"], base + ext)
            )
            is_error = _is_abnormal(xml_path)
            if is_error:
                abnormal_set.add(res_img_path)
            img_list.append({
                "original_img_path": orig_img_path,
                "result_img_path":  res_img_path,
                "error":           is_error
            })

        # ---- summary record ----
        summary_json = {
            "task_id":           meta["task_id"],
            "type":              meta["type"],
            "detail":            meta["detail"],
            "throughput":        inf_obj.get_throughput(),
            "original_img_path": meta["original_img_path"],
            "result_img_path":   meta["result_img_path"],
            "total_img_num":     len(current_imgs),
            "error_img_num":     len(abnormal_set),
        }

        # ---- detail record ----
        detail_json = {
            "id":       meta["task_id"],
            "img_list": img_list
        }

        # ---- append both records (one JSON object per line / JSONL) ----
        with open(log_file, "a", encoding="utf-8") as f:
            f.write(json.dumps(summary_json, ensure_ascii=False) + "\n")
            f.write(json.dumps(detail_json, ensure_ascii=False) + "\n")

        # ---- debug output (safe to remove) ----
        print("[Monitor] summary:", summary_json)
        if img_list:
            print("[Monitor] detail:", detail_json)

        seen_imgs.update(new_imgs)

    # Truncate any previous log so each run starts fresh.
    open(log_file, "w").close()

    # do-while: always run at least one scan, and run one last scan AFTER
    # stop_monitor is raised so late-arriving results are captured.
    while True:
        _scan_once()
        if stop_monitor:
            break
        time.sleep(interval)

# Accepted image extensions — defined ONCE and shared by the monitor thread
# and the inference feed below (previously duplicated in two places).
exts = {".jpg", ".png", ".jpeg", ".bmp"}

# Start the monitor thread (daemon=True so it cannot keep the process alive
# if the main thread exits early).
monitor_thread = threading.Thread(
    target=unified_monitor,
    args=(inference_instance, save_dir, exts,
          monitor_interval, meta_info, monitor_log_file),
    daemon=True
)
monitor_thread.start()

# ──────────────── Inference pipeline ────────────────
# sorted() for a deterministic feed order (glob order is filesystem-dependent).
all_imgs = sorted(
    f for f in glob.glob(os.path.join(images_folder, "*"))
    if os.path.splitext(f)[1].lower() in exts
)

inference_instance.start_inference_executors()

for img_file in all_imgs:
    inference_instance.read_input_image(img_file)

# Block until every queued inference task has finished.
inference_instance.stop_all_executors()
print("All inference tasks have been completed.")

# Signal the monitor loop to stop and wait for it to flush its last records.
stop_monitor = True
monitor_thread.join()

# ──────────────── Final statistics ────────────────
avg_latency = inference_instance.get_avg_latency()
throughput  = inference_instance.get_throughput()
print(f"Final Average Latency: {avg_latency:.4f} s")
print(f"Final Throughput: {throughput:.4f} imgs/s")
