# ============================================================================
# main_ubuntu.py (v6.1 - 纯数据版)
#
# 主要更新:
# 1. 【配置修改】关闭所有调试图片的保存功能，只进行检测和JSON文件生成。
# ============================================================================

import os
import cv2
import subprocess
import os.path as osp
import time
import json
import torch
import datetime
import threading
import logging
from logging.handlers import TimedRotatingFileHandler
from ultralytics import YOLO
import torchvision
from torchvision.ops import nms
import numpy as np

# ... setup_logging, get_single_frame_from_rtsp, YOLOv8_PyTorch_Detector 函数均保持不变 ...
def setup_logging(config):
    """Initialise root logging with a console handler and an hourly-rotating file.

    Does nothing (beyond a console notice) when config["enable_logging"] is falsy.
    Expects config["logs_dir"] to name the directory for the rotating log file.
    """
    if not config.get("enable_logging", True):
        print("日志系统已禁用 (Logging is disabled).")
        return
    root = logging.getLogger()
    root.setLevel(logging.INFO)
    fmt = logging.Formatter(
        '%(asctime)s - %(threadName)s - %(levelname)s - %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S')
    # Drop handlers left over from any previous initialisation so we never double-log.
    if root.hasHandlers():
        root.handlers.clear()
    console = logging.StreamHandler()
    console.setFormatter(fmt)
    root.addHandler(console)
    logs_dir = config["logs_dir"]
    os.makedirs(logs_dir, exist_ok=True)
    log_file_path = os.path.join(logs_dir, 'current_activity.log')
    file_handler = TimedRotatingFileHandler(
        log_file_path, when='H', interval=1, backupCount=24, encoding='utf-8')
    file_handler.setFormatter(fmt)
    root.addHandler(file_handler)
    logging.info(f"日志系统初始化完成。当前日志: {log_file_path}")

def get_single_frame_from_rtsp(rtsp_url, save_path):
    """Grab a single frame from an RTSP stream, or return None on failure.

    Reads five frames and keeps only the last one, to flush stale frames out of
    the FFMPEG capture buffer. `save_path` is accepted for interface
    compatibility but is unused in this data-only mode.
    """
    capture = cv2.VideoCapture(rtsp_url, cv2.CAP_FFMPEG)
    if not capture.isOpened():
        logging.error(f"无法打开RTSP流。")
        return None
    ok, frame = False, None
    for _ in range(5):
        ok, frame = capture.read()
    capture.release()
    if not ok or frame is None:
        logging.error("无法从流中读取到有效的帧。")
        return None
    return frame

class YOLOv8_PyTorch_Detector:
    """YOLOv8 person detector that scans large frames with overlapping 640px tiles.

    Call `init()` once to load the model weights before any detection call.
    """

    def __init__(self, model_path, device='cpu'):
        self.model_path = model_path  # path to the .pt weights file
        self.device = device          # 'cpu' or 'cuda'
        self.model = None             # populated by init()

    def init(self):
        """Load the YOLO weights onto the configured device."""
        logging.info(f"正在加载模型: {self.model_path}")
        self.model = YOLO(self.model_path)
        self.model.to(self.device)
        logging.info(f"模型已加载到: '{self.device}'")

    def detect_persons_with_tiling(self, frame_to_detect, conf_thres, iou_thres):
        """Count persons (class 0) in `frame_to_detect` via tiled inference + global NMS.

        Returns the number of boxes surviving NMS.
        Raises RuntimeError if init() was never called.
        """
        if not self.model:
            raise RuntimeError("推理器未初始化")
        img_h, img_w, _ = frame_to_detect.shape
        tile_size = 640
        overlap = 0.2  # 20% overlap so persons straddling tile borders are still seen
        step_size = int(tile_size * (1 - overlap))
        all_boxes = []
        all_scores = []
        for y in range(0, img_h, step_size):
            for x in range(0, img_w, step_size):
                y_end = min(y + tile_size, img_h)
                x_end = min(x + tile_size, img_w)
                # Shift edge tiles back inside the frame so every tile is full-size.
                y_start = max(0, y_end - tile_size)
                x_start = max(0, x_end - tile_size)
                tile = frame_to_detect[y_start:y_end, x_start:x_end]
                if tile.shape[0] == 0 or tile.shape[1] == 0:
                    continue
                results = self.model.predict(source=tile, conf=conf_thres, device=self.device, verbose=False)
                if results and results[0]:
                    for box in results[0].boxes:
                        if int(box.cls) == 0:  # COCO class 0 == person
                            # Convert to plain floats up front: building torch.tensor()
                            # from a list containing 0-dim tensors is slow and warns.
                            x1, y1, x2, y2 = (float(v) for v in box.xyxy[0])
                            all_boxes.append([x_start + x1, y_start + y1, x_start + x2, y_start + y2])
                            all_scores.append(float(box.conf[0]))
        if not all_boxes:
            return 0
        # Cross-tile NMS merges duplicate detections from overlapping tiles.
        final_indices = nms(torch.tensor(all_boxes, dtype=torch.float32),
                            torch.tensor(all_scores, dtype=torch.float32),
                            iou_threshold=iou_thres)
        return len(final_indices)

# --- 主处理流程更新 ---
def process_camera_stream(camera_config, global_config, detector, exclusion_zones):
    """Run one detection cycle for a single camera: grab a frame, count persons,
    and write the count as JSON for downstream consumers.

    Raises RuntimeError when no frame could be grabbed from the stream.
    """
    RTSP_URL = camera_config["url"]
    CAMERA_CODE = camera_config["code"]
    logging.info(f"开始新一轮处理: {CAMERA_CODE}")
    begin_time = int(time.time())

    original_frame = get_single_frame_from_rtsp(RTSP_URL, None)
    if original_frame is None:
        raise RuntimeError("抓取单帧失败")

    # Black out configured exclusion polygons so nothing is detected inside them.
    frame_for_detection = original_frame
    if CAMERA_CODE in exclusion_zones:
        frame_for_detection = original_frame.copy()
        cv2.fillPoly(frame_for_detection, pts=exclusion_zones[CAMERA_CODE], color=(0, 0, 0))

    tiling_conf_thres = global_config["tiling_conf_thres"]
    tiling_iou_thres = global_config["tiling_iou_thres"]

    # Tiled detection, count only (no drawing) — the cheap production path.
    tiling_person_count = detector.detect_persons_with_tiling(
        frame_to_detect=frame_for_detection,
        conf_thres=tiling_conf_thres,
        iou_thres=tiling_iou_thres
    )
    logging.info(f"检测完成: {CAMERA_CODE} | 人数: {tiling_person_count}")

    # Debug images are off in production (save_debug_images=False); when enabled,
    # detection is deliberately re-run to obtain an annotated frame.
    if global_config["save_debug_images"]:
        _, frame_with_boxes = detector.detect_persons_with_tiling_and_draw(
            frame_for_detection, original_frame, tiling_conf_thres, tiling_iou_thres)
        DEBUG_DIR = global_config["debug_dir"]
        # cv2.imwrite fails silently when the target directory is missing.
        os.makedirs(DEBUG_DIR, exist_ok=True)
        base_filename = f"{CAMERA_CODE}_{begin_time}"
        output_path = os.path.join(DEBUG_DIR, f"{base_filename}_detected_{tiling_person_count}p.jpg")
        cv2.imwrite(output_path, frame_with_boxes)
        logging.info(f"调试图片已保存至: {output_path}")

    # Publish the result JSON where the downstream service expects it.
    JSON_BASE_PATH = global_config["json_output_base_path"]
    person_num_data = {"placedata": {"HumanNumber": tiling_person_count}, "timestamp": begin_time}
    final_json_path = os.path.join(JSON_BASE_PATH, CAMERA_CODE, "personNum", "result_place.json")
    try:
        os.makedirs(os.path.dirname(final_json_path), exist_ok=True)
        # Write to a temp file and atomically replace, so a concurrent reader
        # never observes a partially written JSON document.
        tmp_path = final_json_path + ".tmp"
        with open(tmp_path, 'w') as f:
            json.dump(person_num_data, f, indent=2)
        os.replace(tmp_path, final_json_path)
    except Exception:
        logging.error(f"写入JSON文件到 {final_json_path} 时发生错误。", exc_info=True)

# --- camera_worker 和 main 函数保持不变 ---
def camera_worker(camera_config, global_config, detector, exclusion_zones):
    """Endless per-camera loop: run one detection cycle, then sleep out the
    remainder of the configured interval.

    The cycle length is read from global_config["cycle_interval_seconds"]
    (default 5, matching the previous hard-coded value). Any exception from a
    cycle is logged and the loop continues — a single bad frame must not kill
    the worker thread.
    """
    interval = global_config.get("cycle_interval_seconds", 5)
    while True:
        cycle_start_time = time.time()
        try:
            process_camera_stream(camera_config, global_config, detector, exclusion_zones)
        except Exception:
            logging.error(f"处理流程发生严重错误: {camera_config['code']}", exc_info=True)
        elapsed_time = time.time() - cycle_start_time
        sleep_time = max(0, interval - elapsed_time)
        logging.info(f"本轮 {camera_config['code']} 耗时 {elapsed_time:.2f} 秒，休眠 {sleep_time:.2f} 秒。")
        time.sleep(sleep_time)

# ============================================================================
# 主程序入口
# ============================================================================
def main():
    """Entry point: build configuration, load the model, and run one worker
    thread per camera until interrupted or a thread dies."""
    base_dir = os.path.dirname(os.path.abspath(__file__))

    # Per-camera exclusion polygons (pixel coordinates), keyed by camera serial.
    avatar_wall_poly = np.array([[1315, 442], [1386, 426], [1463, 413], [1548, 395], [1632, 387], [1718, 376], [1810, 369], [1979, 368], [2076, 371], [2161, 378], [2161, 350], [1315, 350]], dtype=np.int32)
    bottom_left_corner_poly = np.array([[617, 2141], [17, 1073], [0, 2159]], dtype=np.int32)
    bottom_right_corner_poly = np.array([[3117, 2145], [3825, 1037], [3839, 2159]], dtype=np.int32)
    exclusion_zones = {"210235C71P3247000051": [avatar_wall_poly, bottom_left_corner_poly, bottom_right_corner_poly]}

    global_config = {
        "device": 'cuda' if torch.cuda.is_available() else 'cpu',
        "base_dir": base_dir,
        # Production mode: detection + JSON only, no debug images.
        "save_debug_images": False,
        "debug_dir": os.path.join(base_dir, "debug_images"),
        "enable_logging": True,
        "logs_dir": os.path.join(base_dir, "logs"),
        "json_output_base_path": "/data/smartedudata/minio/data/zlmediakit",
        "tiling_conf_thres": 0.6,
        "tiling_iou_thres": 0.3
    }

    setup_logging(global_config)
    logging.info(f"项目根目录: {base_dir}")
    if global_config['device'] == 'cuda':
        logging.info("CUDA可用，将使用GPU进行加速。")
    else:
        logging.warning("CUDA不可用，将使用CPU。")

    model_path = os.path.join(base_dir, "models", "yolov8s.pt")
    if not os.path.exists(model_path):
        logging.critical(f"致命错误：模型文件未找到！路径: {model_path}")
        return

    cameras = [{"url": "rtsp://admin:Nanjing@8625@10.11.36.21:554/media/video1", "code": "210235C71P3247000051"}, {"url": "rtsp://admin:Nanjing@8625@10.11.36.22:554/media/video1", "code": "210235C71P3254000069"}]

    detector = YOLOv8_PyTorch_Detector(model_path, device=global_config["device"])
    try:
        detector.init()
    except Exception:
        logging.critical("AI模型初始化失败，程序退出。", exc_info=True)
        return

    workers = []
    for cam in cameras:
        worker = threading.Thread(
            target=camera_worker,
            name=f"CameraThread-{cam['code']}",
            args=(cam, global_config, detector, exclusion_zones))
        worker.daemon = True
        workers.append(worker)
        worker.start()
        logging.info(f"线程 {worker.name} 已启动。")

    try:
        # Park the main thread; exit once any worker dies or on Ctrl-C.
        while all(w.is_alive() for w in workers):
            time.sleep(1)
    except KeyboardInterrupt:
        logging.info("接收到键盘中断信号，程序正在退出...")
    logging.info("主程序退出。")

if __name__ == '__main__':
    # Production entry point. Before starting, swap in a detector subclass that
    # keeps the pure-count path free of drawing work. The original version
    # duplicated the entire tiling sweep three times (parent class plus both
    # subclass methods); here the sweep lives in a single shared helper.

    def _draw_boxes(frame, all_boxes, all_scores, final_indices):
        """Return a copy of `frame` with the NMS-surviving person boxes drawn on it."""
        frame_with_boxes = frame.copy()
        for i in final_indices:
            box = all_boxes[i]
            score = all_scores[i]
            x1, y1, x2, y2 = map(int, box)
            cv2.rectangle(frame_with_boxes, (x1, y1), (x2, y2), (0, 255, 0), 2)
            label = f'Person {score:.2f}'
            (w, h), _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.6, 1)
            # Filled label background above the box, then black text on top.
            cv2.rectangle(frame_with_boxes, (x1, y1 - 20), (x1 + w, y1), (0, 255, 0), -1)
            cv2.putText(frame_with_boxes, label, (x1, y1 - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 0), 1)
        return frame_with_boxes

    class YOLOv8_PyTorch_Detector_Optimized(YOLOv8_PyTorch_Detector):
        """Detector variant separating the count-only path from the drawing path.

        Both public methods delegate the tiled sweep to one shared helper so the
        detection logic exists in exactly one place.
        """

        def _collect_person_boxes(self, frame_to_detect, conf_thres):
            """Run the overlapping-tile sweep; return (boxes, scores) in full-frame coords."""
            if not self.model:
                raise RuntimeError("推理器未初始化")
            img_h, img_w, _ = frame_to_detect.shape
            tile_size = 640
            overlap = 0.2
            step_size = int(tile_size * (1 - overlap))
            all_boxes = []
            all_scores = []
            for y in range(0, img_h, step_size):
                for x in range(0, img_w, step_size):
                    y_end = min(y + tile_size, img_h)
                    x_end = min(x + tile_size, img_w)
                    # Clamp edge tiles back inside the frame so they stay full-size.
                    y_start = max(0, y_end - tile_size)
                    x_start = max(0, x_end - tile_size)
                    tile = frame_to_detect[y_start:y_end, x_start:x_end]
                    if tile.shape[0] == 0 or tile.shape[1] == 0:
                        continue
                    results = self.model.predict(source=tile, conf=conf_thres, device=self.device, verbose=False)
                    if results and results[0]:
                        for box in results[0].boxes:
                            if int(box.cls) == 0:  # COCO class 0 == person
                                x1, y1, x2, y2 = box.xyxy[0]
                                all_boxes.append([x_start + x1, y_start + y1, x_start + x2, y_start + y2])
                                all_scores.append(float(box.conf[0]))
            return all_boxes, all_scores

        def detect_persons_with_tiling(self, frame_to_detect, conf_thres, iou_thres):
            """Count persons only — no drawing; the cheapest path for production."""
            all_boxes, all_scores = self._collect_person_boxes(frame_to_detect, conf_thres)
            if not all_boxes:
                return 0
            final_indices = nms(torch.tensor(all_boxes, dtype=torch.float32),
                                torch.tensor(all_scores, dtype=torch.float32),
                                iou_threshold=iou_thres)
            return len(final_indices)

        def detect_persons_with_tiling_and_draw(self, frame_to_detect, original_frame_to_draw_on, conf_thres, iou_thres):
            """Count persons and also return an annotated copy of the original frame."""
            all_boxes, all_scores = self._collect_person_boxes(frame_to_detect, conf_thres)
            if not all_boxes:
                return 0, original_frame_to_draw_on.copy()
            final_indices = nms(torch.tensor(all_boxes, dtype=torch.float32),
                                torch.tensor(all_scores, dtype=torch.float32),
                                iou_threshold=iou_thres)
            frame_with_boxes = _draw_boxes(original_frame_to_draw_on, all_boxes, all_scores, final_indices)
            return len(final_indices), frame_with_boxes

    # Rebind the global name so main() instantiates the optimized class.
    YOLOv8_PyTorch_Detector = YOLOv8_PyTorch_Detector_Optimized

    main()