# predict.py
import json
import os
from datetime import datetime

import cv2
import yaml
from ultralytics import YOLO

# -------------------------- 1. Path configuration (mirrors the directory layout) --------------------------
# Directory holding this file (predict.py): Project/yolo_front/
CURRENT_FILE_DIR = os.path.dirname(os.path.abspath(__file__))

# Working directory: the first-level ultralytics/ folder, i.e. two levels up.
# Project/yolo_front/ -> Project/ -> ultralytics/
WORK_DIR = os.path.abspath(os.path.join(CURRENT_FILE_DIR, os.pardir, os.pardir))

# Optional sanity check: warn when WORK_DIR does not look like the project root.
if os.path.basename(WORK_DIR) != "ultralytics":
    print(f"⚠️  工作目录可能不正确: {WORK_DIR}")
    print(f"    请确认此目录是项目根目录（包含 cfg/ 和 Project/）")

# Location of predict.yaml: <WORK_DIR>/cfg/predict.yaml
PREDICT_YAML_PATH = os.path.join(WORK_DIR, "cfg", "predict.yaml")

class YOLOPredictor:
    """Wrapper around an Ultralytics YOLO model for image, video and webcam
    inference.

    Collects the raw ``Results`` objects of the most recent run in
    ``self.results`` and JSON-serializable per-image/per-frame summaries in
    ``self.all_results_data``; the latter can be exported as JSON or rendered
    as a human-readable report.
    """

    def __init__(self, model_path="weights/best.pt", config_path=None):
        """
        :param model_path: path to the YOLO weights file (.pt).
        :param config_path: optional path to predict.yaml. Defaults to the
            module-level ``PREDICT_YAML_PATH`` derived from the project layout.
            (Bug fix: this used to be a hard-coded, machine-specific absolute
            Windows path, so the config never loaded anywhere else.)
        """
        self.model = YOLO(model_path)
        self.results = []            # raw Results objects from the last run
        self.all_results_data = []   # structured dicts, one per image/frame
        # Portable default configuration path instead of a hard-coded "D:/...".
        self.config_path = PREDICT_YAML_PATH if config_path is None else config_path

    def load_config(self):
        """Load prediction parameters from predict.yaml.

        Falls back to :meth:`get_default_config` when the file is missing,
        unparseable, or does not contain a mapping, so prediction never fails
        on configuration alone.

        :return: dict of prediction parameters.
        """
        print(f"🔧 正在加载配置文件: {self.config_path}")
        try:
            with open(self.config_path, 'r', encoding='utf-8') as f:
                config = yaml.safe_load(f)
            # An empty or malformed YAML file parses to None (or a non-dict);
            # treat that as a load failure so the defaults are used instead of
            # crashing later on config.get(...).
            if not isinstance(config, dict):
                raise ValueError("配置文件内容不是有效的键值映射")
            print(f"✅ 成功加载配置文件: {self.config_path}")
            return config
        except Exception as e:
            print(f"❌ 加载配置失败: {e}")
            # Best-effort fallback rather than aborting the prediction.
            return self.get_default_config()

    def get_default_config(self):
        """Return the default configuration (used when YAML loading fails)."""
        # NOTE(review): assumes self.model.device is already populated by
        # ultralytics at load time — confirm for lazily-initialized models.
        device = "cuda:0" if self.model.device.type != 'cpu' else "cpu"
        return {
            "confidence": 0.3,
            "iou": 0.4,
            "image_size": 640,
            "device": device,
            "project": "runs/predict",
            "show": True,
            "save_frames": False,
            "save_txt": True,
            "save_conf": True,
            "save_crop": False,
            "show_labels": True,
            "show_conf": True,
            "show_boxes": True
        }

    def predict_img(self, image_paths):
        """Run inference on one or more images.

        :param image_paths: path / list of paths accepted by YOLO.predict.
        :return: the list of raw Results objects (also kept in self.results).
        """
        # Drop the previous run's state.
        self.results.clear()
        self.all_results_data.clear()

        config = self.load_config()

        # Map config keys to YOLO.predict keyword arguments.
        predict_kwargs = {
            "conf": config.get("confidence", 0.25),
            "iou": config.get("iou", 0.45),
            "imgsz": config.get("image_size", 640),
            "device": config.get("device", None),
            "show": config.get("show", False),
            "save": config.get("save_frames", False),
            "save_txt": config.get("save_txt", False),
            "save_conf": config.get("save_conf", False),
            "save_crop": config.get("save_crop", False),
            "show_labels": config.get("show_labels", True),
            "show_conf": config.get("show_conf", True),
            "show_boxes": config.get("show_boxes", True),
            "project": config.get("project", "runs/detect"),
            "exist_ok": True  # avoid "folder already exists" errors on re-runs
        }

        results = self.model.predict(image_paths, **predict_kwargs)
        self.results = results

        # Convert every Results object into a JSON-friendly dict.
        for result in results:
            result_data = self._process_single_result(result)
            self.all_results_data.append(result_data)

        print(f"✅ 预测完成，共处理 {len(results)} 张图像")
        return results

    def predict_video(self, video_path, save_output=False, output_path=None, max_frames=None):
        """Run inference frame-by-frame over a video file.

        :param video_path: path of the input video.
        :param save_output: when True, write an annotated copy of the video.
        :param output_path: destination for the annotated video; defaults to
            "<input-name>_pred<ext>" next to the input.
        :param max_frames: optional cap on frames processed (None = all).
        :raises FileNotFoundError: if the video file does not exist.
        :raises ValueError: if OpenCV cannot open the video.
        """
        self.results.clear()
        self.all_results_data.clear()

        if not os.path.exists(video_path):
            raise FileNotFoundError(f"视频文件不存在: {video_path}")

        cap = cv2.VideoCapture(video_path)
        if not cap.isOpened():
            raise ValueError(f"无法打开视频: {video_path}")

        fps = cap.get(cv2.CAP_PROP_FPS)
        frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

        print(f"📹 正在处理视频: {video_path}")
        print(f"📊 分辨率: {width}x{height} | FPS: {fps:.2f} | 总帧数: {frame_count}")

        out = None
        if save_output:
            if output_path is None:
                name, ext = os.path.splitext(video_path)
                output_path = f"{name}_pred{ext}"
            fourcc = cv2.VideoWriter_fourcc(*'mp4v')
            out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
            print(f"💾 将保存标注视频至: {output_path}")

        config = self.load_config()

        frame_idx = 0
        # try/finally guarantees capture/writer release even if predict raises
        # mid-loop (the original leaked both on any exception).
        try:
            while cap.isOpened():
                # Bug fix: test "is not None" so max_frames=0 stops before the
                # first frame (a bare truthiness test silently ignored 0).
                if max_frames is not None and frame_idx >= max_frames:
                    break
                ret, frame = cap.read()
                if not ret:
                    break

                # OpenCV frames are BGR; YOLO.predict accepts BGR ndarrays
                # directly, so no color conversion is needed.
                result = self.model.predict(
                    frame,
                    conf=config.get("confidence", 0.25),
                    iou=config.get("iou", 0.45),
                    imgsz=config.get("image_size", 640),
                    device=config.get("device", None),
                    show=config.get("show", False),
                    save=False,  # never auto-save per-frame; we write the video ourselves
                    save_txt=config.get("save_txt", False),
                    save_conf=config.get("save_conf", False),
                    save_crop=config.get("save_crop", False),
                    show_labels=config.get("show_labels", True),
                    show_conf=config.get("show_conf", True),
                    show_boxes=config.get("show_boxes", True),
                    project=config.get("project", "runs/detect"),
                    exist_ok=True,
                    verbose=False
                )[0]

                self.results.append(result)

                # Structured per-frame record (index + timestamp in seconds).
                frame_data = self._process_single_result(
                    result,
                    frame_index=frame_idx,
                    timestamp=frame_idx / fps if fps > 0 else 0.0,
                    source_video=video_path
                )
                self.all_results_data.append(frame_data)

                # result.plot() already returns a BGR image.
                annotated_frame = result.plot()
                if save_output:
                    out.write(annotated_frame)

                frame_idx += 1
                if frame_idx % 50 == 0:
                    print(f"  处理中... {frame_idx}/{frame_count} 帧")
        finally:
            cap.release()
            if out:
                out.release()

        print(f"✅ 视频预测完成，共处理 {frame_idx} 帧")

    def _process_single_result(self, result, frame_index=None, timestamp=None, source_video=None):
        """Convert a single image/frame Results object into a structured dict.

        :param result: an ultralytics Results object.
        :param frame_index: frame number when processing video/camera input.
        :param timestamp: seconds since the start of the video/stream.
        :param source_video: label for the source when ``result.path`` is absent.
        :return: JSON-serializable dict describing boxes, speed and paths.
        """
        # --- Source identification (image path, or a video/camera label) ---
        orig_dir = result.path if hasattr(result, 'path') else (source_video or "video_frame")
        orig_shape = result.orig_shape

        # --- Detection boxes (None when nothing was detected) ---
        boxes_data = None
        if result.boxes is not None and len(result.boxes) > 0:
            boxes_data = {
                "xyxy": result.boxes.xyxy.cpu().numpy().round(2).tolist(),
                "xywh": result.boxes.xywh.cpu().numpy().round(2).tolist(),
                "cls": result.boxes.cls.cpu().numpy().astype(int).tolist(),
                "conf": result.boxes.conf.cpu().numpy().round(4).tolist(),
                "names": [result.names[int(cls)] for cls in result.boxes.cls]
            }

        # --- Timing information (milliseconds per pipeline stage) ---
        speed_data = {
            "preprocess_ms": round(result.speed.get("preprocess", 0), 2),
            "inference_ms": round(result.speed.get("inference", 0), 2),
            "postprocess_ms": round(result.speed.get("postprocess", 0), 2)
        }

        # --- Output paths ---
        save_dir = getattr(result, 'save_dir', None)
        if not save_dir:  # None or empty string
            save_dir = "runs/detect"
        # NOTE(review): this is the *expected* filename, not necessarily a file
        # that was actually written (save=False for video/camera frames).
        saved_image = os.path.join(save_dir, f"frame_{frame_index:06d}.jpg") if frame_index is not None else "result.jpg"

        # --- Assemble the record ---
        result_dict = {
            "source": orig_dir,
            "orig_shape": {"height": int(orig_shape[0]), "width": int(orig_shape[1])},
            "boxes": boxes_data,
            "speed_ms": speed_data,
            "save_dir": save_dir,
            "saved_image": saved_image,
            "generated_at": datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        }

        # Video/camera frames additionally carry index + timestamp.
        if frame_index is not None:
            result_dict.update({
                "frame_index": frame_index,
                "timestamp_sec": round(timestamp, 3) if timestamp is not None else None
            })

        return result_dict

    def results_to_json(self, output_path="output_reports/pre_params.json"):
        """Save all structured prediction results as a JSON file.

        :param output_path: output JSON file path (parent dirs are created).
        :return: None
        """
        output_dir = os.path.dirname(output_path)
        if output_dir:
            os.makedirs(output_dir, exist_ok=True)

        with open(output_path, 'w', encoding='utf-8') as f:
            json.dump(self.all_results_data, f, ensure_ascii=False, indent=4)

        print(f"📄 所有预测结果已保存至: {output_path}")

    def save_images(self, save_dir="results/predict_output", prefix="result_"):
        """(Optional) Save all annotated prediction images to a directory.

        Works for both image results and video frames.
        """
        os.makedirs(save_dir, exist_ok=True)

        for i, result in enumerate(self.results):
            # NOTE(review): Results objects normally always have .path, so the
            # first branch is rarely taken — verify intent against callers.
            img_name = f"frame_{i:06d}.jpg" if len(self.results) > 1 and not hasattr(result, 'path') else \
                       os.path.basename(getattr(result, 'path', f"image_{i}.jpg"))
            save_path = os.path.join(save_dir, f"{prefix}{i+1}_{img_name}")
            result.save(filename=save_path)
            print(f"🖼️  保存图像: {save_path}")

    def predict_camera(self, camera_index=0, save_output=False, output_path=None, max_duration=None):
        """Run live inference on a webcam; press 'q' in the window to stop.

        :param camera_index: OpenCV camera index.
        :param save_output: when True, record the annotated stream to a file.
        :param output_path: destination video path; defaults to a timestamped name.
        :param max_duration: optional wall-clock limit in seconds.
        :raises ValueError: if the camera cannot be opened.
        """
        self.results.clear()
        self.all_results_data.clear()

        cap = cv2.VideoCapture(camera_index)
        if not cap.isOpened():
            raise ValueError(f"无法打开摄像头: {camera_index}")

        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        fps = cap.get(cv2.CAP_PROP_FPS) or 30  # some cameras report 0 FPS

        print(f"🎥 打开摄像头 {camera_index}，分辨率: {width}x{height}，FPS: {fps:.2f}")

        out = None
        if save_output:
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            output_path = output_path or f"output_camera_{timestamp}.mp4"
            fourcc = cv2.VideoWriter_fourcc(*'mp4v')
            out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
            print(f"💾 将保存检测视频至: {output_path}")

        start_time = datetime.now()
        frame_idx = 0

        config = self.load_config()

        print("📸 实时检测中... 按 'q' 键退出")

        # try/finally guarantees the camera, writer and windows are released
        # even if predict raises mid-loop (the original leaked them all).
        try:
            while cap.isOpened():
                ret, frame = cap.read()
                if not ret:
                    break

                if max_duration and (datetime.now() - start_time).total_seconds() > max_duration:
                    print(f"⏱️ 达到最大时长 {max_duration} 秒，自动停止")
                    break

                # BGR frame is fed to YOLO directly; no conversion needed.
                result = self.model.predict(
                    frame,
                    conf=config.get("confidence", 0.25),
                    iou=config.get("iou", 0.45),
                    imgsz=config.get("image_size", 640),
                    device=config.get("device", None),
                    show=False,  # we display the annotated frame ourselves below
                    save=False,
                    save_txt=config.get("save_txt", False),
                    save_conf=config.get("save_conf", False),
                    save_crop=config.get("save_crop", False),
                    show_labels=config.get("show_labels", True),
                    show_conf=config.get("show_conf", True),
                    show_boxes=config.get("show_boxes", True),
                    project=config.get("project", "runs/detect"),
                    exist_ok=True,
                    verbose=False
                )[0]

                self.results.append(result)

                # Structured per-frame record with elapsed wall-clock time.
                timestamp_sec = (datetime.now() - start_time).total_seconds()
                frame_data = self._process_single_result(
                    result,
                    frame_index=frame_idx,
                    timestamp=timestamp_sec,
                    source_video=f"camera_{camera_index}"
                )
                self.all_results_data.append(frame_data)

                # result.plot() already returns a BGR image suitable for imshow/write.
                annotated_frame = result.plot()
                cv2.imshow("YOLOv8 实时检测 - 按 Q 退出", annotated_frame)

                if save_output:
                    out.write(annotated_frame)

                frame_idx += 1

                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break
        finally:
            cap.release()
            if out:
                out.release()
            cv2.destroyAllWindows()

        print(f"✅ 摄像头预测完成，共处理 {frame_idx} 帧")

    def generate_report(self):
        """Generate a human-readable prediction report.

        :return: formatted report string (also printed to the terminal).
        """
        if not self.all_results_data:
            return "❌ 无预测数据可供生成报告。"

        report_lines = []
        report_lines.append("=" * 70)
        report_lines.append("            📊 YOLO 预测详细报告")
        report_lines.append("=" * 70)
        report_lines.append(f"生成时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
        # Heuristic source classification: a single record is assumed to be an
        # image; multiple records are a camera stream (source contains
        # "camera") or a video.
        source_type = "图像"
        if len(self.all_results_data) > 1:
            first_source = self.all_results_data[0].get("source", "")
            if "camera" in first_source:
                source_type = "摄像头实时画面"
            else:
                source_type = "视频"
        report_lines.append(f"处理类型: {source_type}")
        report_lines.append(f"总样本数: {len(self.all_results_data)}")
        report_lines.append("-" * 70)

        # Per-class detection counts and cumulative inference time.
        class_count = {}
        total_inference_time = 0.0

        for i, data in enumerate(self.all_results_data):
            # Sample heading depends on the detected source type.
            if source_type == "摄像头实时画面":
                sample_title = f"📌 第 {i+1} 帧检测"
            elif source_type == "视频":
                frame_info = data.get('frame_index', i)
                timestamp = data.get('timestamp_sec', 0)
                sample_title = f"📌 视频帧 {frame_info} (时间: {timestamp:.2f}s)"
            else:  # single image
                src_name = os.path.basename(data['source'])
                sample_title = f"📌 图像: {src_name}"
            report_lines.append(sample_title)

            # Original input dimensions.
            orig_shape = data.get('orig_shape', {})
            height = orig_shape.get('height', '未知')
            width = orig_shape.get('width', '未知')
            report_lines.append(f"  📏 原始尺寸: {width} x {height}")

            # Per-detection details.
            boxes = data.get('boxes')
            if boxes and boxes['cls']:
                report_lines.append("  📦 检测结果:")
                for j, (cls_id, conf, name, xywh) in enumerate(zip(
                    boxes['cls'], boxes['conf'], boxes['names'], boxes['xywh'])):
                    # xywh layout: [center-x, center-y, width, height]
                    center_x, center_y, w, h = xywh
                    report_lines.append(
                        f"    {j+1}. {name} (ID:{cls_id}) | "
                        f"置信度: {conf:.4f} | "
                        f"中心坐标: ({center_x:.1f}, {center_y:.1f}) | "
                        f"宽高: {w:.1f} x {h:.1f}"
                    )
                # Accumulate per-class totals for the summary section.
                for name in boxes['names']:
                    class_count[name] = class_count.get(name, 0) + 1
            else:
                report_lines.append("  📦 检测结果: 未检测到任何对象")

            # Per-stage timing.
            speed = data.get('speed_ms', {})
            preprocess_ms = speed.get('preprocess_ms', 0)
            inference_ms = speed.get('inference_ms', 0)
            postprocess_ms = speed.get('postprocess_ms', 0)
            total_inference_time += inference_ms

            report_lines.append(
                f"  ⏱️  耗时: 预处理 {preprocess_ms:.1f}ms | "
                f"推理 {inference_ms:.1f}ms | 后处理 {postprocess_ms:.1f}ms"
            )

            report_lines.append("")  # blank line between samples

        # Summary statistics.
        report_lines.append("=" * 70)
        report_lines.append("📈 摘要统计")
        report_lines.append("=" * 70)
        if class_count:
            report_lines.append("检测到的类别:")
            for cls_name, count in sorted(class_count.items(), key=lambda x: x[1], reverse=True):
                report_lines.append(f"  • {cls_name}: 共 {count} 个")
        else:
            report_lines.append("检测到的类别: 未检测到任何对象")

        avg_inference = total_inference_time / len(self.all_results_data) if self.all_results_data else 0
        report_lines.append(f"平均推理耗时: {avg_inference:.2f} ms/帧")

        report_text = "\n".join(report_lines)

        # Echo to the terminal for CLI use; the string is also returned for GUIs.
        print(f"获取到的信息：\n{report_text}")

        return report_text


# # =======================
# # 使用示例
# # =======================
# if __name__ == "__main__":
#     # 初始化预测器
#     predictor = YOLOPredictor("weights/best.pt")

#     # 示例1: 预测图像
#     # image_paths = [
#     #     "results/predict/road121.png",
#     #     "results/predict/road57.png",
#     #     "results/predict/road4.png",
#     # ]
#     # predictor.predict_img(image_paths)

#     # # 示例2: 预测视频（取消注释运行）
#     # video_path = "results/predict/stopSign2.mp4"  # 👈 替换为你的视频路径
#     # predictor.predict_video(
#     #     video_path,
#     #     save_output=True,      # 保存带标注的视频
#     #     max_frames=None         # 只处理前100帧（测试用，设为 None 处理全部）
#     # )

#     # 示例：摄像头实时检测
#     predictor.predict_camera(
#         camera_index=0,           # 摄像头索引
#         save_output=True,         # 是否保存视频
#         output_path="outputs/camera_output.mp4",  # 可选路径
#         max_duration=60           # 最长运行 60 秒（可选）
#     )

#     # 保存检测结果到 JSON
#     predictor.results_to_json("output_reports/camera_pred_results.json")

#     # 查看第一帧结果
#     if predictor.all_results_data:
#         print("\n📊 第一帧检测结果:")
#         print(json.dumps(predictor.all_results_data[0], indent=2, ensure_ascii=False))

#     # 保存结果到 JSON
#     predictor.results_to_json("output_reports/video_pred_results.json")

#     # 打印第一帧结果
#     if predictor.all_results_data:
#         print("\n📊 第一帧检测结果:")
#         print(json.dumps(predictor.all_results_data[0], indent=2, ensure_ascii=False))
