# analyzer.py

import json
import os
import shutil
import subprocess
import time
from datetime import datetime
from typing import Dict, Any

import cv2
from omegaconf import DictConfig

# from config import ANALYSIS_DURATION, SAMPLE_FPS, YOLO_MODEL_PATH
from configs import get_config
from utils.minio_client import upload_file
from utils.qwen_client import analyze_with_qwen
from utils.capture_frames import extract_key_frames
from utils.camera_client import fetch_camera_list, report_event
# import logging

# logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
# logger = logging.getLogger(__name__)

def filter_by_confidence(data: Dict[str, Any],
                         confidence_filters: Dict[str, float],
                         default_confidence: float = 0.60) -> Dict[str, Any]:
    """
    Filter detected events by per-type confidence thresholds.

    Args:
        data: model output containing "is_happen" and a list of "events".
        confidence_filters: mapping of event_type -> minimum confidence.
        default_confidence: threshold applied to event types not listed
            in confidence_filters.

    Returns:
        A dict with only "is_happen" and "events"; "is_happen" becomes
        False when no event survives the filtering.
    """
    # Nothing detected: normalize to an empty result.
    if not data.get("is_happen", False):
        return {"is_happen": False, "events": []}

    def _keep(event: Dict[str, Any]) -> bool:
        # Unknown event types fall back to the default threshold.
        threshold = confidence_filters.get(event.get("event_type", ""),
                                           default_confidence)
        return event.get("confidence", 0.0) >= threshold

    surviving = [event for event in data.get("events", []) if _keep(event)]

    return {"is_happen": bool(surviving), "events": surviving}
    
def to_webmp4(input_file: str) -> str:
    """
    Convert a video to a web-friendly MP4 (H.264 + AAC, faststart).

    Args:
        input_file: path to the source video.

    Returns:
        Path of the converted file ("<stem>_web.mp4").

    Raises:
        subprocess.CalledProcessError: if ffmpeg exits with a non-zero status.
        FileNotFoundError: if ffmpeg is not installed.
    """
    output_file = os.path.splitext(input_file)[0] + "_web.mp4"
    # Use an argument list with shell=False semantics: paths containing
    # spaces or shell metacharacters can no longer break the command (the
    # previous f-string + os.system was injection-prone and ignored the
    # exit status entirely).
    command = [
        "ffmpeg", "-i", input_file,
        "-c:v", "libx264", "-preset", "fast", "-crf", "23",
        "-c:a", "aac", "-b:a", "128k",
        "-movflags", "+faststart",
        output_file, "-y",
    ]
    subprocess.run(command, check=True)
    return output_file

def draw_boxes_on_image(input_file: str, data: dict) -> str:
    """
    Draw a red bounding box for each event onto an image.

    Args:
        input_file: path to the source image.
        data: result dict whose "events" each carry a "normalized_box"
            with center/size coordinates normalized to [0, 1].

    Returns:
        Path of the annotated copy ("<stem>_box.jpg").

    Raises:
        ValueError: if the image cannot be read.
    """
    image = cv2.imread(input_file)
    if image is None:
        # cv2.imread signals failure by returning None (no exception);
        # fail loudly here instead of crashing on image.shape below.
        raise ValueError(f"Unable to read image: {input_file}")
    h, w = image.shape[:2]

    # Tolerate a missing "events" key rather than raising KeyError.
    for event in data.get("events", []):
        box = event["normalized_box"]

        # Convert normalized center/size to pixel corner coordinates.
        x_center = box["x_center"] * w
        y_center = box["y_center"] * h
        half_w = box["width"] * w / 2
        half_h = box["height"] * h / 2
        x1, y1 = int(x_center - half_w), int(y_center - half_h)
        x2, y2 = int(x_center + half_w), int(y_center + half_h)

        # Red rectangle (BGR order: (0, 0, 255)).
        cv2.rectangle(image, (x1, y1), (x2, y2), color=(0, 0, 255), thickness=3)
        # NOTE: event labels are intentionally not rendered; only boxes.

    output_file = os.path.splitext(input_file)[0] + "_box.jpg"
    cv2.imwrite(output_file, image)
    return output_file


def analyze_single_camera(camera_id: str, rtsp_url: str, events: str = '', device: str = 'cuda'):
    """
    Analyze one camera feed: sample ~30s of video, extract key frames, run
    Qwen-VL on them, and upload annotated frames/video to MinIO when an
    event is detected.

    Args:
        camera_id: identifier used in upload object paths and the result.
        rtsp_url: RTSP stream to sample.
        events: event-type hint string forwarded to the model prompt.
        device: accepted for interface compatibility; not used here.

    Returns:
        The filtered model result dict extended with "camera_id", "video"
        and "keyframe" upload paths; on any failure,
        {"is_happen": False, "error": <message>}.
    """
    try:
        config = get_config()
        model_type = config['ollama']['model_type']
        ollama_host = config['ollama']['host']
        ollama_model = config['ollama']['model']
        prompt_index = int(config['ollama']['prompt'])

        # Optional per-event-type confidence thresholds stored as JSON text.
        confidence_filters = {}
        filters_str = config['app']['confidence_filters']
        if filters_str:
            confidence_filters = json.loads(filters_str)
            print(f"使用置信度过滤配置: {confidence_filters}")

        # 1. Extract key frames (and a short clip) from the stream.
        result = extract_key_frames(camera_id, rtsp_url, 30)
        print(f"extract_key_frames: {result}")

        # 2. Joint multi-frame analysis with Qwen-VL, then confidence filter.
        print("🧠 正在调用 Qwen-VL 进行多帧联合分析...")
        qwen_result = analyze_with_qwen(model_type, result["frames"], ollama_host,
                                        ollama_model, prompt_index, events)
        qwen_result = filter_by_confidence(qwen_result, confidence_filters)
        print(f"Qwen-VL 分析结果: {qwen_result}")

        is_happen = qwen_result.get("is_happen", False)
        upload_result = {"video": None, "keyframe": []}

        if is_happen:
            date_str = datetime.now().strftime("%Y-%m-%d")

            for keyframe_file in result["frames"]:
                box_file = draw_boxes_on_image(keyframe_file, qwen_result)
                # The object name keeps the ORIGINAL frame basename even
                # though the annotated "_box" file is uploaded — presumably
                # to keep object URLs stable; confirm before changing.
                keyframe_obj = f"{date_str}/keyframes/{camera_id}/{os.path.basename(keyframe_file)}"
                # NOTE(review): [4:] strips a fixed 4-char prefix from the
                # path upload_file returns — verify against upload_file.
                upload_result["keyframe"].append(upload_file(box_file, keyframe_obj)[4:])

            # Single quotes inside the f-string: the previous nested double
            # quotes (result["video"]) are a SyntaxError before Python 3.12.
            video_obj = f"{date_str}/videos/{camera_id}/{os.path.basename(result['video'])}"
            web_video_file = to_webmp4(result["video"])
            upload_result["video"] = upload_file(web_video_file, video_obj)[4:]

        # 3. Clean up temporary files.
        temp_dir = os.path.dirname(result["video"])
        cleanup_temp(temp_dir)

        qwen_result["camera_id"] = camera_id
        qwen_result.update(upload_result)

        print(f"✅ 分析完成: {qwen_result}")
        return qwen_result

    except Exception as e:
        # Log before swallowing: the previous silent return hid every
        # failure cause from the operator.
        print(f"analyze_single_camera failed for {camera_id}: {e}")
        return {"is_happen": False, "error": str(e)}
    

def alanyze_runner(config: DictConfig):
    """
    Continuously poll the camera service and analyze one camera per pass.

    Sleeps 10 seconds when no camera is available; reports confirmed
    events via report_event. Terminates on the first unhandled exception.
    (Name kept as-is for caller compatibility, despite the typo.)
    """
    while True:
        try:
            camera = fetch_camera_list()

            # Guard: nothing usable returned — back off and retry.
            if not camera or 'id' not in camera:
                print("📭 未获取到摄像头，休眠10秒")
                time.sleep(10)
                continue

            outcome = analyze_single_camera(
                camera['id'], camera['rtsp'], camera.get('eventTypes', ''))

            happened = outcome.get("is_happen", False)
            if happened is True:
                print(f"✅检测到施工事件，上报事件: {outcome}")
                report_event(outcome)
            else:
                print(f"✅分析完成: {outcome}")

        except Exception as e:
            # Any unexpected error stops the runner entirely.
            print(f"Error in alanyze_runner: {str(e)}")
            return
    
def cleanup_temp(temp_dir: str):
    return
    """删除临时目录及其内容"""
    # if os.path.exists(temp_dir):
    #     shutil.rmtree(temp_dir)