# -*- coding: utf-8 -*-
import requests
import json
import os
import time
from datetime import datetime, timedelta
from concurrent.futures import ThreadPoolExecutor, as_completed
from tqdm import tqdm

# --- 1. Logger ---
class Logger:
    """
    Minimal logger that echoes every message to the console while buffering
    it in memory, so the complete transcript can be written to a file later.
    """
    def __init__(self):
        self._output_buffer = []

    def log(self, message=""):
        """Print *message* to the console and append it to the buffer."""
        text = str(message)
        print(message)  # show it live on the console
        self._output_buffer.append(text)

    def write_to_file(self, filepath):
        """Write everything buffered so far into *filepath* (UTF-8)."""
        contents = '\n'.join(self._output_buffer)
        with open(filepath, 'w', encoding='utf-8') as f:
            f.write(contents)
        # Final confirmation goes to the console only, not into the log file.
        print(f"\n✅ 所有输出已成功保存到文件: {filepath}")

# --- 2. Configuration ---
# API and server settings.
# NOTE(review): credentials are hard-coded in source; consider loading them
# from environment variables or a secrets store instead.
API_URL = 'http://172.0.54.122:1040/v1/chat/completions'
API_KEY = 'PTGAPLP7DRREGE4USUJX'
SECRET_KEY = 'TxoGsncIvtPXpyuNOz7gA0ObjrO04ks4UuLBn2bx'
MODEL_NAME = 'qwen25_vl'

# Task settings.
FEATURES_FILE = 'feature.json'   # process-feature definitions (JSON)
LABELS_FILE = 'label.json'       # ground-truth labels per video (JSON)
VIDEO_BASE_PATH_ON_SERVER = '/data/obs/in_video/'  # server-side directory holding the videos
ANALYSIS_OUTPUT_DIR = 'video_analysis'             # per-video JSON reports are written here

# Number of concurrent worker threads used for the analysis requests.
MAX_WORKERS = 10

# Time range of videos to analyze (inclusive; one video file per minute).
START_TIME_STR = '2025-05-28 9:30'
END_TIME_STR = '2025-05-28 23:59'

def generate_video_filenames(start_str, end_str, logger):
    """Generate one video filename per minute between two timestamps.

    Accepts timestamps in either 'YYYY-MM-DD HH:MM' or 'YYYY-MM-DD-HH-MM'
    form (each endpoint may use either format independently). Returns a list
    of '<YYYY-MM-DD-HH-MM>.mp4' names covering the inclusive range, or an
    empty list (after logging an error) when a timestamp cannot be parsed.

    Bug fixed: the previous `len(start_str) > 16` heuristic never selected
    the dash format for zero-padded strings (both formats are 16 chars) and
    ignored the end string's format entirely, so valid dash-format inputs
    were rejected.
    """
    def _parse(timestamp):
        # Try both supported layouts; return None if neither matches.
        for fmt in ('%Y-%m-%d %H:%M', '%Y-%m-%d-%H-%M'):
            try:
                return datetime.strptime(timestamp, fmt)
            except ValueError:
                continue
        return None

    start_time = _parse(start_str)
    end_time = _parse(end_str)
    if start_time is None or end_time is None:
        logger.log("!!! 错误: 时间格式不正确。请使用 'YYYY-MM-DD HH:MM' 或 'YYYY-MM-DD-HH-MM' 格式。")
        return []

    filenames = []
    step = timedelta(minutes=1)
    current_time = start_time
    while current_time <= end_time:
        filenames.append(f"{current_time.strftime('%Y-%m-%d-%H-%M')}.mp4")
        current_time += step
    return filenames

def load_features(filepath, logger):
    """Load the process-feature definitions from a JSON file.

    Returns the parsed JSON data, or None when the file is missing or
    malformed (an error is logged in either case).
    """
    try:
        f = open(filepath, 'r', encoding='utf-8')
    except FileNotFoundError:
        logger.log(f"!!! 错误: 特征文件 '{filepath}' 未找到。")
        return None
    with f:
        try:
            return json.load(f)
        except json.JSONDecodeError:
            logger.log(f"!!! 错误: 特征文件 '{filepath}' 格式不正确。")
            return None

def load_video_labels(filepath, logger):
    """Load ground-truth labels for each video from a JSON file.

    The file is a list of objects carrying a "路径" (path) and a "工序"
    (comma-separated process names) key. Returns a dict mapping each path to
    its list of label strings; entries missing either key are skipped.
    Returns None (after logging) when the file is missing or malformed.
    """
    try:
        with open(filepath, 'r', encoding='utf-8') as f:
            data = json.load(f)
    except FileNotFoundError:
        logger.log(f"!!! 错误: 标签文件 '{filepath}' 未找到。")
        return None
    except json.JSONDecodeError:
        logger.log(f"!!! 错误: 标签文件 '{filepath}' 格式不正确。")
        return None

    video_labels = {}
    for entry in data:
        video_path = entry.get("路径")
        raw_labels = entry.get("工序")
        if not video_path or not raw_labels:
            continue
        video_labels[video_path] = [part.strip() for part in raw_labels.split(',')]
    return video_labels

def create_prompt(features):
    """Assemble the classification/analysis prompt sent to the model.

    Builds an instruction header, one section per process from *features*
    (each item has a "工序" name and a "工序特征" list of descriptions), and
    a footer demanding a strict JSON answer.
    """
    header = [
        "你是一个煤矿安全生产的专家，请仔细观察视频内容，并根据下面提供的工序特征定义，判断视频中正在进行哪一项工序，并对视频内容进行简要分析。",
        "请只识别一项最匹配的工序。",
        "\n--- 工序特征定义 ---",
    ]
    sections = []
    for entry in features:
        lines = [f"工序: {entry['工序']}", "特征:"]
        lines.extend(f"- {desc}" for desc in entry['工序特征'])
        sections.append("\n".join(lines))
    footer = [
        "\n--- 任务要求 ---",
        "如果视频内容与所有已定义的工序特征都不匹配，请将工序识别为'其他'。",
        '请严格按照以下JSON格式返回你的判断结果，不要包含任何解释或其他多余的文字：\n{"工序": "识别出的工序名称", "分析": "对视频内容的简要分析"}',
    ]
    return "\n".join(header + sections + footer)

def analyze_video(video_url_on_server, prompt_text, features_list):
    """Send one video to the chat-completions API and parse the result.

    Returns a (process, analysis, raw_output) tuple:
    - on success, the "工序"/"分析" fields extracted from the model's JSON
      answer plus the cleaned JSON string;
    - if the answer is not valid JSON, a best-effort process match against
      *features_list* plus the raw model output;
    - on request/response failure, an error description and the error text.

    Bug fixed: the previous fence stripping used
    `split('```json\\n', 1)[1]`, which raised IndexError whenever the fence
    was not followed by a newline; that IndexError escaped to the outer
    `except (KeyError, IndexError)` and was misreported as a response error.
    """
    headers = {'Content-Type': 'application/json', 'X-API-Key': API_KEY, 'X-Secret-Key': SECRET_KEY}
    payload = {
        "model": MODEL_NAME,
        "messages": [{"role": "user", "content": [{"type": "video_url", "video_url": video_url_on_server}, {"type": "text", "text": prompt_text}]}],
        # Low temperature: we want a deterministic, strictly-formatted answer.
        "max_tokens": 512, "repetition_penalty": 1.00, "temperature": 0.01
    }
    try:
        response = requests.post(API_URL, headers=headers, json=payload, timeout=180)
        response.raise_for_status()
        response_data = response.json()
        model_output_str = response_data['choices'][0]['message']['content']

        # Strip an optional markdown ```json fence, tolerating a missing
        # newline after the opener and a missing closing fence.
        json_str = model_output_str
        if '```json' in json_str:
            json_str = json_str.partition('```json')[2]
            closing = json_str.rfind('```')
            if closing != -1:
                json_str = json_str[:closing]

        # Isolate the outermost {...} span in case extra text surrounds it.
        start_index = json_str.find('{')
        end_index = json_str.rfind('}')
        if start_index != -1 and end_index != -1:
            json_str = json_str[start_index:end_index + 1]

        try:
            result_json = json.loads(json_str)
            process = result_json.get("工序", "无法解析工序")
            analysis = result_json.get("分析", "无分析内容")
            return process, analysis, json_str
        except json.JSONDecodeError:
            # Fallback: scan the raw output for any known process name.
            process = "解析失败"
            for feature in features_list:
                if feature['工序'] in model_output_str:
                    process = feature['工序']
                    break
            return process, model_output_str, model_output_str

    except requests.exceptions.RequestException as err:
        return f"请求错误: {err}", "", str(err)
    except (KeyError, IndexError) as err:
        return f"解析响应错误: {err}", "", str(err)

def main():
    """Run the full recognition pipeline and save all output to a report file.

    Steps: load features and labels, build the prompt, generate the list of
    per-minute video filenames, analyze them concurrently, write one JSON
    report per video, then print a summary table and an accuracy figure.
    """
    logger = Logger()

    logger.log("--- 开始工序识别与分析任务 ---")

    # Make sure the per-video report directory exists.
    if not os.path.exists(ANALYSIS_OUTPUT_DIR):
        os.makedirs(ANALYSIS_OUTPUT_DIR)
        logger.log(f"已创建目录: {ANALYSIS_OUTPUT_DIR}")

    features = load_features(FEATURES_FILE, logger)
    if not features: return

    logger.log(f"成功加载 {len(features)} 条工序特征。")

    video_labels = load_video_labels(LABELS_FILE, logger)
    if not video_labels: return
    logger.log(f"成功加载 {len(video_labels)} 条视频标签。")

    prompt = create_prompt(features)
    video_filenames = generate_video_filenames(START_TIME_STR, END_TIME_STR, logger)
    if not video_filenames: return

    logger.log(f"\n根据时间范围，将使用 {MAX_WORKERS} 个并发线程分析 {len(video_filenames)} 个视频文件。")

    results = {}

    with ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
        # Submit one analysis task per video, keeping a future -> filename map.
        future_to_video = {}
        for filename in video_filenames:
            video_full_path = os.path.join(VIDEO_BASE_PATH_ON_SERVER, filename).replace("\\", "/")
            future = executor.submit(analyze_video, video_full_path, prompt, features)
            future_to_video[future] = filename

        pbar = tqdm(as_completed(future_to_video), total=len(video_filenames), desc="视频分析进度")
        for future in pbar:
            filename = future_to_video[future]
            try:
                process_result, analysis_result, raw_output = future.result()
                results[filename] = {'process': process_result, 'analysis': analysis_result}

                # Persist the model's answer as pretty-printed JSON when it
                # parses, otherwise as the raw text, one file per video.
                json_filename = os.path.splitext(filename)[0] + '.json'
                json_filepath = os.path.join(ANALYSIS_OUTPUT_DIR, json_filename)
                with open(json_filepath, 'w', encoding='utf-8') as f:
                    try:
                        parsed_json = json.loads(raw_output)
                        json.dump(parsed_json, f, ensure_ascii=False, indent=4)
                    except json.JSONDecodeError:
                        f.write(raw_output)

            except Exception as exc:
                # Log the error (with the actual filename — the old message
                # printed a literal "(unknown)") and keep processing.
                logger.log(f"\n!!! 视频 {filename} 在执行中产生严重错误: {exc}")
                results[filename] = {'process': "执行异常", 'analysis': ""}

    # --- Summary table, in chronological order ---
    logger.log("\n--- 所有视频分析完成 ---")
    logger.log(f"分析报告已保存至 '{ANALYSIS_OUTPUT_DIR}' 目录。")
    logger.log("--- 结果摘要 (按时间排序) ---")
    logger.log("=" * 80)
    logger.log(f"{'视频文件':<30} | {'模型识别工序':<30} | {'原始标签'}")
    logger.log("-" * 80)

    for video in video_filenames:
        process_result = "未处理"
        result = results.get(video)
        if result:
            process_result = result['process']

        # Label file keys are prefixed with the server directory name.
        label_key = f"in_video/{video}"
        original_labels_list = video_labels.get(label_key)

        original_label_str = "无标签"
        if original_labels_list:
            original_label_str = ", ".join(original_labels_list)

        logger.log(f"{video:<30} | {process_result:<30} | {original_label_str}")

    logger.log("=" * 80)

    # --- Accuracy: count predictions that match any ground-truth label,
    # excluding videos whose analysis failed outright. ---
    correct_predictions = 0
    failed_videos = 0
    eligible_videos = 0

    for video in video_filenames:
        result = results.get(video)
        label_key = f"in_video/{video}"
        if result and label_key in video_labels:
            process = result['process']

            if "错误" in process or "异常" in process or "解析失败" in process:
                failed_videos += 1
                continue

            eligible_videos += 1
            if process in video_labels[label_key]:
                correct_predictions += 1

    logger.log("\n--- 工序识别准确率 ---")
    total_analyzed = len(video_filenames)
    logger.log(f"总共分析视频数: {total_analyzed}")
    logger.log(f"处理失败视频数: {failed_videos}")

    if eligible_videos > 0:
        accuracy = (correct_predictions / eligible_videos) * 100
        logger.log(f"参与比对的视频总数 (已排除失败项): {eligible_videos}")
        logger.log(f"识别正确的视频数: {correct_predictions}")
        logger.log(f"准确率: {accuracy:.2f}%")
    else:
        logger.log("没有可用于计算准确率的视频（可能全部处理失败或标签缺失）。")

    # --- Write the full transcript to a timestamped report file ---
    timestamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
    output_filename = f"{timestamp}_analysis_report.txt"

    logger.write_to_file(output_filename)


if __name__ == "__main__":
    main()