import time
import base64
from openai import OpenAI
from minio import Minio
import datetime


# API configuration.
# SECURITY NOTE(review): the API key is hardcoded in source — move it into an
# environment variable or a secrets store before committing/sharing this file.
BASE_URL, STEP_API_KEY = "https://api.stepfun.com/v1", "5I046FcXrnQhXJreXQrR78su7Z9FvbyUoJMtVDIXsCzkfrifyKgMUAN0cfBGnK9KP"
COMPLETION_MODEL = "step-1o-turbo-vision"

# User question prompt.
# NOTE(review): this assignment is dead code — user_prompt is re-assigned to
# the final yawning-only prompt further down, so this value is never used.
user_prompt = "帮我看看视频里面的人，坐在驾驶位上的驾驶员，此人有没有 不系安全带，打电话，抽烟，闭眼2秒以上，喝饮料，吃食物，玩手机，打哈欠，双手同时脱离方向盘1秒以上，头部左看右看3秒以上？返回最简洁的json结果，里面包含可信度百分比。"

# user_prompt = "请逐帧分析视频中驾驶位驾驶员行为，检测以下几项内容：1.不系安全带 2.打电话 3.抽烟 4.闭眼≥2秒 5.喝饮料 6.吃食物 7.玩手机 8.双手脱离方向盘≥1秒 9.头部左右张望≥3秒。返回JSON需包含：行为名称(name)、是否发生(occurred)、可信度(confidence 0-100%)、首次发生时间(time_occurred)。时间戳格式[分:秒.毫秒]，未发生则为null。仅返回JSON。"

# user_prompt = """请逐帧分析视频中驾驶位驾驶员行为，检测以下几项内容：1.不系安全带 2.打电话 3.抽烟 4.闭眼≥2秒 5.喝饮料 6.吃食物 7.玩手机 8.双手脱离方向盘≥1秒 9.头部左右张望≥3秒 10.打哈欠。返回JSON需包含：行为名称(name)、是否发生(occurred)、可信度(confidence 0-100%)、首次发生时间(time_occurred)。时间戳格式[分:秒.毫秒]，未发生则为null。仅返回JSON。
# 10.打哈欠，请分析以下生理行为特征：
# a. 口腔动作：下颌突然下坠呈椭圆形开口，保持不闭合，持续5秒  
# b. 眼部特征：眼睑半闭或完全闭合，眼角出现泪膜反光  
# c. 呼吸模式：深度吸气（0.5-1.5秒）伴随喉部震颤  
# d. 面部肌肉：颧大肌松弛，鼻唇沟加深  
# e. 伴随动作：颈后仰＞15度或肩部耸动  
# f. 声学信号：低频喉音（0.2-0.5kHz）  
# 当检测对象同时满足条件a+b+c，并存在至少一项附加特征(d,e,f)时，判定为"打哈欠"行为。如果有说话行为，则不是打哈欠。
# 如果打哈欠，请把 符合特征的条件，写到json中。看下面的json格式示例：

# json格式示例：
# {
#     "behaviors": [
#         {
#             "name": "不系安全带",
#             "occurred": false,
#             "confidence": 100,
#             "time_occurred": "null"
#         },
#         {
#             "name": "打电话",
#             "occurred": false,
#             "confidence": 100,
#             "time_occurred": "null"
#         },
#         {
#             "name": "抽烟",
#             "occurred": false,
#             "confidence": 100,
#             "time_occurred": "null"
#         },
#         {
#             "name": "闭眼≥2秒",
#             "occurred": true,
#             "confidence": 90,
#             "time_occurred": "[17:49.57-18:49.57]"
#         },
#         {
#             "name": "喝饮料",
#             "occurred": false,
#             "confidence": 100,
#             "time_occurred": "null"
#         },
#         {
#             "name": "吃食物",
#             "occurred": false,
#             "confidence": 100,
#             "time_occurred": "null"
#         },
#         {
#             "name": "玩手机",
#             "occurred": false,
#             "confidence": 100,
#             "time_occurred": "null"
#         },
#         {
#             "name": "双手脱离方向盘≥1秒",
#             "occurred": false,
#             "confidence": 100,
#             "time_occurred": "null"
#         },
#         {
#             "name": "头部左右张望≥3秒",
#             "occurred": false,
#             "confidence": 100,
#             "time_occurred": "null"
#         },
#         {
#             "name": "打哈欠",
#             "occurred": false,
#             "confidence": 100,
#             "time_occurred": "null",
#             "符合特征": "a+b+c+f"
#         }
#     ]
# }
# """

# Active prompt: detect yawning only. This final assignment supersedes the
# earlier user_prompt definitions above; the model is asked to return a small
# JSON object of the form {"打哈欠": true/false}.
user_prompt = """请逐帧分析视频中驾驶位驾驶员行为，是否打哈欠，打哈欠的生理行为特征如下：
嘴巴张得极大，请判断不要半开，是全开，且保持不动2秒以上，同时发出悠长且略带拖沓的吸气声，眼睛微微闭合，随后又缓缓呼气。返回json，格式如下：
{
    "打哈欠": true/false
}

"""




# Initialize the MinIO client (self-hosted object storage holding the videos).
# SECURITY NOTE(review): access/secret keys are hardcoded and the connection
# uses plain HTTP (secure=False) — confirm this is acceptable for this
# deployment before committing/sharing this file.
minio_client = Minio(
    "101.132.64.6:10005",
    access_key="okonoff",
    secret_key="okonoff1234",
    secure=False
)

# Bucket that contains the video objects.
bucket_name = "aifile"
# Public URL prefix used to build video links passed to the vision model.
base_url_prefix = "http://101.132.64.6:10005/aifile"
max_videos_to_analyze = 375  # upper bound on videos analyzed per run; change freely


def get_video_objects():
    """Return the object names of every video file in the bucket.

    Recursively lists the MinIO bucket and keeps objects whose name ends in
    .mp4 or .flv (case-insensitive). On any listing error, logs the error to
    stdout and returns an empty list so the caller can proceed gracefully.
    """
    video_suffixes = (".mp4", ".flv")
    try:
        names = []
        for obj in minio_client.list_objects(bucket_name, recursive=True):
            name = obj.object_name
            if name.lower().endswith(video_suffixes):
                names.append(name)
        return names
    except Exception as e:
        print(f"获取对象时出错: {e}")
        return []


# Create the OpenAI-compatible client pointed at the StepFun endpoint.
client = OpenAI(api_key=STEP_API_KEY, base_url=BASE_URL)

# Timestamped log file name for this run, e.g. "20250617_153000.txt".
timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
log_filename = f"{timestamp}.txt"

# 打开文件用于写入日志
# Open the log file for the whole run; every log_print call mirrors output
# to both the terminal and this file.
with open(log_filename, "w", encoding="utf-8") as log_file:

    def log_print(*args, **kwargs):
        """Print *args* to the terminal and append the same line to the log file."""
        message = " ".join(map(str, args))
        print(message, **kwargs)
        log_file.write(message + "\n")
        log_file.flush()  # flush so the log survives a mid-run crash

    def request_analysis(messages):
        """Send one streamed chat-completion request and return the full text.

        Accumulates the streamed delta fragments into a single string.
        Propagates whatever exception the OpenAI client raises on failure,
        so the caller decides the retry policy.
        """
        response = client.chat.completions.create(
            model=COMPLETION_MODEL,
            messages=messages,
            stream=True
        )
        parts = []
        for chunk in response:
            content = chunk.choices[0].delta.content
            if content:
                parts.append(content)
        return "".join(parts)  # join once instead of quadratic += in the loop

    video_files = get_video_objects()

    if not video_files:
        log_print("未找到任何视频文件。")
    else:
        # Cap the run at max_videos_to_analyze videos.
        selected = video_files[:max_videos_to_analyze]
        log_print(f"共找到 {len(video_files)} 个视频文件，最多分析前 {max_videos_to_analyze} 个。\n")

        for idx, video_key in enumerate(selected):
            # Only analyze videos whose object name mentions "打哈欠" (yawning).
            # BUG FIX: the original skip message referenced an unrelated marker
            # ('20250617__98321_2'); report the keyword actually checked.
            if "打哈欠" not in video_key:
                log_print(f"跳过视频: {video_key}, 原因: 不包含 '打哈欠'")
                continue

            video_path = f"{base_url_prefix}/{video_key}"
            # BUG FIX: show the real number of videos in this run, not the
            # configured maximum (previously always "/375").
            log_print(f"\n【视频 {idx + 1}/{len(selected)}】")
            log_print(f"视频路径: {video_key}")

            messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "video_url", "video_url": {"url": video_path}},
                        {"type": "text", "text": user_prompt}
                    ]
                }
            ]

            time_start = time.time()
            # One attempt plus a single retry; the duplicated request/stream
            # code from the original is factored into request_analysis().
            try:
                result = request_analysis(messages)
                log_print("\n分析结果：")
                log_print(result)
            except Exception as e:
                log_print(f"第一次处理视频出错: {e}")
                log_print("尝试重新处理视频...")
                time.sleep(1)  # brief back-off before the single retry
                try:
                    result = request_analysis(messages)
                    log_print("\n分析结果（重试）：")
                    log_print(result)
                except Exception as retry_e:
                    log_print(f"重试失败: {retry_e}")

            time_end = time.time()
            log_print(f"\n总生成时间: {time_end - time_start:.2f}秒")