# -*- coding: utf-8 -*-
# time: 2025/4/17 11:36
# file: video_to_text_cpu.py
# author: hanson
import cv2
import torch
from decord import VideoReader, cpu
from transformers import BlipProcessor, BlipForConditionalGeneration
from typing import List


def extract_keyframes(video_path: str, max_frames: int = 6) -> List[torch.Tensor]:
    """Extract up to ``max_frames`` evenly spaced frames from a video.

    Sampling a bounded number of frames avoids hanging on long videos.

    Args:
        video_path: Path to a video file readable by decord.
        max_frames: Upper bound on the number of frames returned.

    Returns:
        A list of at most ``max_frames`` frame tensors (HWC uint8, as
        produced by ``decord`` — TODO confirm layout against the model input).

    Raises:
        ValueError: If the video contains no decodable frames.
    """
    vr = VideoReader(video_path, ctx=cpu(0))
    total = len(vr)
    if total == 0:
        raise ValueError(f"No frames found in video: {video_path}")
    # The original `total // max_frames` step is 0 for clips shorter than
    # max_frames (range() then raises ValueError), and for other lengths it
    # can yield MORE than max_frames indices. Clamp the step and cap the list.
    step = max(1, total // max_frames)
    frame_indices = list(range(0, total, step))[:max_frames]
    return [torch.from_numpy(vr[i].asnumpy()) for i in frame_indices]


def video_to_text(
        video_path: str,
        model_name: str = "Salesforce/blip-image-captioning-base"
) -> str:
    """Caption each keyframe of a video and join the captions into one string.

    Args:
        video_path: Path to the input video file.
        model_name: Hugging Face model id (the first run downloads
            roughly 1.2 GB of weights automatically).

    Returns:
        One line per keyframe, formatted as ``Frame N: <caption>``,
        joined with newlines.
    """
    # Load the captioning pipeline (weights are fetched on first use).
    processor = BlipProcessor.from_pretrained(model_name)
    model = BlipForConditionalGeneration.from_pretrained(model_name)
    model.eval()  # switch to inference mode

    def caption(frame: torch.Tensor) -> str:
        """Run one frame through the model and decode its caption."""
        batch = processor(images=frame, return_tensors="pt")
        with torch.no_grad():  # no gradients needed at inference time
            token_ids = model.generate(**batch, max_new_tokens=30)
        return processor.decode(token_ids[0], skip_special_tokens=True)

    lines = [
        f"Frame {idx}: {caption(frame)}"
        for idx, frame in enumerate(extract_keyframes(video_path), start=1)
    ]
    return "\n".join(lines)


if __name__ == "__main__":
    import sys

    # Take the video path from the first CLI argument, falling back
    # to a bundled demo clip when none is given.
    try:
        target = sys.argv[1]
    except IndexError:
        target = "./video/hello.mp4"
    print(video_to_text(target))