import asyncio
import os

import gradio as gr
from pydub import AudioSegment

from asr_generation import ASRService
from llm_generation import OpenAIAPI
from subtitle_processor import SubtitleProcessor
from video_frame_extract import VideoFrameExtractor


# --- LLM prompt templates (Chinese; sent verbatim to the model) ---

# Generic system prompt; not referenced in this file — presumably consumed by
# OpenAIAPI elsewhere (TODO confirm).
DEFAULT_SYSTEM_PROMPT = "You are a helpful assistant."
# Per-slide prompt: summarise one PPT page's subtitle fragment as markdown.
# NOTE(review): "改页" and "简介明了" look like typos for "该页" / "简洁明了" —
# left as-is because the string is runtime behavior sent to the LLM.
DEFAULT_SUBTITLE_SUMMARY_PROMPT = "下面是视频提取的部分字幕片段，该片段对应的是一页PPT内容，该视频是学习类的视频，请你帮我总结改页PPT对应视频字幕的主要内容，使用markdown" \
                                  "格式进行输出，请用中文回答, 尽量简介明了，不用输出额外的概要内容和结论内容"
# Whole-video summary prompt (fed the per-page summaries).  NOTE(review): this
# and the mindmap prompt below are not referenced in this file — presumably
# used elsewhere (TODO confirm).
DEFAULT_VIDEO_SUMMARY_PROMPT = "以下是视频每一页ppt的总结，请你帮我总结整个视频的主要内容，生成总结性的内容，使用markdown格式进行输出，请用中文回答，尽量简介明了"
# Mindmap variant of the whole-video prompt (markdown mindmap output).
DEFAULT_VIDEO_MINDMAP_PROMPT = "以下是视频每一页ppt的总结，请你帮我尝试总结整个视频的主要内容，生成思维导图，请使用markdown格式进行输出该思维导图，请用中文回答，尽量简介明了，不要输出总结内容"


def format_time(seconds: float) -> str:
    """Format a duration in seconds as a zero-padded ``HH:MM:SS`` string."""
    remainder = seconds % 3600
    # Same floor-division/modulo arithmetic as before, expressed as a tuple
    # of the three components joined with ":".
    parts = (seconds // 3600, remainder // 60, remainder % 60)
    return ":".join(f"{int(p):02d}" for p in parts)


async def extract_frames(extractor, video_path, frames_dir):
    """Run the extractor's (blocking) frame extraction in a worker thread.

    Offloads ``extractor.extract_similar_frames(video_path, frames_dir)``
    via ``asyncio.to_thread`` so the event loop stays responsive.
    """
    worker = extractor.extract_similar_frames
    await asyncio.to_thread(worker, video_path, frames_dir)


async def extract_audio_and_generate_subtitle(video_path, audio_file, subtitle_file):
    """Extract the audio track from a video, transcribe it, and write subtitles.

    Args:
        video_path: path to the input video file.
        audio_file: destination path for the extracted mp3 audio.
        subtitle_file: destination path for the plain-text subtitle file
            (one ``HH:MM:SS--HH:MM:SS text`` line per ASR chunk).

    Reads the module-level ``asr_service`` for transcription.
    """
    # Extract the audio track.  Bug fix: the format was hard-coded to "mp4",
    # but the UI also accepts .avi and .mov — let ffmpeg auto-detect the
    # container instead.
    audio = AudioSegment.from_file(video_path)
    audio.export(audio_file, format="mp3")
    # Transcribe off the event loop; transcribe_single is blocking.
    result = await asyncio.to_thread(asr_service.transcribe_single, audio_file)
    with open(subtitle_file, 'w', encoding='utf-8') as f:
        for segment in result["chunks"]:
            start_time = format_time(segment["timestamp"][0])
            end_time = format_time(segment["timestamp"][1])
            text = segment["text"]
            f.write(f"{start_time}--{end_time} {text}\n")


async def process_video(video_path, output_dir, merge_threshold, api_key):
    """Process one video end-to-end and write a markdown report.

    Pipeline: extract key frames and subtitles concurrently, merge subtitles
    per slide, summarise each slide with the LLM, then emit ``output.md``.

    Args:
        video_path: path to the input video file.
        output_dir: directory receiving ``frames/``, ``audio.mp3``,
            ``subtitle.txt`` and ``output.md``.
        merge_threshold: forwarded to SubtitleProcessor.
        api_key: API key for the OpenAI-compatible summarisation backend.

    Returns:
        Path to the generated ``output.md``.
    """
    # 1. Extract video frames.
    extractor = VideoFrameExtractor()
    frames_dir = os.path.join(output_dir, "frames")
    os.makedirs(frames_dir, exist_ok=True)

    # 2. Extract audio and generate subtitles.
    audio_file = os.path.join(output_dir, "audio.mp3")
    subtitle_file = os.path.join(output_dir, "subtitle.txt")

    # Frame extraction and audio transcription are independent: run them
    # concurrently, then free the ASR model.
    await asyncio.gather(
        extract_frames(extractor, video_path, frames_dir),
        extract_audio_and_generate_subtitle(video_path, audio_file, subtitle_file)
    )
    asr_service.release()

    # 3. Merge subtitles per slide.  Sort the frame list so page order is
    # deterministic — os.listdir order is arbitrary.
    frames = sorted(
        os.path.join(frames_dir, f) for f in os.listdir(frames_dir) if f.endswith('.jpg')
    )
    processor = SubtitleProcessor(subtitle_file, frames, merge_threshold)
    processed_subtitles = processor.process_subtitles()

    # 4. Summarise each slide with the LLM.
    openai_api = OpenAIAPI(api_key=api_key, base_url="http://127.0.0.1:11434/v1", model="qwen2.5:7b-instruct-q4_0")
    prompts = [f"{DEFAULT_SUBTITLE_SUMMARY_PROMPT}\n\n{subtitle}" for _, subtitle in processed_subtitles]
    summaries = await openai_api.batch_call_api(prompts)

    # 5. Write the markdown report: one heading + frame image + summary per page.
    output_file = os.path.join(output_dir, "output.md")
    with open(output_file, 'w', encoding='utf-8') as f:
        for index, ((frame, _), summary) in enumerate(zip(processed_subtitles, summaries), start=1):
            frame_name = os.path.basename(frame)
            f.write(f"## 第{index}页\n\n")
            f.write(f"![{frame_name}](./frames/{frame_name})\n\n")
            f.write(f"{summary}\n\n")

    print(f"处理完成，输出文件：{output_file}")
    # Bug fix: process_video_gradio uses this function's return value as the
    # result path, but the original returned None.
    return output_file


def process_video_gradio(input_path, output_dir, merge_threshold, api_key):
    """Gradio callback: process a single video file or a directory of videos.

    Args:
        input_path: gradio File value; ``input_path.name`` is the local path.
        output_dir: output directory; empty selects ``outputs`` next to the input.
        merge_threshold: subtitle merge threshold forwarded to process_video.
        api_key: API key forwarded to process_video.

    Returns:
        The output path(s), newline-joined for a directory, or an error message.
    """
    path = input_path.name
    if not output_dir:
        base = os.path.dirname(path) if os.path.isfile(path) else path
        output_dir = os.path.join(base, "outputs")
    os.makedirs(output_dir, exist_ok=True)
    if os.path.isfile(path):
        return asyncio.run(process_video(path, output_dir, merge_threshold, api_key))
    if os.path.isdir(path):
        output_files = []
        # sorted() keeps the result order deterministic; lower() accepts
        # upper-case extensions (.MP4, .MOV, ...), which the original missed.
        for video_file in sorted(os.listdir(path)):
            if not video_file.lower().endswith(('.mp4', '.avi', '.mov')):
                continue
            video_path = os.path.join(path, video_file)
            video_output_dir = os.path.join(output_dir, os.path.splitext(video_file)[0])
            os.makedirs(video_output_dir, exist_ok=True)
            output_files.append(
                asyncio.run(process_video(video_path, video_output_dir, merge_threshold, api_key)))
        return "\n".join(output_files)
    return "输入必须是视频文件或目录"


# UI definition.  Component creation order determines on-screen layout, so the
# statements below are intentionally kept in this order.
with gr.Blocks() as demo:
    gr.Markdown("<h1>视频PPT提取工具</h1>")
    # Single file upload; .zip is accepted here but no unzip handling is
    # visible in this file — TODO confirm it is handled elsewhere.
    input_path = gr.File(label="输入视频文件或目录", file_count="single", file_types=[".mp4", ".avi", ".mov", ".zip"])
    # Empty textbox triggers the default "outputs" directory next to the input.
    output_dir = gr.Textbox(label="输出目录", placeholder="留空将使用默认输出目录")
    merge_threshold = gr.Slider(0, 1000, value=256, step=1, label="字幕合并阈值")
    api_key = gr.Textbox(label="OpenAI API密钥", type="password")
    btn = gr.Button("开始处理")
    output_text = gr.Textbox(label="处理结果")
    # Wire the button to the synchronous pipeline entry point.
    btn.click(process_video_gradio, inputs=[input_path, output_dir, merge_threshold, api_key], outputs=output_text)

# Load the ASR model once before serving; extract_audio_and_generate_subtitle
# and process_video read this module-level instance.
asr_service = ASRService()
# NOTE(review): no `if __name__ == "__main__":` guard — importing this module
# launches the UI as a side effect.  TODO confirm this is intended.
demo.launch()
