import os
import shutil
import tempfile
import gradio as gr
from moviepy.editor import VideoFileClip
from transformers import pipeline, WhisperForConditionalGeneration, WhisperTokenizer, WhisperFeatureExtractor
import datetime
import torch
from opencc import OpenCC
import util

print("Starting Gradio interface...", "cuda" if torch.cuda.is_available() else "cpu", torch.cuda.is_available())
# Use the GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# NOTE: the Whisper "medium" checkpoint is downloaded/loaded at import time,
# so importing this module is slow on first run.
model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-medium").to(device)
tokenizer = WhisperTokenizer.from_pretrained("openai/whisper-medium")
feature_extractor = WhisperFeatureExtractor.from_pretrained("openai/whisper-medium")

# Force Chinese transcription (instead of automatic language detection).
model.config.forced_decoder_ids = tokenizer.get_decoder_prompt_ids(language="zh", task="transcribe")

transcriber = pipeline("automatic-speech-recognition", model=model, tokenizer=tokenizer,
                      feature_extractor=feature_extractor, device=device)

# OpenCC converter for Traditional -> Simplified Chinese ('t2s').
cc = OpenCC('t2s')

def extract_audio_from_video(video_path):
    """Extract the audio track of a video into a temporary WAV file.

    Args:
        video_path: Path to the input video file.

    Returns:
        Path to a temporary ``.wav`` file containing the audio. The caller
        is responsible for deleting it (generate_caption unlinks it).

    Raises:
        ValueError: If the video has no audio track.
    """
    # Create the temp path and close the OS handle immediately: keeping a
    # NamedTemporaryFile handle open while moviepy rewrites the same path
    # fails on Windows (the file is locked by the open handle).
    fd, audio_path = tempfile.mkstemp(suffix='.wav')
    os.close(fd)

    video = VideoFileClip(video_path)
    try:
        # A silent video has audio == None; fail with a clear message
        # instead of an opaque AttributeError.
        if video.audio is None:
            raise ValueError(f"No audio track found in video: {video_path}")
        video.audio.write_audiofile(audio_path)
    except Exception:
        # Don't leak the temp file if extraction fails.
        os.unlink(audio_path)
        raise
    finally:
        # Always release the ffmpeg reader held by the clip.
        video.close()
    return audio_path

def generate_caption(video_file):
    """Transcribe a video with Whisper and build an SRT subtitle file.

    Extracts the audio track, transcribes it with the module-level Whisper
    pipeline, converts Traditional to Simplified Chinese, splits long lines,
    and writes the resulting ``.srt`` file into ``util.get_folder_path()``.

    Args:
        video_file: Path to the uploaded video file.

    Returns:
        Tuple of (full SRT content, path of the saved .srt file,
        plain-text transcript with numbering/timestamps stripped).
    """
    # Extract audio first; remove the temp file as soon as transcription
    # finishes (or fails) so it can never leak.
    audio_file = extract_audio_from_video(video_file)
    try:
        # return_timestamps=True yields per-chunk (start, end) pairs.
        result = transcriber(audio_file, return_timestamps=True)
        segments = result["chunks"]
    finally:
        os.unlink(audio_file)

    def format_timedelta(td):
        # Format a timedelta as an SRT timestamp "HH:MM:SS,mmm".
        total_seconds = int(td.total_seconds())
        milliseconds = int(td.microseconds / 1000)
        hours, remainder = divmod(total_seconds, 3600)
        minutes, seconds = divmod(remainder, 60)
        return f"{hours:02}:{minutes:02}:{seconds:02},{milliseconds:03}"

    # Build the SRT content.
    srt_content = ""
    total_segments = len(segments)
    # Whisper restarts its clock from 0 after empty-text chunks, so we keep a
    # running offset (total_second) to re-anchor later chunks.
    total_second = 0
    srt_index = 1
    last_end_time = 0  # end time of the previously emitted subtitle line
    last_segment_end = 0  # end time of the previously processed segment

    # Guard against an empty transcription result (previously IndexError).
    last_end_timestamp = segments[0]["timestamp"][0] if segments else 0
    for idx, segment in enumerate(segments, start=1):
        # Progress logging.
        print(f"Processing segment {idx}/{total_segments}", segment["timestamp"][0], segment["timestamp"][1])

        text = segment["text"]
        # Convert Traditional Chinese to Simplified Chinese.
        text = cc.convert(text)
        print("Text:", text)
        # When text is empty the recognizer's clock restarts from 0, so fold
        # the last known end time into the running offset and skip the chunk.
        if text is None or len(text) == 0 or segment["timestamp"][1] is None:
            print("Skipping segment with empty text or invalid timestamp")
            total_second += last_end_timestamp
            continue
        else:
            last_end_timestamp = segment["timestamp"][1]

        # Convert the timestamp tuple to a list so it can be adjusted.
        timestamp_list = list(segment["timestamp"])
        # Re-anchor the timestamps by the accumulated offset.
        timestamp_list[0] = max(last_segment_end, timestamp_list[0] + total_second)
        # Ensure each segment lasts at least 0.2 seconds.
        timestamp_list[1] = max(timestamp_list[0] + 0.2, timestamp_list[1] + total_second)
        # Ensure there is no gap after the previous segment.
        if last_segment_end > 0:
            timestamp_list[0] = last_segment_end
        segment["timestamp"] = tuple(timestamp_list)
        last_segment_end = segment["timestamp"][1]

        # Split the text on punctuation or a 17-character limit.
        sub_texts = []
        current_text = ""
        duration = segment["timestamp"][1] - segment["timestamp"][0]

        for char in text:
            current_text += char
            # Break at 17 visible characters or at a sentence separator.
            if len(current_text.strip()) >= 17 or char in ['，', '。', ',', '.']:
                if current_text.strip():
                    sub_texts.append(current_text.strip())
                    current_text = ""

        if current_text.strip():  # append the trailing fragment
            sub_texts.append(current_text.strip())

        # If no split happened, keep the text (force-split when too long).
        if not sub_texts:
            if len(text) > 17:
                sub_texts = [text[i:i + 17] for i in range(0, len(text), 17)]
            else:
                sub_texts = [text]

        # Allocate time per sub-line, enforcing a minimum display time and a
        # minimum gap so subtitles never overlap.
        MIN_DURATION = 1.0  # minimum display time (seconds)
        MIN_GAP = 0.1  # minimum gap between subtitles (seconds)
        # Ideal share of the segment's duration for each sub-line.
        ideal_time_per_segment = duration / len(sub_texts)
        # Each sub-line shows for at least MIN_DURATION seconds.
        time_per_segment = max(ideal_time_per_segment, MIN_DURATION)

        # If the padded total exceeds the segment's duration, fall back to an
        # even split of the original duration.
        total_needed_time = time_per_segment * len(sub_texts)
        if total_needed_time > duration:
            time_per_segment = duration / len(sub_texts)

        # Walk forward from the segment start, emitting one SRT cue per line.
        current_time = segment["timestamp"][0]
        for i, sub_text in enumerate(sub_texts):
            # Never start before the previous cue ended (plus the gap).
            sub_start_time = max(current_time, last_end_time + MIN_GAP)
            sub_end_time = sub_start_time + time_per_segment

            # The last cue must not run past the segment's end.
            if i == len(sub_texts) - 1:
                sub_end_time = min(sub_end_time, segment["timestamp"][1])

            # Keep timestamps strictly increasing.
            if sub_start_time >= sub_end_time:
                sub_end_time = sub_start_time + MIN_DURATION

            # Advance the clocks for the next cue.
            last_end_time = sub_end_time
            current_time = sub_end_time + MIN_GAP

            # Convert to timedelta objects for formatting.
            start_td = datetime.timedelta(seconds=sub_start_time)
            end_td = datetime.timedelta(seconds=sub_end_time)

            srt_content += f"{srt_index}\n{format_timedelta(start_td)} --> {format_timedelta(end_td)}\n{sub_text}\n\n"
            srt_index += 1

    # Name the SRT file after the video file.
    video_file_name = os.path.basename(video_file)
    srt_file_name = os.path.splitext(video_file_name)[0] + ".srt"

    # Save the SRT file into the project directory.
    target_srt_file_path = os.path.join(util.get_folder_path(), srt_file_name)
    with open(target_srt_file_path, "w", encoding="utf-8") as srt_file:
        srt_file.write(srt_content)
        srt_file.flush()
        # Force the bytes to disk so Gradio can serve the file immediately.
        os.fsync(srt_file.fileno())

    # Strip numbering/timestamps for the plain-text output.
    text_only_content = extract_text_from_srt(srt_content)

    return srt_content, target_srt_file_path, text_only_content

def extract_text_from_srt(srt_content):
    """Extract the plain subtitle text from SRT content.

    Drops blank lines, cue-number lines (all digits), and timestamp lines
    (containing '-->'), keeping only the spoken-text lines.

    Args:
        srt_content: Full SRT-formatted string.

    Returns:
        The text lines joined with newlines; empty string for empty input.
    """
    text_lines = []
    # A plain for-loop replaces the original manual while/index iteration.
    for raw_line in srt_content.split('\n'):
        line = raw_line.strip()
        # Skip blanks, cue numbers, and timestamp lines.
        if not line or line.isdigit() or '-->' in line:
            continue
        text_lines.append(line)
    return '\n'.join(text_lines)

# Build the Gradio interface (labels/description are user-facing Chinese UI text).
iface = gr.Interface(
    fn=generate_caption,
    inputs=gr.Video(label="上传视频文件"),
    outputs=[
        gr.Textbox(label="生成的字幕内容", lines=10),
        gr.File(label="下载SRT文件"),
        gr.Textbox(label="提取的纯文本内容", lines=10)
    ],
    title="视频字幕提取器",
    description="上传一个视频文件，自动提取并生成对应的字幕文件。支持中文视频内容，将自动转换为简体中文。"
)

# Launch on port 7830. allowed_paths whitelists the parent directory so Gradio
# may serve files from it — presumably where util.get_folder_path() writes the
# SRT files; verify against util.get_folder_path().
iface.launch(server_port=7830, allowed_paths=[os.path.abspath(os.path.join(os.getcwd(), ".."))])