import shutil

import gradio as gr
from transformers import pipeline, WhisperForConditionalGeneration, WhisperTokenizer, WhisperFeatureExtractor
import datetime
import os
import torch

from transformers import pipeline, WhisperForConditionalGeneration, WhisperTokenizer, WhisperFeatureExtractor

import util

print("Starting Gradio interface...", "cuda" if torch.cuda.is_available() else "cpu", torch.cuda.is_available())
# Use the GPU when CUDA is available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load the Whisper medium checkpoint plus its matching tokenizer and
# feature extractor, and move the model onto the selected device.
model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-medium").to(device)
tokenizer = WhisperTokenizer.from_pretrained("openai/whisper-medium")
feature_extractor = WhisperFeatureExtractor.from_pretrained("openai/whisper-medium")

# Pin the decoder prompt at initialization so output is Chinese transcription
# (rather than letting Whisper auto-detect language/task per request).
model.config.forced_decoder_ids = tokenizer.get_decoder_prompt_ids(language="zh", task="transcribe")

transcriber = pipeline("automatic-speech-recognition", model=model, tokenizer=tokenizer,
                       feature_extractor=feature_extractor, device=device)

from opencc import OpenCC

# OpenCC converter, 't2s' = Traditional Chinese -> Simplified Chinese.
cc = OpenCC('t2s')


# Subtitle layout constants (hoisted out of the per-segment loop).
MIN_DURATION = 1.0   # minimum on-screen time for one subtitle (seconds)
MIN_GAP = 0.1        # minimum gap between consecutive subtitles (seconds)
MAX_LINE_CHARS = 17  # maximum characters per subtitle line


def format_srt_timestamp(td):
    """Format a datetime.timedelta as an SRT timestamp: ``HH:MM:SS,mmm``."""
    total_seconds = int(td.total_seconds())
    milliseconds = int(td.microseconds / 1000)
    hours, remainder = divmod(total_seconds, 3600)
    minutes, seconds = divmod(remainder, 60)
    return f"{hours:02}:{minutes:02}:{seconds:02},{milliseconds:03}"


def split_caption_text(text, max_chars=MAX_LINE_CHARS):
    """Split *text* into subtitle lines at punctuation or every *max_chars* chars.

    Returns a non-empty list of stripped line fragments; if no split point is
    found, falls back to fixed-width chunks (or the whole text if short enough).
    """
    sub_texts = []
    current_text = ""
    for char in text:
        current_text += char
        # Break at max_chars characters or at a (full- or half-width) clause end.
        if len(current_text.strip()) >= max_chars or char in ('，', '。', ',', '.'):
            if current_text.strip():
                sub_texts.append(current_text.strip())
                current_text = ""
    if current_text.strip():  # trailing fragment with no closing punctuation
        sub_texts.append(current_text.strip())

    if not sub_texts:
        # No split produced anything (e.g. whitespace-only strips): force
        # fixed-width chunks, or keep the text whole if it fits on one line.
        if len(text) > max_chars:
            sub_texts = [text[i:i + max_chars] for i in range(0, len(text), max_chars)]
        else:
            sub_texts = [text]
    return sub_texts


def generate_caption(audio_file):
    """Transcribe *audio_file* with Whisper and build SRT subtitles.

    The transcription is converted Traditional -> Simplified Chinese, cut into
    lines of at most MAX_LINE_CHARS characters, given monotonically increasing
    timestamps, written to ``<audio name>.srt`` in the current directory, and
    best-effort copied to the folder from ``util.get_folder_path()``.

    Args:
        audio_file: Filesystem path to the uploaded audio file.

    Returns:
        The full SRT content as a string (also written to disk as a side effect).
    """
    result = transcriber(audio_file, return_timestamps=True)
    segments = result["chunks"]
    total_segments = len(segments)

    srt_entries = []      # SRT blocks collected here; joined once at the end
    total_second = 0      # accumulated offset: Whisper restarts timestamps at 0 after empty chunks
    srt_index = 1
    last_end_time = 0     # end time of the last emitted subtitle line
    last_segment_end = 0  # adjusted end time of the previous segment

    # Bug fix: the original indexed segments[0] unconditionally, which raised
    # IndexError when the transcription produced no chunks at all.
    last_end_timestamp = segments[0]["timestamp"][0] if segments else 0

    for idx, segment in enumerate(segments, start=1):
        start_ts, end_ts = segment["timestamp"]
        print(f"Processing segment {idx}/{total_segments}", start_ts, end_ts)

        # Convert Traditional Chinese model output to Simplified Chinese.
        text = cc.convert(segment["text"])
        print("Text:", text)

        # Whisper's timestamps restart from 0 after a chunk with empty text,
        # so bank the running total before skipping such a segment.
        if not text or end_ts is None:
            print("Skipping segment with empty text or invalid timestamp")
            total_second += last_end_timestamp
            continue
        last_end_timestamp = end_ts

        # Shift timestamps by the accumulated offset and normalize
        # (same order of operations as the original in-place tuple edit):
        seg_start = max(last_segment_end, start_ts + total_second)
        # guarantee each segment lasts at least 0.2 s
        seg_end = max(seg_start + 0.2, end_ts + total_second)
        # butt this segment up against the previous one (no gap)
        if last_segment_end > 0:
            seg_start = last_segment_end
        last_segment_end = seg_end

        duration = seg_end - seg_start
        sub_texts = split_caption_text(text)

        # Distribute the segment's duration across its sub-lines, enforcing a
        # minimum display time where the budget allows it.
        ideal_time_per_segment = duration / len(sub_texts)
        time_per_segment = max(ideal_time_per_segment, MIN_DURATION)
        if time_per_segment * len(sub_texts) > duration:
            # Not enough room for the minimum: fall back to an even split.
            time_per_segment = duration / len(sub_texts)

        current_time = seg_start
        for i, sub_text in enumerate(sub_texts):
            # Never start before the previous subtitle has cleared + MIN_GAP.
            sub_start_time = max(current_time, last_end_time + MIN_GAP)
            sub_end_time = sub_start_time + time_per_segment

            # The final sub-line must not spill past the segment's end...
            if i == len(sub_texts) - 1:
                sub_end_time = min(sub_end_time, seg_end)
            # ...but timestamps must still strictly increase.
            if sub_start_time >= sub_end_time:
                sub_end_time = sub_start_time + MIN_DURATION

            last_end_time = sub_end_time
            current_time = sub_end_time + MIN_GAP

            start_stamp = format_srt_timestamp(datetime.timedelta(seconds=sub_start_time))
            end_stamp = format_srt_timestamp(datetime.timedelta(seconds=sub_end_time))
            srt_entries.append(f"{srt_index}\n{start_stamp} --> {end_stamp}\n{sub_text}\n\n")
            srt_index += 1

    # Join once instead of repeated string concatenation in the loop.
    srt_content = "".join(srt_entries)

    # Name the SRT after the audio file and write it to the current directory.
    audio_file_name = os.path.basename(audio_file)
    srt_file_name = os.path.splitext(audio_file_name)[0] + ".srt"
    temp_srt_file_path = os.path.join(os.getcwd(), srt_file_name)
    print("Temporary SRT file path:", temp_srt_file_path)
    with open(temp_srt_file_path, "w", encoding="utf-8") as srt_file:
        srt_file.write(srt_content)
        srt_file.flush()                 # push Python buffers to the OS
        os.fsync(srt_file.fileno())      # force the OS to commit to disk

    # Best-effort copy into the configured target folder; a missing folder
    # is deliberately ignored so the web response still succeeds.
    target_srt_file_path = os.path.join(util.get_folder_path(), srt_file_name)
    try:
        print("Target SRT file path:", target_srt_file_path)
        shutil.copy(temp_srt_file_path, target_srt_file_path)
    except FileNotFoundError:
        pass

    # Return the subtitle text itself (not a file path) for display in the UI.
    return srt_content


# Build the Gradio web UI: one audio-file upload mapped to subtitle text output.
demo_app = gr.Interface(
    fn=generate_caption,                    # handler for the uploaded audio file
    inputs=gr.Audio(type="filepath"),       # deliver the upload as a filesystem path
    outputs=gr.Text(label="生成的字幕内容"),  # show the generated SRT content
    title="音频字幕生成器",
    description="上传一个音频文件，生成对应的字幕。",
)

# Serve the interface on the fixed port.
demo_app.launch(server_port=7840)