import os

import jieba
import whisper
from moviepy.editor import VideoFileClip
from transformers import AutoTokenizer, AutoModelForTokenClassification, pipeline
from zhon.hanzi import punctuation


def transcribe_video(video_path):
    """Extract the audio track of a video and transcribe it to Chinese text.

    Parameters
    ----------
    video_path : str
        Path to the local video file.

    Returns
    -------
    str
        The raw (unpunctuated) transcript produced by Whisper.
    """
    audio_path = "temp_audio.wav"

    # Use the clip as a context manager so the underlying ffmpeg readers and
    # file handles are released even if audio extraction fails.
    with VideoFileClip(video_path) as clip:
        clip.audio.write_audiofile(audio_path)

    try:
        # 'medium' balances accuracy and speed; other multilingual sizes are
        # 'base', 'small' and 'large'.
        model = whisper.load_model("medium")
        result = model.transcribe(audio_path, language='zh')  # force Chinese decoding
        return result['text']
    finally:
        # Always remove the temporary audio file, even when transcription fails.
        if os.path.exists(audio_path):
            os.remove(audio_path)


def add_basic_punctuation(text):
    """Insert naive Chinese punctuation into *text*.

    Segments the text with jieba, joins every segment with a Chinese comma,
    and appends a final full stop. This is a crude placeholder, not real
    punctuation restoration.
    """
    segments = jieba.cut(text)
    return f"{'，'.join(segments)}。"



def restore_punctuation(text):
    """Restore punctuation in *text* with a token-classification model.

    NOTE(review): the configured checkpoint is a *Dutch* punctuation model,
    while the rest of this script produces Chinese transcripts — confirm and
    swap in a Chinese/multilingual checkpoint before relying on the output.

    Parameters
    ----------
    text : str
        Unpunctuated input text.

    Returns
    -------
    str
        The input text with the model's predicted punctuation marks inserted
        after the words they follow.
    """
    model_name = "oliverguhr/fullstop-dutch-punctuation-prediction"
    auth_token = None  # set to a Hugging Face access token only if the model is gated

    # 'token' replaces the deprecated 'use_auth_token' kwarg; passing None
    # (instead of an empty string) means "no authentication".
    tokenizer = AutoTokenizer.from_pretrained(model_name, token=auth_token)
    model = AutoModelForTokenClassification.from_pretrained(model_name, token=auth_token)

    punctuator = pipeline(
        "token-classification",
        model=model,
        tokenizer=tokenizer,
        aggregation_strategy="simple",
    )

    pieces = []
    for entity in punctuator(text):
        word = entity["word"]
        # The fullstop models label each word with the punctuation mark that
        # should follow it; the label "0" means "no punctuation". The previous
        # implementation joined only the words, discarding every predicted
        # mark — i.e. it restored nothing.
        label = entity.get("entity_group", "0")
        pieces.append(word if label == "0" else word + label)
    return "".join(pieces)


if __name__ == "__main__":
    # Example: transcribe a local video file.
    video_file = '/Users/zengshan/Desktop/沧元图.S01E03.mp4'
    text = transcribe_video(video_file)
    print(text)
    print('==================================')
    # Punctuation restoration step, currently disabled:
    # text = restore_punctuation(text)
    # print(text)