import whisper
import opencc
import time
from ipex_llm.transformers import AutoModelForSpeechSeq2Seq
import librosa
from transformers import WhisperProcessor
import torch
from moviepy.editor import *



def transformerVideoToAudio(audio_path=r"E:\Project\AtomGit\web\static\audio\a.mp3",
                            text_path=r"E:\Project\AtomGit\web\static\text\a.txt",
                            time_path=r"E:\Project\AtomGit\web\static\text\time.txt"):
    """Transcribe a Chinese audio file with a low-bit quantized Whisper model.

    Loads the audio, runs speech-to-text, converts the transcript from
    traditional to simplified Chinese, and writes it to ``text_path``.
    Per-segment timestamps are written to ``time_path`` only when segment
    data is available.

    :param audio_path: path of the input audio file
    :param text_path: output path for the full transcript
    :param time_path: output path for timestamped segments (if available)
    """
    # Traditional -> Simplified Chinese converter.
    converter = opencc.OpenCC("t2s.json")

    # Quantized Whisper model (sym_int8) via ipex_llm instead of the
    # original `whisper.load_model("base")`.
    model = AutoModelForSpeechSeq2Seq.from_pretrained(
        pretrained_model_name_or_path=r"E:\Model\chatglm\ChatGLM3\whisper\whisper-base",
        load_in_low_bit="sym_int8")
    processor = WhisperProcessor.from_pretrained(
        pretrained_model_name_or_path=r"E:\Model\chatglm\ChatGLM3\whisper\whisper-base")

    # FIX: Whisper's feature extractor expects 16 kHz audio; librosa's
    # default resamples to 22050 Hz, which silently degrades transcription.
    data_zh, sample_rate_zh = librosa.load(audio_path, sr=16000)

    forced_decoder_ids = processor.get_decoder_prompt_ids(
        language="chinese", task="transcribe", no_timestamps=False)

    with torch.inference_mode():
        # FIX: the keyword is `sampling_rate`, not `sample_rate`.
        input_features = processor(
            data_zh, sampling_rate=sample_rate_zh, return_tensors='pt').input_features
        predict_ids = model.generate(input_features, forced_decoder_ids=forced_decoder_ids)
        # FIX: batch_decode returns a list[str], not a dict like
        # whisper.transcribe() did; skip special tokens (<|...|>) so the
        # saved transcript is clean text.
        decoded_texts = processor.batch_decode(predict_ids, skip_special_tokens=True)
        print(decoded_texts)

    # FIX: the old code did result['text'] / result.get("segments") on what
    # is now a list — that raised TypeError/AttributeError at runtime.
    result_text = converter.convert("".join(decoded_texts))
    with open(text_path, "w", encoding="utf-8") as f:
        f.write(result_text)
        print("successfully saved segments.txt!")

    # generate() + batch_decode() does not produce whisper-style "segments";
    # only the (commented-out) whisper.transcribe path did. Keep the guarded
    # write so the behavior is graceful either way.
    segments = None

    if segments is not None:
        with open(time_path, "w", encoding="utf-8") as txt_file:
            for segment in segments:
                start_time = segment.get("start")
                end_time = segment.get("end")
                text = converter.convert(segment.get("text"))
                txt_file.write(f"start:{start_time} --> end:{end_time} {text}\n")

        # Raw string avoids the invalid `\s` escape warning; same bytes.
        print(r"Segments已写入 audio_reserve\segments.txt 文件。")
    else:
        print("segments键不存在于result字典中。")
if __name__ == "__main__":
    # FIX: time.perf_counter() is the documented monotonic, high-resolution
    # clock for measuring elapsed time; time.time() can jump with wall-clock
    # adjustments.
    start = time.perf_counter()
    transformerVideoToAudio()
    elapsed = time.perf_counter() - start
    print(f"总耗时：{elapsed}秒")