'''
利用openai-whisper进行语音识别
'''
# import whisper
import librosa
from faster_whisper import WhisperModel
from concurrent.futures import ThreadPoolExecutor
import multiprocessing
import subprocess
import glob
import json
import re
import os
import soundfile as sf  
import librosa  
import time
import librosa.display  
import matplotlib.pyplot as plt  
import numpy as np 
import datetime
import shutil
import threading
from tqdm import tqdm
from pydub import AudioSegment
import numpy as np
from vadDetect import vadDetect
import pubfunc
import translateZhEn
import setting
import dbExcute
import webrtcvad
from funasr import AutoModel

model_dir = "./model/funasr"

# Sentences longer than this (in characters) are not merged by optimizeByItn.
MAX_SENTENCE_LENGTH = 20
# model = AutoModel(
#     model=model_dir,
#     vad_model="fsmn-vad",
#     vad_kwargs={"max_single_segment_time": 30000},
#     device="cuda:0",
#     hub="hf",
# )

logger = pubfunc.getLogger()
#device = "cuda" if torch.cuda.is_available() else "cpu"
model_size = "tiny"
# model_size = "large-v3"
# model = WhisperModel("whisper-large-v3-ct2")
model_path = "./faster-whisper/largev3"
# The model is swapped for the smallest one so it can run in the test
# environment; in production this must be switched back to the large model.
asr_model = None
# asr_model = WhisperModel(model_size_or_path=model_path, device="cuda", local_files_only=True, compute_type="float16")
# asr_model = WhisperModel(model_size, device="cuda", local_files_only=False, compute_type="float16")
# Initialize variables
funasr_model = None  # lazily created by asrTaskClass.load_model()
language = "zh"
# Phrases used by the (commented-out) whisper asr() path to drop segments
# that contain common hallucinated credits/subtitle boilerplate.
stopword= ["感谢观看",
           "请不吝点赞",
           "谢谢观看",
           "字幕",
           "优优独播",
           "明镜",
           "呼唤精品",
           "词曲",
           "简体",
           "社群提供"]
# Builds subtitle information from the speech-recognition results.
class asrTaskClass():
    # Pre-processing for the recognition stage: derives all working paths
    # under ./tmp/<tmd5> and loads the shared ASR model.
    # source: source media file;
    # tmd5: MD5-style id that keeps the work dir / output names unique;
    # uuid1: task uuid used for the database updates in Execute();
    # enflag: 0, have english; 1, no have english;
    def __init__(self, source, tmd5, uuid1, enflag = False):

        # tmd5 guarantees unique file/dir names for this task
        self.tmd5 = tmd5
        self.begin = time.time()
        self.threadid = 0
        self.load_model()
        # self.tmd5 = pubfunc.getMD5(pubfunc.getsUUID())[:16]
        self.source = source
        tpath = f"./tmp/{self.tmd5}"
        # Audio track extracted from the video, used for recognition
        self.afile = f"{tpath}/audio.wav"
        # Two transcoded renditions are also produced (1080p and 360p files)
        filename = pubfunc.getFileName(source)
        self.v1080p = f"{tpath}/video_1080p.mp4"
        self.v360p = f"{tpath}/video_360p.mp4"
        self.adbfile = f"{tpath}/audio_adb.json"
        self.subtitle = f"{tpath}/subtitle.srt"
        self.tpath = tpath
        self.enflag = enflag
        self.uuid1 = uuid1
        pubfunc.reMkDir(tpath)
        # The source must be a local path; although "download" is used later,
        # it is effectively only a local copy
        self.videofile = f"{tpath}/video"

    def load_model(self):
        """Lazily create the shared FunASR model (paraformer ASR + VAD +
        punctuation) on the GPU.

        The instance is stored in the module-level ``funasr_model`` so every
        task object reuses a single copy.
        """
        # global asr_model
        # if(asr_model == None):
        #     asr_model = WhisperModel(model_size_or_path=model_path, device="cuda", local_files_only=True, compute_type="float16")
        # return
        global funasr_model
        # "is None" instead of "== None": identity test is the idiom and is
        # safe against objects overriding __eq__.
        if funasr_model is None:
            funasr_model = AutoModel(
                model=f"{model_dir}/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch",
                # model="model/SenseVoiceSmall",
                vad_model=f"{model_dir}/speech_fsmn_vad_zh-cn-16k-common-pytorch",
                punc_model=f"{model_dir}/punc_ct-transformer_zh-cn-common-vocab272727-pytorch",
                # Limit a single sentence to at most 6 seconds
                vad_kwargs={"max_single_segment_time": 6000},
                device="cuda:0",
            )

    # # 根据segmentrecongziation 来分句
    # def split_on_silence(self, audio_file, min_silence_len=500, silence_thresh=16):

    #     sfile = f"{audio_file}.st"
    #     """
    #     Splits audio into chunks based on silence.

    #     Arguments:
    #         sound: AudioSegment object
    #         min_silence_len: Minimum length of silence in milliseconds
    #         silence_thresh: Silence threshold in dBFS

    #     Returns:
    #         A list of audio chunks.
    #     """
    #     sound = AudioSegment.from_wav(audio_file)
    #     chunks = []
    #     start_time = 0
    #     for i in range(0, len(sound), min_silence_len):
    #         chunk = sound[i:i+min_silence_len]
    #         if len(chunk) > 0:
    #             samples = chunk.get_array_of_samples()
    #             rms = np.sqrt(np.mean(np.square(samples)))
    #             if rms < silence_thresh:
    #                 chunks.append({chunk, start_time})
    #         start_time += min_silence_len

    #     # 获取每个片段的时间戳
    #     with open(sfile, "w+") as sf:
    #         for i, chunk in enumerate(chunks):
    #             start_time = stime
    #             sf.write(f"{stime:.2f}s\n")
     
    # # 阿里的ASR, 只是做个测试，好像不支持时间戳输出，后期再用；
    # def AlAsr(self, audio_file):

    #     res = model.generate(
    #         input=audio_file,
    #         cache={},
    #         language="auto",  # "zn", "en", "yue", "ja", "ko", "nospeech"
    #         use_itn=True,
    #         batch_size_s=60,
    #         merge_vad=True,  #
    #         merge_length_s=15,
    #     )
    #     text = rich_transcription_postprocess(res[0]["text"])
    #     print(text)
    # Invoke the Alibaba FunASR recognizer on a wav file.
    def funasrExec(self, wavfile):
        """Run the shared FunASR model on *wavfile*.

        Returns a list of sentence dicts with "start"/"end" in seconds,
        the recognized "text" and the per-word "timestamp" list, after
        punctuation-based merging via optimizeByItn().
        """
        res = funasr_model.generate(
            input=wavfile,
            cache={},
            language="auto",
            # enable inverse text normalization (punctuation etc.)
            use_itn=True,
            sentence_timestamp=True,
            batch_size_s=60,
            merge_vad=True,
            merge_length_s=15,
        )

        # FunASR reports times in milliseconds; convert to seconds here.
        sentences = [
            {
                "start": info["start"] / 1000,
                "end": info["end"] / 1000,
                "text": info["text"],
                "timestamp": info["timestamp"],
            }
            for info in res[0]["sentence_info"]
        ]
        return self.optimizeByItn(sentences)

    # Merge sentences based on punctuation.
    def optimizeByItn(self, sentences, max_len=None):
        """Merge a sentence that ends with the Chinese enumeration comma
        ("、") into the following sentence when the combined text (spaces
        removed) is at most *max_len* characters.

        Args:
            sentences: list of dicts with "start", "end", "text" keys.
            max_len: merge threshold; defaults to MAX_SENTENCE_LENGTH.

        Returns a new list; the input list is not modified (the original
        implementation popped from it while iterating). A merged entry keeps
        the first sentence's start, the second's end, and the concatenated
        space-stripped text; as before, it carries no "timestamp" key.
        """
        if max_len is None:
            max_len = MAX_SENTENCE_LENGTH
        results = []
        i = 0
        while i < len(sentences):
            sentence = sentences[i]
            sentext = sentence["text"].replace(" ", "")
            # Guard against empty text: the original indexed sentext[-1]
            # unconditionally and raised IndexError on "".
            if sentext and sentext[-1] == "、" and i + 1 < len(sentences):
                nxt = sentences[i + 1]
                ntext = nxt["text"].replace(" ", "")
                if len(sentext) + len(ntext) <= max_len:
                    results.append({
                        "start": sentence["start"],
                        "end": nxt["end"],
                        "text": sentext + ntext,
                    })
                    # Both sentences consumed; skip the merged-in one.
                    i += 2
                    continue
            results.append(sentence)
            i += 1
        return results

    # # 这里是调用whisper的asr的方法
    # def asr(self, audio_file):
    #     # audio, sampling_rate = librosa.load(audio_file, sr=16000)
    #     # with torch.cuda.device(device):
    #     # 貌似beam_size的设置对于结果有一定的影响，但也没猜出是啥参数；
    #     global asr_model
    #     segments, info = asr_model.transcribe(audio_file, beam_size=5, language=language, word_timestamps=True, 
    #         initial_prompt="你是一个中文专家,请准确输出简体中文")
    #     # result = asr_model.transcribe(audio, language=language, initial_prompt="以下产生的内容都应该是简体")
    #     # result = model.transcribe(audio, language=language, initial_prompt="以下是中文内容")
    #     # segments = list(segments)
    #     result = []
    #     ntext = ""
    #     for segment in segments:
    #         is_stop = False
    #         for st in stopword:
    #             # if segment include stopword continue
    #             if segment.text.find(st) > -1:
    #                 is_stop = True
    #                 break
    #         if is_stop:
    #             continue
    #         segs = {}
    #         segs["start"] = segment.start
    #         segs["end"] = segment.end
    #         segs["text"] = segment.text.replace('"', '\"').replace("'", "\'")
    #         if ntext == segment.text:
    #             seg1 = result.pop()
    #             segs["start"] = seg1["start"]
    #         ntext = segment.text
    #         result.append(segs)
    #     return result

    # Pre-process the audio: extract vocals and use VAD to segment and
    # time-stamp. Vocal extraction conflicts with whisper in-process, so it is
    # delegated to a separate docker process; each extraction produces
    # f"{self.tpath}/vocals.wav", collected under {tpath}/vocals.
    def extractAudio(self):
        """Split self.afile into <=9-minute segments, run spleeter (inside
        the "spleeter" docker container) on each, and gather the extracted
        vocal tracks under {tpath}/vocals.

        Returns True on success, False on any failure.
        """
        try:
            # Extracting vocals from one huge file exhausts resources, so the
            # audio is first cut into segments shorter than ~10 minutes.
            swave  =  f"{self.tpath}/segment"
            pubfunc.havePath(swave)
            vadd = vadDetect(swave)
            if not vadd.splitWave(self.afile, 9*60):
                return False
            # Each run writes vocals.wav; results are gathered into ./vocals
            vpath = f"{self.tpath}/vocals"
            pubfunc.havePath(vpath)

            # Run the extraction for every segment file produced above.
            alist = glob.glob(f"{swave}/*.wav")
            for fw in alist:
                # NOTE(review): fw/self.tpath are interpolated into a shell
                # command string (shell=True is required for the quoted
                # docker-exec invocation); paths are generated locally, but
                # keep them free of shell metacharacters.
                shellcmd = f"sudo docker exec -i spleeter bash -c 'cd /root/spleeter&&python3 separatAudio.py -i {fw} -p {self.tpath}'"
                ret = subprocess.run(shellcmd, stdin=None, input=None, stdout=None, stderr=None, timeout=3000, shell=True)
                if(ret.returncode != 0):
                    logger.error("音频抽取失败["+shellcmd+"]")
                    return False
                afile = f"{self.tpath}/vocals.wav"
                nfile = f"{fw}".replace(swave, vpath)
                shutil.move(afile, nfile)
            return True
        except Exception as e:
            # Log through the module logger (the original only printed to
            # stdout, so failures were invisible in the log file).
            logger.error(e)
            return False

    # Run ASR over a directory of audio slices produced by VAD.
    def asrBatch(self):

        # 20240109: the effect of vocal extraction on accuracy is unclear,
        # so it is disabled for now.
        # if not self.extractAudio():
        #     logger.error("抽取人声错误!")
        #     return False
        tt = time.time() 
        print(f"线程{self.threadid}已用时{tt-self.begin}秒,开始进行音频的分割!")
        vocals = f"{self.tpath}/vocals"
        # pubfunc.audioStandard1(self.afile, vocals)
        spath = f"{self.tpath}/slice"
        pubfunc.reMkDir(spath)
        # pubfunc.audioStandard1(self.afile, f"{spath}/vocals.wav")
        # Split the audio into VAD slices under ./slice
        # self.tpath = "./tmp/21A332EC"
        vadd = vadDetect(spath)
        # Slice the audio content
        # wavelist = glob.glob(f"{vocals}/*.wav")
        # for wfile in wavelist:
        #     if not vadd.vad(wfile):
        #         return False
        if not vadd.vad(self.afile):
            return False
        tt = time.time() 
        print(f"线程{self.threadid}已用时{tt-self.begin}秒,开始进行音频的识别!")
        alist = sorted(glob.glob(f"{spath}/*.wav"))

        # with multiprocessing.Pool(processes=1) as pool:
        #     results = pool.map(self.executeAsr, alist)
        for fwav in alist:
            self.executeAsr(fwav)
        return True

    def executeAsr(self, fwave):
        # Recognize a single wav slice and dump the result list as JSON to
        # "<fwave>.txt" (only when the result is non-empty).
        # NOTE(review): self.asr() is commented out earlier in this file, so
        # this call currently raises AttributeError. The path is only
        # reachable via asrBatch(), which is unused while mkSubtitle1() is
        # the active entry point — confirm before re-enabling.
        t1 = time.time()  
        tf = f"{fwave}.txt"
        result = self.asr(fwave)
        # result = self.AlAsr(fwave)
        if len(result):
            with open(tf, "w+",encoding='utf-8') as f:
                json.dump(result, f, ensure_ascii=False, indent=4)
        t2 = time.time() 
        print(f"线程{self.threadid}已用时{t2-t1}秒,完成[{fwave}]识别!")
    # 这是使用whisper时，需要做切片处理ASR的逻辑
    # def mkSubtitle(self):

    #     try:
    #         if not self.asrBatch():
    #             return False
    #         sary = []
    #         spath = f"{self.tpath}/slice"
    #         alist = sorted(glob.glob(f"{spath}/*.txt"))
    #         for tf in alist:
    #             _start, _end = os.path.basename(tf).split(".")[0].split("_")
    #             s_start = int(_start) / 1000
    #             s_end = int(_end) / 1000
    #             with open(tf) as f:
    #                 sjson = json.load(f)
    #             if len(sjson) == 0:
    #                 continue
    #             for s in sjson:
    #                 tt = {}
    #                 start = s["start"]
    #                 end = s["end"]
    #                 # 如果start == 0，则延后1.5秒, 因为这种时候大概率时间戳不准
    #                 if start == 0 and end > 2:
    #                     start = 1.5
    #                 text = s["text"]
    #                 tt["start"] = round(s_start + start, 2)
    #                 tt["end"] = round(s_start + end, 2)
    #                 tt["text"] = text
    #                 tt["entext"] = ""

    #                 # 对于过长的内容进行分割
    #                 # segs = self.autoSegments(tt)
    #                 # for seg in segs:
    #                 #     sary.append(seg)
    #                 sary.append(tt)
            
    #         # 生成的文件是原文件以.srt结尾
    #         with open(self.subtitle, "w+", encoding='utf-8') as f:
    #             json.dump(sary, f, ensure_ascii=False, indent=4)
    #         return True
    #     except Exception as e:
    #         logger.error(e)
    #         return False
    # 这是使用funasr时，直接生成文本文件的逻辑
    def mkSubtitle1(self):
        try:
            sary = self.funasrExec(self.afile)
            # 生成的文件是原文件以.srt结尾
            with open(self.subtitle, "w+") as f:
                json.dump(sary, f, ensure_ascii=False, indent=4)
            return True
        except Exception as e:
            logger.error(e)
            return False

    def audioDbExtract(self):
        """Compute a per-frame mean-dB loudness curve for the source media
        and dump it to self.adbfile as a JSON list (one value per 0.1s hop).
        """
        # Load at the file's native sampling rate (sr=None keeps it as-is).
        samples, sr = librosa.load(self.source, sr=None)
        hop_length = int(0.1 * sr)
        spectrum = librosa.stft(samples, hop_length=hop_length)
        db = librosa.amplitude_to_db(np.abs(spectrum), ref=np.max)

        # Collapse the frequency axis: one loudness value per time frame.
        curve = np.mean(db, axis=0)
        db_values_list = curve.tolist()

        with open(self.adbfile, "w+", encoding='utf-8') as f:
            json.dump(db_values_list, f, ensure_ascii=False, indent=4)
        return
    # # 智能分句，一个句子如果过长，则分成若干个句子
    # def autoSegments(self, segment):

    #     results = []
    #     SLENGTH = 20
    #     punctuation_pattern = r'[,.!?;:\']+'  

    #     start = segment["start"]
    #     end = segment["end"]
    #     text = segment["text"]
    #     slen = len(text)
        
    #     # 如果句子超过20个字，则进行分析和处理
    #     if(slen < SLENGTH):
    #         results.append(segment)
    #         return results
    #     # 根据标点符号来进行分割
    #     segments = re.split(punctuation_pattern, text) 
    #     # 如果是单句话，超过27进行中间分割
    #     if(len(segments) == 1):
    #         if(len(text) < 27):
    #             results.append(segment)
    #             return results
    #         mid = slen // 2
    #         first_half = s[:mid]  # 取前半部分  
    #         second_half = s[mid:]
    #         # 把时间也按字数进行分切
    #         b_st = (end - start) / 2 
    #         # 分成两段的内容
    #         seg1 = {}
    #         seg2 = {}
    #         seg1["start"] = start
    #         seg1["end"] = start + b_st
    #         seg1["text"] = first_half 
    #         seg2["start"] = start + b_st
    #         seg2["end"] = end
    #         seg2["text"] = second_half
    #         results.append(seg1)
    #         results.append(seg2)
    #         return results

    #     # 分成多段后，根据长度来判断是否需要合成
    #     nseg = ""
    #     nsegs = []
    #     for seg in segments:
    #         length = len(seg)
    #         if length < 6:
    #             nseg = f"{seg},"
    #             continue
    #         nseg = f"{nseg}{seg}"
    #         nsegs.append(nseg)
    #         nseg = ""
    #     # 根据字的内容，进行时间戳的计算        
    #     czt = end - start
    #     ss = 0
    #     nsegs1 = []
    #     for t in nsegs:
    #         seg = {}
    #         nss = ((len(t)+1)/slen) * czt
    #         seg["text"] = t
    #         seg["t"] = nss
    #         nsegs1.append(seg)

    #     toend = 0
    #     for ts in nsegs1:
    #         seg = {}
    #         seg["start"] = start + toend
    #         end1 = start + toend + ts["t"]
    #         if(end1 > end):
    #             end1 = end
    #         seg["end"] = end1

    #         seg["text"] = ts["text"]
    #         toend = toend + ts["t"]
    #         results.append(seg)

    #     return results

    # Get the timestamp of each sentence.
    def getSegmentTimestamp(self, audio_file):
        # Placeholder: not implemented yet.
        pass

    # Program entry point for one ASR task.
    def Execute(self):
        """Drive the whole pipeline: copy the source, take a thumbnail,
        extract/standardize audio, transcode (360p/1080p) in a child process
        while FunASR produces the subtitle, then move every artifact to
        SUBTASK_PATH/<tmd5> and update the database.

        Returns the dict produced by mkResult(): 20000 on success, 90000 on
        error. The ./tmp work dir is always removed in the finally block.
        """
        # Initialize up-front so the except-branch can test it safely: the
        # original referenced toPath in the handler, so any failure occurring
        # before its assignment raised UnboundLocalError and masked the real
        # error.
        toPath = None
        try:
            path = os.environ.get("SUBTASK_PATH")
            url = os.environ.get("SUBTASK_URL")

            pubfunc.download(self.source, self.videofile)
            toPath = f"{os.environ.get('SUBTASK_PATH')}/{self.tmd5}"
            pubfunc.reMkDir(toPath)
            # Grab one frame; the generated image path is the temp dir +
            # thumbnail.png
            thumbnail = "thumbnail.png"
            thumbnailPath = f"{self.tpath}/{thumbnail}"
            pubfunc.videoShot(self.videofile, thumbnailPath)
            shutil.move(thumbnailPath, toPath)
            print(f"迁移文件[{thumbnailPath}]到[{toPath}]")
            thumbnailurl = f"{url}/{self.tmd5}/{thumbnail}"

            duration, resolution = pubfunc.getDurationAndResolution(self.videofile)

            dbExcute.updateSubtitleThumbnailAndDurationResolution(self.uuid1, thumbnailurl, duration, resolution)

            # audioStandard1: convert the audio to wav, resampled to 32k
            pubfunc.audioStandard1(self.videofile, self.afile)

            # Once the wav exists, extract its loudness curve
            alfile = f"{self.tpath}/loudness.json"
            pubfunc.getWavLoudness(self.afile, alfile)

            print(f"线程[{self.threadid}]初始化完成")
            # Work is parallelized:
            # child process 1 does the transcoding while ASR runs here
            p1 = multiprocessing.Process(target=pubfunc.to360And1080, args=(self.videofile,))
            p1.start()
            # self.mkSubtitle()
            # Produce the final subtitle file
            if (not self.mkSubtitle1()):
                raise Exception("ASR识别出错！")
            # t3.start()
            p1.join(timeout=7200)
            # t3.join(timeout=3600)
            # Move the produced files to the target directory
            # self.mkSubtitle()

            # Move the loudness file into the target dir
            shutil.move(alfile, toPath)
            print(f"迁移文件[{alfile}]到[{toPath}]")

            shutil.move(self.v1080p, toPath)
            print(f"迁移文件[{self.v1080p}]到[{toPath}]")
            shutil.move(self.v360p, toPath)
            print(f"迁移文件[{self.v360p}]到[{toPath}]")
            # shutil.move(self.adbfile, toPath)
            shutil.move(self.subtitle, toPath)
            print(f"迁移文件[{self.subtitle}]到[{toPath}]")


            # tpath: final location of the 1080p file
            tpath = f"{path}/{self.tmd5}/video_1080p.mp4"
            # Preview file URL
            reviewurl = f"{url}/{self.tmd5}/video_360p.mp4"
            # Subtitle file URL
            suburl = f"{url}/{self.tmd5}/subtitle.srt"
            # Loudness file URL
            audiourl = f"{url}/{self.tmd5}/loudness.json"
            dbExcute.updateSubtitleTaskPath(self.uuid1, tpath, reviewurl, suburl, audiourl)
            # Best-effort removal of the original source file.
            try:
                os.unlink(self.source)
            except OSError:
                pass
            return self.mkResult(20000, "ASR处理完成")

        except Exception as e:
            # On failure, remove the (possibly partial) output dir — but only
            # if it was actually created before the error occurred.
            if toPath and pubfunc.PathIsExit(toPath):
               pubfunc.rmDir(toPath) 
            logger.error(e)
            return self.mkResult(90000, f"{str(e)[:100]}")
        finally:
            # Always remove the temporary work dir under ./tmp
            pubfunc.rmDir(self.tpath) 
            end = time.time()
            print(f"线程{self.threadid}处理用时[{end-self.begin}]")

    # code = 20000 正常
    # code = 90000 错误
    def mkResult(self, status, info):
        result = {}
        result["status"] = status
        result["info"] = info
        result["md5"] = self.tmd5
        return result
        
if __name__ == '__main__':

    # Ad-hoc smoke test: construct a task for a local media file and report
    # wall-clock timing.
    begin = datetime.datetime.now()
    srt = asrTaskClass("/home/a.mp4", "1111111111", "11111111", False)
    # srt.getSegmentTimestamp("/home/aaa1.wav")
    # NOTE(review): split_on_silence() is commented out in the class above,
    # so the original call here raised AttributeError at runtime; it stays
    # disabled until the method is restored.
    # srt.split_on_silence("/home/aaa1.wav")
    end = datetime.datetime.now()
    print(begin)
    print(end)
