import speech_recognition as sr
import moviepy.editor as mp
from pydub import AudioSegment
import wave
from pydub.silence import detect_nonsilent
from typing import Union, Generator
import os
import math
import time


def to_wav(path):
    """Extract the audio track of the video at *path* to ./data/speech.wav.

    Returns the int 5, used by the caller as a progress value
    (``yield to_wav(...)`` inside the ``detect`` generator).
    """
    # Ensure the scratch directory exists (the old os.mkdir call was
    # commented out, which made write_audiofile fail on a fresh run).
    os.makedirs("./data", exist_ok=True)
    clip = mp.VideoFileClip(path)
    try:
        clip.audio.write_audiofile("./data/speech.wav")
    finally:
        clip.close()  # release the ffmpeg reader / file handles
    return 5

def wav_len(path):
    """Return the duration in seconds of the WAV file at *path*.

    Raises ``wave.Error`` if the file is not a valid WAV file.
    """
    # Context manager guarantees the file handle is closed (the original
    # leaked it by never calling fr.close()).
    with wave.open(path, 'rb') as fr:
        return fr.getnframes() / fr.getframerate()


def detect(filepath: str) -> Generator[Union[int, list[dict[str, Union[str, float]]]], None, None]:
    """Detect spoken 'good'/'bad' voice commands in the video at *filepath*.

    Pipeline: extract audio to ./data/speech.wav, cut it into voiced chunks
    with pydub, run Google speech recognition (zh-CN) on each chunk, and
    collect markers for the phrases '录制结束' ("recording finished" -> good)
    and '重新录制' ("re-record" -> bad).

    Yields:
        int progress values (0-100) while working, and finally the marker
        list: dicts with keys 'label' ('good'/'bad'), 'start' and 'end'
        (seconds, padded by 1 s on each side).

    Side effects: writes and deletes intermediate .wav files under ./data/;
    requires network access for the Google speech API.
    """
    yield 0
    stime=time.time()
    data: list[dict[str, Union[str, float]]] = []
    # Extract the audio track from the video (to_wav returns progress value 5)
    yield to_wav(filepath)

    # Load the extracted audio file
    audio = AudioSegment.from_file("./data/speech.wav", "wav")

    # Find the non-silent (voiced) ranges, in milliseconds.
    not_silence_ranges = detect_nonsilent(audio,500,-30) # third arg is the dBFS threshold; print audio.dBFS to tune it

    yield 10
    num = len(not_silence_ranges)
    print(not_silence_ranges)
    s_index = 0
    time_list = []
    if num == 1:
        chunk=audio
        chunk_name = "./data/" + str(s_index) + ".wav"  # intermediate chunks are stored in ./data
        ## save the chunk
        chunk.export(chunk_name, format="wav")
        # NOTE(review): s_index is not incremented and nothing is appended to
        # time_list here, so the recognition loop below (range(0, s_index))
        # never processes this chunk and 0.wav is never deleted — looks like
        # an unintended dead branch; confirm before relying on it.
    else:
        for index in range(num):
            if(not_silence_ranges[index][1]-not_silence_ranges[index][0]<10000): # only keep segments shorter than 10 s
                if index == 0 and not_silence_ranges[index][0]<400:
                    start_i = 0
                else:
                    # start_i=round((not_silence_ranges[index][0]+not_silence_ranges[index-1][1])/2)
                    start_i = not_silence_ranges[index][0] - 400  # pad 400 ms before so the voiced part is fully captured; tune by testing
                if (index == len(not_silence_ranges) - 1) and (len(audio)<not_silence_ranges[index][1] + 400):
                    end_i = len(audio)
                else:
                    end_i = not_silence_ranges[index][1] + 400  # pad 400 ms after so the voiced part is fully captured

                time_list.append({'beg': start_i/1000, 'end': end_i/1000})  # ms -> s
                chunk=audio[start_i:end_i]
                chunk_name = "./data/" + str(s_index) + ".wav"  # intermediate chunks are stored in ./data
                s_index+=1
                ## save the chunk
                chunk.export(chunk_name, format="wav")
            yield index*10//num+10

    # Target command phrases (must stay in Chinese — they are what the
    # recognizer is matched against): '录制结束' = "recording finished",
    # '重新录制' = "re-record".
    good = '录制结束'
    bad = '重新录制'
    curMark: dict[str, Union[str, float]] = {}  # marker currently being built
    bc = 1  # 'bad' match counter (for logging only)
    gc = 1  # 'good' match counter (for logging only)

    r = sr.Recognizer()  # speech recognizer instance
    for i in range(0,s_index):
        #print(i)
        # print(time_list[i][beg])
        test = sr.AudioFile("./data/"+str(i) + ".wav")  # load chunk i
        with test as source:
            r.adjust_for_ambient_noise(source, duration=0.2)
            audio = r.record(source)
        type(audio)  # NOTE(review): no-op, leftover debug line
        text = r.recognize_google(audio, language="zh-CN", show_all=True)  # show_all=True returns the full response dict (or {} / [] if nothing recognized)
        if text:  # recognition produced at least one candidate
            print(text)
            for j in range(len(text['alternative'])): # several alternatives; a match on any one is enough
                t = text['alternative'][j]
                if good == t['transcript']:
                    print('good:%d' % gc)
                    gc+=1
                    print(time_list[i]['beg'])
                    print(time_list[i]['end'])

                    curMark['label'] = 'good'
                    curMark['start'] = time_list[i]['beg']-1  # widen marker by 1 s on each side
                    curMark['end'] = time_list[i]['end']+1
                    data.append(curMark)
                    curMark = {}
                    break
                if bad == t['transcript']:
                    print('bad:%d' % bc)
                    bc+=1
                    print(time_list[i]['beg'])
                    print(time_list[i]['end'])

                    curMark['label'] = 'bad'
                    curMark['start'] = time_list[i]['beg']-1  # widen marker by 1 s on each side
                    curMark['end'] = time_list[i]['end']+1
                    data.append(curMark)
                    curMark = {}
                    break
        os.remove("./data/" + str(i) + ".wav")  # clean up the processed chunk
        yield i * 80 // s_index +20
    print(time.time()-stime)  # elapsed wall time, debug
    os.remove("./data/speech.wav")  # remove the extracted audio file
    yield data




    #return data

#detect("VID_20210825_221343.mp4")

# to_wav("test")
# audio = AudioSegment.from_wav("speech.wav")
# print(audio.dBFS)
# not_silence_ranges = detect_nonsilent(audio,min_silence_len=400,silence_thresh=audio.dBFS, seek_step=1)
# print(not_silence_ranges)

