#-*-coding:utf-8-*-
import pyttsx3

def use_pyttsx3(words, audio_file):
    """Synthesize ``words`` to speech with pyttsx3 and save it to ``audio_file``.

    words: text to synthesize.
    audio_file: output path handed to the engine's ``save_to_file``.

    Blocks until rendering finishes (``runAndWait``).
    """
    engine = pyttsx3.init()

    # Report the engine's default speaking rate, then speed it up to 175 wpm.
    # (The original printed the default *after* overriding it, which made the
    # output look like the active setting; print-before-set is accurate.)
    rate = engine.getProperty('rate')
    print(f'语音速率：{rate}')
    engine.setProperty('rate', 175)

    # Report the default volume, then pin it to the maximum (range 0.0 - 1.0).
    volume = engine.getProperty('volume')
    print(f'语音音量：{volume}')
    engine.setProperty('volume', 1.0)

    # List the installed voices. voices[0] is selected because on the author's
    # system it can read Chinese while voices[1] (female) cannot — this index
    # is machine-specific; verify on other machines.
    voices = engine.getProperty('voices')
    for installed_voice in voices:
        print(installed_voice.name)
    engine.setProperty('voice', voices[0].id)
    print(f'语音声音详细信息：{voices}')

    # Echo the voice id that is now active.
    voice = engine.getProperty('voice')
    print(f'语音声音：{voice}')

    # Render the text to the audio file rather than speaking it aloud.
    engine.save_to_file(words, audio_file)
    engine.runAndWait()
    engine.stop()

# importing libraries
import speech_recognition as sr

import os

from pydub import AudioSegment
from pydub.silence import split_on_silence

# a function that splits the audio file into chunks
# and applies speech recognition
def silence_based_conversion(path="alice-medium.wav"):
    """Split a WAV file on silences and transcribe each chunk with Google speech
    recognition, appending the text to ``recognized.txt``.

    path: input WAV file. Chunk files ``chunk<i>.wav`` are written into an
    ``audio_chunks`` sub-directory; ``recognized.txt`` is created in the
    caller's current working directory (the function chdirs into
    ``audio_chunks`` and back, as the original did).
    """
    song = AudioSegment.from_wav(path)

    # Open the transcript before chdir so it lands in the caller's cwd.
    # The original leaked this handle; the context manager guarantees it is
    # flushed and closed even if recognition fails part-way.
    with open("recognized.txt", "w+") as fh:
        chunks = split_on_silence(
            song,
            # Must be silent for at least 500 ms to count as a break.
            # Raise this for speakers who pause a lot; lower it otherwise.
            min_silence_len=500,
            # Anything quieter than -16 dBFS is treated as silence;
            # tune per recording.
            silence_thresh=-16,
        )

        # Create the chunk directory (idempotent) and work inside it.
        os.makedirs('audio_chunks', exist_ok=True)
        os.chdir('audio_chunks')

        # One recognizer serves every chunk; the original rebuilt it per
        # iteration for no benefit.
        recognizer = sr.Recognizer()

        for i, chunk in enumerate(chunks):
            # Pad both ends with a short silence so the cut isn't abrupt.
            # NOTE(review): the original comment claimed 0.5 s of padding but
            # the code used 10 ms; the 10 ms behavior is preserved here.
            chunk_silent = AudioSegment.silent(duration=10)
            audio_chunk = chunk_silent + chunk + chunk_silent

            filename = 'chunk' + str(i) + '.wav'
            print("saving chunk{0}.wav".format(i))
            audio_chunk.export("./chunk{0}.wav".format(i),
                               bitrate='192k', format="wav")

            print("Processing chunk " + str(i))

            with sr.AudioFile(filename) as source:
                # Calibrate for background noise; remove this call if it
                # degrades recognition on clean recordings.
                recognizer.adjust_for_ambient_noise(source)
                audio_listened = recognizer.listen(source)

            try:
                # Online recognition; requires network access.
                rec = recognizer.recognize_google(audio_listened)
                fh.write(rec + ". ")
            except sr.UnknownValueError:
                print("Could not understand audio")
            except sr.RequestError:
                print("Could not request results. check your internet connection")

        os.chdir('..')

#from https://freegpt.one/
import pytesseract
import audioio

def audioToText(audiofile):
    """Transcribe an audio file to English text and write it to ``recognized1.txt``.

    audiofile: path to an audio file in any format pydub/ffmpeg can decode
    (e.g. mp3, wav).

    The original implementation fed raw audio samples to
    ``pytesseract.image_to_string`` — an OCR function that only accepts
    images — via a nonexistent ``audioio.load_audio`` API, so it could never
    work. This version uses the libraries this file already imports:
    pydub to decode the input and speech_recognition for the transcription
    (online; requires network access).
    """
    text = ''

    # speech_recognition's AudioFile reader needs WAV/AIFF/FLAC, so decode
    # the input with pydub and round-trip through a temporary WAV file.
    sound = AudioSegment.from_file(audiofile)
    wav_path = os.path.splitext(audiofile)[0] + '_tmp.wav'
    sound.export(wav_path, format='wav')

    try:
        recognizer = sr.Recognizer()
        with sr.AudioFile(wav_path) as source:
            audio_data = recognizer.record(source)
        try:
            text = recognizer.recognize_google(audio_data, language='en-US')
        except sr.UnknownValueError:
            print("Could not understand audio")
        except sr.RequestError:
            print("Could not request results. check your internet connection")
    finally:
        # Always remove the temporary WAV, even if recognition failed.
        os.remove(wav_path)

    with open("recognized1.txt", "w") as af:
        af.write(text)


if __name__ == '__main__':
    # Demo driver. `msg` is sample Chinese narrative text for the
    # text-to-speech path (use_pyttsx3), which is currently commented out;
    # only the audio-to-text path below is active.
    msg = '''甲男无奈之中，只得找上乙男，希望他不要这么做，而乙男则与甲男达成协议，协议的内容很简单——乙男继承王位，与国王的军队演一场戏，杀死自己的替身，成为新的国王。

    战争终于在国王的城堡下结束，看似国王的军队赢得了战争，乙男死去，但实际上是乙男胜利了，但丙女终于明白，一切都是乙男的阴谋，于是，她试图杀死成为国王的乙男，却被识破。

    成为国王的乙男，知道宗教的可怕，不仅禁止了自己创立的教派，也禁止了甲男所在的摩尼教，并且遍天下追杀甲男。'''
    #with open('msg.txt','r',encoding='utf-8') as _file:
    #    msg = _file.read()
    #use_pyttsx3(msg,'几个需要避免的美国英语习惯 wangyin.mp3')

    #print('Enter the audio file path')
    #silence_based_conversion(path='boxing.wav')
    # Active path: transcribe audio.mp3 and save the text to recognized1.txt.
    audioToText('audio.mp3')