import json
import random
import re
import time

import azure.cognitiveservices.speech as speechsdk
import gevent.queue
from azure.cognitiveservices.speech import SpeechSynthesisOutputFormat, PronunciationAssessmentWordResult
from pydub import AudioSegment

from common.log import logger
from common.reply import Reply, ReplyType
from common.tmp_dir import TmpDir
from common.utils import Utils
from config import conf
from sqlite.db import Db
from voice.voice import Voice
from common.metrics import measure_elapsed_time

class AzureVoice(Voice):
    """Azure Cognitive Services speech wrapper: TTS, STT and pronunciation assessment."""

    def __init__(self, user_id=None):
        """Build the Azure speech config and synthesizer.

        The synthesis voice comes from the user's DB preference when
        ``user_id`` is given, otherwise from config (default en-US-AriaNeural).

        :param user_id: optional user id used to look up a per-user voice preference.
        """
        # SECURITY FIX: the subscription key must come from configuration only.
        # The previous code shipped a real-looking key as the config default,
        # leaking a credential in source control.
        azure_api_key = conf().get('azure_api_key')
        azure_region = conf().get('azure_region', 'eastus')
        self.azure_voice_name = None
        if user_id is not None:
            self.azure_voice_name = Db().get_user_ai_voice(user_id)
        if self.azure_voice_name is None:
            self.azure_voice_name = conf().get('azure_voice_name', 'en-US-AriaNeural')
        self.speech_config = speechsdk.SpeechConfig(subscription=azure_api_key, region=azure_region)
        self.speech_config.speech_synthesis_voice_name = self.azure_voice_name
        # MP3 keeps the synthesized payload small; textToVoice re-encodes to AMR
        # later when the clip is too long for the messaging channel.
        self.speech_config.set_speech_synthesis_output_format(SpeechSynthesisOutputFormat.Audio16Khz32KBitRateMonoMp3)
        self.speech_synthesizer = speechsdk.SpeechSynthesizer(speech_config=self.speech_config)
        # Recognition auto-detects between English and Mandarin.
        self.auto_detect_source_language_config = speechsdk.languageconfig.AutoDetectSourceLanguageConfig(languages=["en-US", "zh-CN"])
        # Viseme / blend-shape state, filled by viseme_cb during synthesis.
        self.blend_shapes = []
        self.q = gevent.queue.Queue()
        self.viseme_ids = {}
        self.audio_offsets = []

    # https://learn.microsoft.com/en-us/azure/cognitive-services/speech-service/language-support?tabs=tts#voice-styles-and-roles
    @measure_elapsed_time
    def voiceToText(self, voice_file):
        """Recognize speech from an audio file and return it as a TEXT reply.

        Non-wav inputs are assumed to be AMR and converted to wav first,
        because the Azure recognizer reads wav files.

        :param voice_file: path to a .wav or .amr audio file.
        :return: Reply(TEXT, recognized_text) on success, Reply(ERROR, ...) otherwise.
        """
        if voice_file.endswith('.wav'):
            wav_file_name = voice_file
        else:
            wav_file_name = TmpDir().path() + 'voice_' + str(int(time.time())) + str(
                random.randint(1000, 9999)) + '.wav'
            AudioSegment.from_file(voice_file, format="amr").export(wav_file_name, format="wav")
        audio_config = speechsdk.AudioConfig(filename=wav_file_name)
        speech_recognizer = speechsdk.SpeechRecognizer(
            speech_config=self.speech_config,
            audio_config=audio_config,
            auto_detect_source_language_config=self.auto_detect_source_language_config,
        )
        result = speech_recognizer.recognize_once()
        if result.reason == speechsdk.ResultReason.RecognizedSpeech:
            logger.info("[Azure] voiceToText voice file name={} text={}".format(voice_file, result.text))
            return Reply(ReplyType.TEXT, result.text)
        # BUGFIX: cancellation_details is only valid on Canceled results; the old
        # code accessed it for every non-recognized result and would raise on a
        # NoMatch (silence / unintelligible audio) result.
        if result.reason == speechsdk.ResultReason.Canceled:
            cancel_details = result.cancellation_details
            logger.error(
                "[Azure] voiceToText error, result={}, errordetails={}".format(result, cancel_details.error_details))
        else:
            logger.error("[Azure] voiceToText no speech recognized, result={}".format(result))
        return Reply(ReplyType.ERROR, "抱歉，语音识别失败")

    def viseme_cb(self, evt):
        """Synthesizer viseme event handler.

        Records each event's audio offset (converted from 100-ns ticks to
        milliseconds) and queues the decoded blend-shape animation payload
        for textToVoice to drain after synthesis completes.
        """
        if evt.audio_offset != 0:
            self.audio_offsets.append(evt.audio_offset / 10000)
        # `animation` is an XML string for SVG or a JSON string for blend shapes.
        payload = evt.animation
        if payload:
            self.q.put(json.loads(payload))

    @measure_elapsed_time
    def textToVoice(self, text, viseme=False):
        """Synthesize `text` to an mp3 file; clips over ~60s are truncated and re-encoded to AMR.

        :param text: text to speak; it is normalized first and must contain at
            least two ASCII letters.
        :param viseme: when True, facial blend-shape frames are collected into
            self.blend_shapes. Visemes are only supported by en-US neural
            voices, so non-US English voices are remapped first.
        :return: Reply(VOICE, file_path) on success, Reply(ERROR, ...) otherwise.
        """
        text = Utils.normal_tts_text(text)
        if text is None or len(re.findall(r'[a-zA-Z]', text)) < 2:
            # FIX: logger.warn is deprecated in the logging module; use warning.
            logger.warning(f'normal_tts_text {text} is too short:None or <2')
            return Reply(ReplyType.ERROR, "抱歉，语音合成失败")
        if viseme and not self.azure_voice_name.startswith('en-US'):
            # Visemes currently only work with en-US neural voices; map British
            # voices to the closest American equivalent.
            fallback_voice_codes = {'en-GB-AbbiNeural': 'en-US-AriaNeural',
                                    'en-GB-OliverNeural': 'en-US-BrandonNeural',
                                    'en-GB-MaisieNeural': 'en-US-AnaNeural'}
            self.azure_voice_name = fallback_voice_codes.get(self.azure_voice_name, 'en-US-AriaNeural')
        logger.debug(self.azure_voice_name)
        text = "<![CDATA[" + text + "]]>"
        _text = f"""
            <speak version="1.0" xmlns="http://www.w3.org/2001/10/synthesis" xmlns:mstts="https://www.w3.org/2001/mstts" xml:lang="pt-BR">
                <voice name="{self.azure_voice_name}">
                    { '<mstts:viseme type="FacialExpression"/>' if viseme else ''}
                        <prosody rate = "-10.00%">
                        {text}
                        </prosody>
                   
                </voice>
            </speak>"""
        if viseme:
            # NOTE(review): connect() is called on every invocation; on a reused
            # synthesizer this registers duplicate handlers — confirm intended.
            self.speech_synthesizer.viseme_received.connect(self.viseme_cb)
        speech_synthesis_result = self.speech_synthesizer.speak_ssml_async(ssml=_text).get()
        if viseme:
            self._drain_blend_shapes()

        reply = Reply(ReplyType.ERROR, "抱歉，语音合成失败")
        if speech_synthesis_result.reason == speechsdk.ResultReason.SynthesizingAudioCompleted:
            audio_data = speech_synthesis_result.audio_data
            if len(audio_data) > 10:
                mp3_file_name = TmpDir().path() + 'reply_' + str(int(time.time())) + '_' + str(
                    random.randint(1000, 9999)) + '.mp3'
                voice_file_name = mp3_file_name
                with open(mp3_file_name, 'wb') as f:
                    f.write(audio_data)
                start = time.time()
                voice_file = AudioSegment.from_mp3(mp3_file_name)
                # Voice messages cap at 60s; truncate via pydub and re-encode to
                # AMR (truncating the mp3 directly would need an ffmpeg built
                # with an mp3 encoder).
                if len(voice_file) > 59900:
                    voice_file_trunc = voice_file[:59 * 1000]
                    amr_file_name = TmpDir().path() + 'voice_' + str(int(time.time())) + str(
                        random.randint(1000, 9999)) + '.amr'
                    voice_file_trunc.set_frame_rate(8000).set_channels(1).set_sample_width(1).export(
                        amr_file_name, format="amr",
                        parameters=["-af", "highpass=f=200, lowpass=f=3000, afftdn=nt=w:nf=-30",
                                    "-ac", "1", "-ar", "8000", "-b:a", "12.2k"])
                    voice_file_name = amr_file_name
                end = time.time()
                logger.info('[Azure] textToVoice elapsed={} seconds text={} ,voice file name={}'.format(
                    end - start, text, voice_file_name))
                reply = Reply(ReplyType.VOICE, voice_file_name)
            else:
                logger.error(' audio data too short')
        elif speech_synthesis_result.reason == speechsdk.ResultReason.Canceled:
            cancellation_details = speech_synthesis_result.cancellation_details
            logger.error("Speech synthesis canceled: {}".format(cancellation_details.reason))
            if cancellation_details.reason == speechsdk.CancellationReason.Error and cancellation_details.error_details:
                logger.error("Error details: {}".format(cancellation_details.error_details))
            reply = Reply(ReplyType.ERROR, "Speech synthesis canceled")
        else:
            logger.error('[Azure] textToVoice error={}'.format(speech_synthesis_result.reason))
        return reply

    def _drain_blend_shapes(self):
        """Drain queued blend-shape payloads (from viseme_cb) into self.blend_shapes.

        Frames are timestamped at a fixed 60 fps cadence.
        """
        time_stamp = 0
        tick = 1 / 60
        while True:
            try:
                item = self.q.get(block=True, timeout=5)
            except Exception as error:
                logger.debug(error)
                break
            for frame in item['BlendShapes']:
                self.blend_shapes.append({
                    "time": time_stamp,
                    "blendshapes": list(frame)
                })
                time_stamp += tick
            # BUGFIX: the original raised as soon as the queue went empty while
            # still iterating a payload's frames, silently discarding the tail
            # frames of the final payload. Only stop between payloads.
            if self.q.empty():
                break

    def word_str(self, word: PronunciationAssessmentWordResult):
        """Format one assessed word as a human-readable line (Chinese labels).

        Mispronounced words additionally list per-syllable accuracy scores;
        words with no error produce an empty string.
        """
        # ErrorType values: None / Omission / Insertion / Mispronunciation
        # (word read correctly / skipped / extra / mispronounced).
        error_type = {'None': '无', 'Omission': '漏读', 'Insertion': '多读', 'Mispronunciation': '误读'}
        if word.error_type == 'Mispronunciation':
            parts = [f'''📖 {word.word}：\t{error_type[word.error_type]}''']
            parts.extend(f'\n\t- {syl.syllable}：\t{syl.accuracy_score} 分' for syl in word.syllables)
            return ''.join(parts)
        if word.error_type in ('Omission', 'Insertion'):
            return f'''📖 {word.word}：\t {error_type[word.error_type]}'''
        return ''
    def pronAssessment(self, reference_text, voice_file_name):
        logger.debug(f'reference text:{reference_text}')
        wav_file_Name = TmpDir().path() + 'voice_' + str(int(time.time())) + str(
            random.randint(1000, 9999)) + '.wav'
        AudioSegment.from_file(voice_file_name, format="amr").export(wav_file_Name, format="wav")
        config={"referenceText":reference_text, "gradingSystem": "HundredMark", "granularity": "Phoneme", "EnableMiscue":True,"phonemeAlphabet":"IPA"}
        pronunciation_assessment_config = speechsdk.PronunciationAssessmentConfig(json_string=json.dumps(config))
        audio_config = speechsdk.AudioConfig(filename=wav_file_Name)
        speech_recognizer = speechsdk.SpeechRecognizer(speech_config=self.speech_config, audio_config=audio_config,
                                                       auto_detect_source_language_config=self.auto_detect_source_language_config )

        pronunciation_assessment_config.apply_to(speech_recognizer)
        reply = Reply(ReplyType.ERROR, "抱歉，语音合成失败")
        speech_recognition_result = speech_recognizer.recognize_once_async().get()
        if speech_recognition_result.reason == speechsdk.ResultReason.RecognizedSpeech:
            logger.info("[Azure] voiceToText voice file name={} text={}".format(voice_file_name, speech_recognition_result.text))
            result = {}

            # The pronunciation assessment result as a Speech SDK object
            pronunciation_assessment_result = speechsdk.PronunciationAssessmentResult(speech_recognition_result)
            if pronunciation_assessment_result:
                result['PronScore'] = pronunciation_assessment_result.pronunciation_score
                result['AccuracyScore'] = pronunciation_assessment_result.accuracy_score
                result['FluencyScore'] = pronunciation_assessment_result.fluency_score
                result['CompletenessScore'] = pronunciation_assessment_result.completeness_score
                words = pronunciation_assessment_result.words

                words = [w for w in words if w.accuracy_score < 99.5 and w.error_type != 'None']
                lowest_scores_words = sorted(words, key=lambda x: x.accuracy_score)[:3]

                result['lowest_scores'] = [{'Word':w.word,'ErrorType':w.error_type,'AccuracyScore':w.accuracy_score} for w in lowest_scores_words]
                result_str = f'''综合评分：\t{result['PronScore']} 分
准  确  性：\t{result['AccuracyScore']} 分
流  畅  性：\t{result['FluencyScore']} 分
完  整  性：\t{result['CompletenessScore']} 分
{self.word_str(lowest_scores_words[0]) if len(lowest_scores_words)>0 else ''}
{self.word_str(lowest_scores_words[1]) if len(lowest_scores_words)>1 else ''}
{self.word_str(lowest_scores_words[2]) if len(lowest_scores_words)>2 else ''}
'''
                # The pronunciation assessment result as a JSON string
                # pronunciation_assessment_result_json = speech_recognition_result.properties.get(
                #     speechsdk.PropertyId.SpeechServiceResponse_JsonResult)
                # r = json.loads(pronunciation_assessment_result_json)

                # if r and len(r) > 0:
                #     scores = r["NBest"][0]["PronunciationAssessment"]
                #     result['PronScore'] = scores['PronScore']
                #     result['AccuracyScore'] = scores['AccuracyScore']
                #     result['FluencyScore'] = scores['FluencyScore']
                #     result['CompletenessScore'] = scores['CompletenessScore']
                #     words = r["NBest"][0]["Words"]
                #     words = [w for w in words if w['PronunciationAssessment']['AccuracyScore'] != 100]
                #     lowest_scores_words = sorted(words, key=lambda x: x['PronunciationAssessment']['AccuracyScore'])[:3]
                #     lowest_scores_words = sorted(lowest_scores_words,key=lambda x: x['Offset'])
                #     result['lowest_scores'] = lowest_scores_words
                #
                #     result_str=f'''
                #         综合评分:{result['PronScore']}\n
                #         准确性:{result['AccuracyScore']},流畅性:{result['FluencyScore']}，完整性：{result['CompletenessScore']}\n
                #         {self.word_str(lowest_scores_words[0])}\n
                #         {self.word_str(lowest_scores_words[1])}\n
                #         {self.word_str(lowest_scores_words[2])}
                #     '''
                reply = Reply(ReplyType.TEXT, (result_str,result,speech_recognition_result.json))
            else:
                logger.error('pron assessment result is empty'+speech_recognition_result.error_json)
        else:
            cancel_details = speech_recognition_result.cancellation_details
            logger.error(
                "[Azure] voiceToText error, result={}, errordetails={}".format(speech_recognition_result, cancel_details))
            reply = Reply(ReplyType.ERROR, "抱歉，语音纠正失败")
        return reply
        # The pronunciation assessment result as a Speech SDK object
        # pronunciation_assessment_result = speechsdk.PronunciationAssessmentResult(speech_recognition_result)
        # result['PronScore'] = pronunciation_assessment_result.pronunciation_score
        # result['AccuracyScore'] = pronunciation_assessment_result.accuracy_score
        # result['FluencyScore'] = pronunciation_assessment_result.fluency_score
        # result['CompletenessScore'] = pronunciation_assessment_result.completeness_score
        # words = pronunciation_assessment_result.words
        # words = [w for w in words if w.accuracy_score > 99.5]
        # lowest_scores = sorted(words, key=lambda x: x.accuracy_score)[:5]
        # result['lowest_scores'] = lowest_scores



if __name__ == '__main__':
    # Ad-hoc manual test: synthesize a sample sentence with visemes enabled and
    # print the elapsed time plus the collected viseme / blend-shape data.
    start = time.time()
    voice = AzureVoice()
    text = '''Peter Piper picked a peck of pickled peppers. Sally sells seashells by the seashore. The quick brown fox jumps over the lazy dog. She saw Sheri's shoes and she sure saw Sheri's shorts.'''
    text2 = 'Her hair is curly.'
    text3 = 'Enjoy your toys.'
    text4 = 'Wow, you found out how to pronounce "th" sound without any difficulty.'
    voice.textToVoice(text4, True)
    end = time.time()
    # Observed timings for a 9 second clip: viseme ~17.62s, no-viseme ~3.0s.
    print(str(end - start))
    print(voice.viseme_ids)
    print(voice.blend_shapes)
    print(voice.audio_offsets)