# Copyright(C) 2023. Huawei Technologies Co.,Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import sys
import time
import random
from collections import deque
from datetime import datetime
from multiprocessing import Process, Queue, Pipe, Value, Event

import numpy as np

sys.path.append("../")
sys.path.append("../AI_Module")
sys.path.append("../AI_Module/kws")
sys.path.append("../AI_Module/asr")
sys.path.append("../AI_Module/tts")
sys.path.append("../AI_Module/wav2lip")

from Utils import utils
from AI_Module.kws_model import KwsModel
from AI_Module.asr_model import AsrModel
from AI_Module.llm_model import LlmModel
from AI_Module.tts_model import TtsModel
from AI_Module.wav2lip_model import Wav2lipModel


def contains_chinese_char_or_digit(sentence):
    """Return True if *sentence* contains at least one Chinese character
    (simplified & traditional, Unicode range U+4E00 through U+9FFF) or an
    ASCII digit ('0'-'9'); otherwise return False.
    """
    return any(
        '\u4e00' <= ch <= '\u9fff' or '\u0030' <= ch <= '\u0039'
        for ch in sentence
    )


class DigitalHumanPipeline:
    """ ADH Pipeline assembles the AI modules ("KWS", "ASR", "LLM", "TTS", "WAV2LIP") together.

    Each model runs in its own process and communicates through dedicated
    multiprocessing queues. 0.5 s int16 audio chunks arrive on
    ``ws_input_queue``; the rendered digital-human output produced by the
    WAV2LIP stage is pushed to ``pipeline_output_queue``.
    """
    def __init__(self, client_id, ws_input_queue, pipeline_output_queue):
        """Create the per-client queues and conversation state.

        Args:
            client_id: identifier of the connected client.
            ws_input_queue: queue delivering incoming 0.5 s audio chunks
                (np.int16 arrays — assumed from the int16->float32 scaling
                below; confirm against the websocket producer).
            pipeline_output_queue: queue receiving the WAV2LIP output.
        """
        # Chat history maintained across LLM rounds for this client.
        self.history = []
        self.client_id = client_id
        # One input/output queue pair per model stage.
        self.kws_in_queue = Queue()
        self.kws_out_queue = Queue()
        self.asr_in_queue = Queue()
        self.asr_out_queue = Queue()
        self.llm_in_queue = Queue()
        self.llm_out_queue = Queue()
        self.tts_in_queue = Queue()
        self.tts_out_queue = Queue()
        self.ws_input_queue = ws_input_queue
        self.pipeline_output_queue = pipeline_output_queue
        self.mic_opened = False
        # Shared flag telling the ASR process that the user is about to speak.
        self.listen_start = Value('b', False)
        # Whether the wake-word detector is currently active.
        self.kws_on = True
        self.first_llm_sentence = True
        self.first_round = True
        # Canned replies played after a (non-first) wake-word detection.
        self.response_dict = {
            0: "请说。",
            1: "在的呢。",
            2: "我在呢。",
        }
        # "hey snips" is usually spoken within 0.5-1.5 s, so the sliding
        # window covers 1.5 s (1.5 s = segment_duration_ms * maxlen).
        self.window = deque(maxlen=3)

    def run_kws_model(self, kws_init_done):
        # Run KWS model, and it will push the output signal to kws_out_queue
        kws_model = KwsModel(self.kws_in_queue, self.kws_out_queue, kws_init_done)
        kws_model.run()

    def run_asr_model(self, asr_init_done):
        # Run ASR model, and it will push the output text to asr_out_queue
        asr_model = AsrModel(self.asr_in_queue, self.asr_out_queue, asr_init_done)
        asr_model.run()

    def run_llm_model(self, llm_init_done):
        # Run LLM model, and it will push the output to llm_out_queue
        llm_model = LlmModel(self.llm_in_queue, self.llm_out_queue, llm_init_done)
        llm_model.run()

    def run_tts_model(self, tts_init_done):
        # Run TTS model, and it will push the output to tts_out_queue
        tts_model = TtsModel(self.tts_in_queue, self.tts_out_queue, tts_init_done)
        tts_model.run()

    def run_wav2lip_model(self, wav2lip_init_done):
        # Run WAV2LIP model; it consumes TTS output and pushes the rendered
        # result to pipeline_output_queue.
        wav2lip_model = Wav2lipModel(self.tts_out_queue, self.pipeline_output_queue, wav2lip_init_done)
        wav2lip_model.run()

    def run_pipeline(self):
        """Start every model in its own process, then loop forever moving data
        between the stages (websocket audio -> KWS/ASR -> LLM -> TTS -> WAV2LIP).

        On any exception, all child processes are terminated and the loop exits.
        """
        print("\n\n------------------------------------------------------------------------------------\n\
[ADH][LOADING] Digital human start initializing, please be patient...\n\
------------------------------------------------------------------------------------\n\n")

        # Wait until all models are ready, then start running
        kws_init_done = Event()
        asr_init_done = Event()
        llm_init_done = Event()
        tts_init_done = Event()
        wav2lip_init_done = Event()

        # Run KWS model in a separate process
        kws_process = Process(target=self.run_kws_model, args=(kws_init_done, ))
        kws_process.start()
        # Run ASR model in a separate process
        asr_process = Process(target=self.run_asr_model, args=(asr_init_done, ))
        asr_process.start()
        # Run LLM model in a separate process
        llm_process = Process(target=self.run_llm_model, args=(llm_init_done, ))
        llm_process.start()
        # Run TTS model in a separate process
        tts_process = Process(target=self.run_tts_model, args=(tts_init_done, ))
        tts_process.start()
        # Run WAV2LIP model in a separate process
        wav2lip_process = Process(target=self.run_wav2lip_model, args=(wav2lip_init_done, ))
        wav2lip_process.start()

        kws_init_done.wait()
        asr_init_done.wait()
        llm_init_done.wait()
        tts_init_done.wait()
        wav2lip_init_done.wait()
        print("\n\n------------------------------------------------------------------------------------\n\
[ADH][READY] All modules are ready, Please open your Microphone and start speaking.\n\
------------------------------------------------------------------------------------\n\n")

        while True:
            try:
                if not self.ws_input_queue.empty():
                    # 0.5 s audio segment as an np.array.
                    audio_data_numpy = self.ws_input_queue.get()
                    audio_data_numpy = audio_data_numpy.astype(np.float32, order='C') # int16 -> float32
                    # Normalize: the maximum absolute int16 value is 32768.
                    audio_data_numpy = audio_data_numpy / 32768.0
                    audio_data_numpy = audio_data_numpy.reshape(1, -1)

                    # Tell the user that KWS is now listening for the wake word.
                    if not self.mic_opened:
                        print("[KWS] 开始监听关键词...等待唤醒\n")
                        self.mic_opened = True

                    # Feed KWS with a sliding-window-sized slice of audio.
                    self.window.append(audio_data_numpy)
                    window_audio = np.concatenate(list(self.window), axis=1)

                    if self.kws_on:
                        self.kws_in_queue.put(window_audio)

                    asr_input = {
                        "waveform": audio_data_numpy,
                        "listen_start": self.listen_start.value,
                    }

                    if self.listen_start.value:
                        print("[TIMER] 用户开始说话 {}".format(datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f')))
                    self.asr_in_queue.put(asr_input)

                    # Once listen_start=True has been handed to ASR, reset it
                    # immediately (ASR itself detects when the user finishes).
                    self.listen_start.value = False

                if not self.kws_out_queue.empty():
                    result = self.kws_out_queue.get()
                    if result:
                        # The digital human's spoken reply after the
                        # "hey,snips" wake word is detected.
                        if self.first_round:
                            tts_hello = {
                                "text": "您好，我是昇腾数字人，请问有什么需要帮助的吗？",
                                "is_end": True,
                            }
                            self.tts_in_queue.put(tts_hello)
                            time.sleep(6) # the greeting takes roughly 6 s to speak
                            self.first_round = False
                        else:
                            response_tag = random.randrange(3) # pick a reply tag in [0-2]
                            tts_hello = {
                                "text": self.response_dict.get(response_tag),
                                "is_end": True,
                            }
                            self.tts_in_queue.put(tts_hello)
                            time.sleep(1) # the short reply takes roughly 1 s to speak

                        print("\n[ADH] 请开始说话，同时请保持环境安静，问题说完后请保持静音（建议闭麦) :)")
                        self.listen_start.value = True
                        self.kws_on = False

                        # Drop every audio chunk queued before the user was
                        # prompted to speak, keeping only the newest 500 ms.
                        # NOTE(review): Queue.qsize() is approximate and can
                        # raise NotImplementedError on macOS — confirm target
                        # platform.
                        while self.ws_input_queue.qsize() > 1:
                            self.ws_input_queue.get()

                if not self.asr_out_queue.empty():
                    asr_out_dict = self.asr_out_queue.get()
                    if asr_out_dict.get("silence"):
                        # The user's input was silence: apologize and re-arm KWS.
                        tts_hello = {
                            "text": "抱歉，没有听到您的问题，请再说一次呢。",
                            "is_end": True,
                        }
                        self.tts_in_queue.put(tts_hello)
                        time.sleep(5) # the apology takes roughly 5 s to speak
                        self.kws_on = True # resume wake-word detection
                        print("[KWS][ASR] 没有听到你的问题，请重新使用关键词“hey,snips”唤醒数字人")
                        continue
                    if "text" in asr_out_dict and asr_out_dict["text"] != "":
                        print("[ASR] 用户音频输入: ", asr_out_dict["text"])
                        print("[TIMER][ASR] 首句推理完成于 {}".format(datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f')))
                        self.llm_in_queue.put({"query": asr_out_dict["text"], "history": self.history})

                if not self.llm_out_queue.empty():
                    llm_out_data = self.llm_out_queue.get()
                    sentence = llm_out_data["sentence"]
                    is_end = llm_out_data["is_end"]
                    if self.first_llm_sentence:
                        print("[TIMER][LLM] 首句推理完成于 {}".format(datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f')))
                        self.first_llm_sentence = False
                    print("[LLM] 数字人回答:", sentence)
                    # Only forward sentences that contain something speakable.
                    if sentence and contains_chinese_char_or_digit(sentence):
                        tts_input = {
                            "text": sentence,
                            "is_end": is_end,
                        }
                        # LLM passes is_end through to TTS: True only on the
                        # final sentence of the answer, False otherwise.
                        self.tts_in_queue.put(tts_input)

                    # On the final sentence the LLM also returns the full
                    # history; the pipeline keeps it for this user's next turn.
                    if llm_out_data["is_end"]:
                        self.history = llm_out_data["history"]
                        self.first_llm_sentence = True
                        self.kws_on = True
                        print("[KWS] 可继续使用关键词“hey,snips”唤醒数字人")

                # Default per-iteration sleep of the polling loop.
                time.sleep(0.001)

            except Exception as e:
                # Report the failure, then shut down ALL the AI modules.
                # (Bug fix: llm_process was previously never terminated and
                # the loop never exited after cleanup.)
                print("[ERROR] Pipeline caught exception: {}".format(e))
                kws_process.terminate()
                asr_process.terminate()
                llm_process.terminate()
                tts_process.terminate()
                wav2lip_process.terminate()
                print("[TERMINATED] Pipeline shutdown, thank you for using :)")
                break