# For prerequisites running the following sample, visit https://help.aliyun.com/zh/model-studio/getting-started/first-api-call-to-qwen
import os
import signal  # for keyboard events handling (press "Ctrl+C" to terminate recording)
import sys

import dashscope
import pyaudio
from dashscope.audio.asr import *
from dashscope.audio.tts_v2 import *

import threading
from queue import Queue
import time
from typing import Callable

# Set recording parameters
sample_rate = 16000  # sampling rate (Hz)
channels = 1  # mono channel
dtype = 'int16'  # data type
format_pcm = 'wav'  # the format of the audio data
block_size = 1600  # number of frames per buffer
# Finalized ASR sentences are pushed here by RecogCallback and drained by talk_main.
talk_result_queue = Queue()
# Optional observer invoked as text_callback(speaker, text), speaker in {'user', 'robot'}.
# NOTE(review): initialized to None despite the non-Optional annotation — callers must None-check.
text_callback: Callable[[str, str], None] = None


def init_dashscope_api_key():
    """Configure the DashScope API key.

    Prefers the DASHSCOPE_API_KEY environment variable; otherwise falls
    back to a hard-coded placeholder that must be edited manually.
    More information:
    https://github.com/aliyun/alibabacloud-bailian-speech-demo/blob/master/PREREQUISITES.md
    """
    env_key = os.environ.get('DASHSCOPE_API_KEY')
    if env_key is not None:
        # load API-key from environment variable DASHSCOPE_API_KEY
        dashscope.api_key = env_key
    else:
        dashscope.api_key = '<your-dashscope-api-key>'  # set API-key manually

class TalkResponsor:
    """Chat companion backed by the DashScope Generation API (qwen-plus).

    Keeps a short rolling conversation history and prepends a
    language-specific system prompt (selected via the class attribute
    ``language``) to every request.
    """

    # Active prompt language key into the system-prompt table: 'zh' or 'en'.
    language = 'zh'

    # Soft cap on retained history messages; oldest turns are trimmed past this.
    _MAX_HISTORY = 5

    def __init__(self):
        # System prompts, one per supported language.
        self.__messages_head = {}
        self.__messages_head['zh'] = [
            {
                "role": "system",
                "content": """你是一名陪聊师，对方希望你陪着聊天，每次只需要准确地回答对方一句话，不要用表情回答。你的回答只能中文。""",
            }
        ]
        self.__messages_head['en'] = [
            {
                "role": "system",
                "content": """You are a conversation companion. The other person hopes to chat with you. Each time, please respond to them with only one accurate sentence and do not use any emojis in your reply. Your responses must be in English only.""",
            }
        ]
        # Rolling user/assistant message history, oldest first.
        self._history_messages = []

    def talk(self, src):
        """Send one user utterance *src* and return the assistant's reply.

        Returns None when the API call does not succeed (non-200 status).
        """
        self._history_messages.append({"role": "user", "content": src})
        messages = list(self.__messages_head[TalkResponsor.language])
        messages.extend(self._history_messages)
        res = self.__generation(messages)
        if res.status_code == 200:
            assistant_output = res.output.choices[0].message.content
            self._history_messages.append({"role": "assistant", "content": assistant_output})
            # Trim the oldest user/assistant PAIR while over the cap. The
            # previous code removed a single message per turn, which both
            # grew the history without bound (+2 appended, -1 removed) and
            # broke user/assistant alternation by orphaning a reply.
            while len(self._history_messages) > self._MAX_HISTORY:
                del self._history_messages[:2]
            return assistant_output
        else:
            # Failed call: reply is unavailable; the pending user message is
            # kept (as before) so the next successful turn retains context.
            return None

    def __generation(self, msg):
        """Issue one chat-completion request with the assembled *msg* list."""
        response = dashscope.Generation.call(
            # 若没有配置环境变量，请用百炼API Key将下行替换为：api_key="sk-xxx",
            api_key=os.getenv("DASHSCOPE_API_KEY"),
            # 模型列表：https://help.aliyun.com/zh/model-studio/getting-started/models
            model="qwen-plus",
            messages=msg,
            result_format="message",
            top_p=1.0,
            enable_search=True,
        )
        return response

# Real-time speech recognition callback
class RecogCallback(RecognitionCallback):
    """Hooks for the real-time speech recognizer.

    Finalized sentences are forwarded to the shared result queue (and to the
    optional text_callback observer) for the chat loop to consume.
    """

    def on_open(self) -> None:
        print('RecognitionCallback open.')

    def on_close(self) -> None:
        print('RecognitionCallback close.')

    def on_complete(self) -> None:
        print('RecognitionCallback completed.')  # recognition completed

    def on_error(self, message) -> None:
        print('RecognitionCallback task_id: ', message.request_id)
        print('RecognitionCallback error: ', message.message)
        # Forcefully exit the program on a recognition error.
        sys.exit(1)

    def on_event(self, result: RecognitionResult) -> None:
        global talk_result_queue
        global text_callback
        sentence = result.get_sentence()
        # Only act on finalized sentences that actually carry text.
        if 'text' not in sentence or not RecognitionResult.is_sentence_end(sentence):
            return
        spoken_text = sentence['text']
        talk_result_queue.put(spoken_text)
        if text_callback is not None:
            text_callback('user', spoken_text)



class MainTalker:
    """Captures microphone audio on a worker thread and streams it to
    DashScope real-time recognition until close() is called."""

    def __init__(self):
        global sample_rate
        global channels
        global format_pcm
        self._running = True
        self._callback = RecogCallback()
        self._recognition = Recognition(
            model='paraformer-realtime-v2',
            # 'paraformer-realtime-v1'、'paraformer-realtime-8k-v1'
            format=format_pcm,
            # 'pcm'、'wav'、'opus'、'speex'、'aac'、'amr', you can check the supported formats in the document
            sample_rate=sample_rate,
            # support 8000, 16000
            semantic_punctuation_enabled=False,
            callback=self._callback)
        self._recognition.start()

        self._mic = pyaudio.PyAudio()
        # Use the module-level recording parameters so the mic configuration
        # always matches the recognizer (previously hard-coded 1 / 16000).
        self._stream = self._mic.open(format=pyaudio.paInt16,
                                      channels=channels,
                                      rate=sample_rate,
                                      input=True)
        self._worker = threading.Thread(target=self.__run)
        self._worker.start()

    def __run(self):
        """Worker loop: forward microphone frames to the recognizer until stopped."""
        global block_size
        while self._running:
            if self._stream is None:
                break
            data = self._stream.read(block_size, exception_on_overflow=False)
            self._recognition.send_audio_frame(data)

    def close(self):
        """Stop the worker, then release audio and recognition resources.

        Idempotent: subsequent calls are no-ops.
        """
        if not self._running:
            return
        self._running = False
        # Join the reader thread BEFORE closing the stream. The previous
        # ordering closed the stream while the worker could still be blocked
        # inside read(), which could raise from the worker thread.
        if self._worker is not None and self._worker.is_alive():
            self._worker.join()
        self._stream.stop_stream()
        self._stream.close()
        self._mic.terminate()
        self._stream = None
        self._mic = None
        self._recognition.stop()
        print('Recognition stopped.')
        print(
            '[Metric] requestId: {}, first package delay ms: {}, last package delay ms: {}'
            .format(
                self._recognition.get_last_request_id(),
                self._recognition.get_first_package_delay(),
                self._recognition.get_last_package_delay(),
            ))



class TransCallback(ResultCallback):
    """Streams synthesized PCM audio chunks straight to the speakers."""

    _player = None
    _stream = None

    def on_open(self):
        # Open a playback stream matching the synthesizer output format
        # (AudioFormat.PCM_24000HZ_MONO_16BIT). The previous rate of 22050 Hz
        # did not match the 24000 Hz synthesis rate, so playback was slowed
        # and pitch-shifted.
        self._player = pyaudio.PyAudio()
        self._stream = self._player.open(
            format=pyaudio.paInt16, channels=1, rate=24000, output=True
        )

    def on_complete(self):
        # Speech synthesis task completed successfully; nothing to do.
        pass

    def on_error(self, message: str):
        print(f"speech synthesis task failed, {message}")

    def on_close(self):
        # Release the playback stream. Guard against on_open never having
        # run (e.g. the websocket failed before opening), which previously
        # raised AttributeError on the None stream.
        if self._stream is not None:
            self._stream.stop_stream()
            self._stream.close()
            self._stream = None
        if self._player is not None:
            self._player.terminate()
            self._player = None

    def on_event(self, message):
        pass

    def on_data(self, data: bytes) -> None:
        # Write each synthesized chunk to the output device as it arrives.
        self._stream.write(data)


class VoiceProducer():
    """Text-to-speech helper: synthesizes a message and plays it via TransCallback."""

    # CosyVoice voice/timbre used for synthesis.
    voice_author: str = 'longhan_v2'

    @staticmethod
    def run(msg):
        """Synthesize *msg* and stream the audio to the speakers.

        Declared @staticmethod: the original bare ``def run(msg)`` only
        worked when called on the class (VoiceProducer.run(...)) and would
        have mis-bound ``msg`` to the instance if called on one.
        """
        callback = TransCallback()
        synthesizer = SpeechSynthesizer(
            model="cosyvoice-v2",
            voice=VoiceProducer.voice_author,
            format=AudioFormat.PCM_24000HZ_MONO_16BIT,
            speech_rate=0.8,
            pitch_rate=1.0,
            volume=100,
            callback=callback,
        )
        synthesizer.call(msg)

# Shared runtime state for the background talk loop.
glb_running = True  # loop flag for talk_main; cleared by signal_handler/async_exit
main_worker: threading.Thread = None  # background thread running talk_main (set by async_talk)
mainTalker: MainTalker = None  # mic/recognition pipeline, created inside talk_main
robotTalker: TalkResponsor = None  # LLM chat responder, created inside talk_main
def signal_handler(sig, frame):
    """SIGINT/SIGTERM handler: tear down recognition and exit the process."""
    global glb_running
    global mainTalker
    print('Ctrl+C pressed, stop recognition ...')
    # Stop recognition
    talker = mainTalker
    if talker is not None:
        talker.close()
    glb_running = False
    worker = main_worker
    if worker is not None and worker.is_alive():
        worker.join()
    # Forcefully exit the program
    sys.exit(0)

def async_talk():
    """Launch talk_main on a daemon-less background thread and return immediately."""
    global main_worker
    global glb_running
    glb_running = True
    worker = threading.Thread(target=talk_main)
    main_worker = worker
    worker.start()

def async_exit():
    """Stop the background talk loop and release all associated resources."""
    global glb_running
    global robotTalker
    global mainTalker
    # Signal the loop to stop, then wait for the worker to drain out.
    glb_running = False
    worker = main_worker
    if worker is not None and worker.is_alive():
        worker.join()

    if mainTalker is not None:
        mainTalker.close()
        mainTalker = None

    if robotTalker is not None:
        robotTalker = None

def async_listener(callback = None):
    """Register signal handlers and install the transcript observer.

    *callback*, if given, is stored as the global ``text_callback`` and later
    invoked as callback(speaker, text) for both user and robot utterances.
    It is only installed the first time; an already-set callback is kept.

    NOTE(review): signal.signal only works from the main thread of the main
    interpreter — call this from the main thread before async_talk().
    """
    global text_callback
    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)
    # Identity comparison with None (PEP 8) instead of '== None'.
    if text_callback is None:
        text_callback = callback

def talk_main():
    """Main chat loop: consume recognized sentences, query the LLM, speak replies.

    Blocks until glb_running is cleared (by signal_handler or async_exit).
    """
    global glb_running
    global talk_result_queue
    global text_callback
    global robotTalker
    global mainTalker
    init_dashscope_api_key()
    robotTalker = TalkResponsor()
    mainTalker = MainTalker()

    while glb_running:
        if talk_result_queue.empty():
            time.sleep(0.1)  # avoid busy-waiting while there is no speech
            continue
        voice = talk_result_queue.get()
        response = robotTalker.talk(voice)
        if response is None:
            continue  # LLM call failed; wait for the next utterance
        # Guard the observer: text_callback stays None when talk_main is run
        # directly via __main__ (nothing registers one), and the previous
        # unconditional call raised TypeError on the first reply.
        if text_callback is not None:
            text_callback('robot', response)
        VoiceProducer.run(response)

# main function
if __name__ == '__main__':
    # Install Ctrl+C / termination handlers, then run the blocking chat loop
    # in the foreground (no background thread on this path).
    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)
    talk_main()
