# -*- coding:utf-8 -*-

# @Time    : 2024/4/14 16:53
# @Author  : zengwenjia
# @Email   : zengwenjia@lingxi.ai
# @File    : voice_bot.py
# @Software: LLM_internal

# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #

import os
import sys
from multiprocessing import Process, Queue, set_start_method

sys.path.append('../../')
from bot.financial_sales.asr import LxAsrCallback, LxAsrOnline, play_audio
from bot.insurance_planner_gpt.tele_wechat_assistant import TeleWechatAssistant

import wave
import time
from pydub import AudioSegment
from pyaudio import PyAudio, paInt16
from multiprocessing import Semaphore
from threading import Thread
import websockets

import asyncio
import uuid
import json

# Shared state for the voice-bot session.
queue = Queue()  # NOTE(review): created but never read/written in this file — possibly vestigial
conversation_history = []  # chat turns as {"role": ..., "content": ...} dicts, shared by all coroutines
# Randomized demo persona: pick a name / age / entitlement for this session.
name_list = ['王女士', '李先生', '张女士', '吴先生']
age_list = [35, 23, 45, 21]
rights_list = ['已获得优惠券', '已获得降息', '无权益']
from random import choice  # NOTE(review): mid-file import; conventionally belongs at top of file

name = choice(name_list)

age = choice(age_list)

right = choice(rights_list)

# Keys are the Chinese field names the assistant backend expects (name / age / initial entitlement).
base_info = {'用户姓名': name, '用户年龄': str(age), '初始用户权益': right}

sessionId = str(uuid.uuid4())  # one conversation session per process run

# Flag controlling when speech recognition starts and stops.
# NOTE(review): despite the name (and the Semaphore import above) this is a
# plain int used as a boolean run-flag, not a multiprocessing.Semaphore.
semaphore = 1

# Audio capture/playback parameters: 16-bit mono PCM at 8 kHz, 800-frame chunks.
CHUNK = 800
FORMAT = paInt16
CHANNELS = 1
RATE = 8000
# Control frames for the TTS websocket protocol.
start_msg = {"header": {"event_name": "StartTTS"}}
stop_msg = {"header": {"event_name": "StopTTS"}}



voice_user_input = ''  # NOTE(review): declared as a global but never assigned elsewhere in this file

# Module-level playback stream; recv_voice() writes synthesized PCM here.
p = PyAudio()
output_stream = p.open(format=FORMAT,
                        channels=CHANNELS,
                           rate=RATE,
                           output=True)



class SaleAsrCallback(LxAsrCallback):
    """ASR callback that hands each finished sentence to the dialogue pipeline."""


    def on_message(self, message: dict) -> None:
        # Only react when the recognizer signals a complete sentence.
        if message['header']['event_name'] == 'SentenceEnd':

            user_content = message['payload']['result']
            print('接收到语音输入：{}'.format(user_content))
            # Dispatch to a thread for asynchronous handling.
            # BUG(review): `dialogue` is an async coroutine function taking
            # (user_content, tts_ws); Thread(target=...) will call it with one
            # argument — raising TypeError — and even with both arguments it
            # would only create a never-awaited coroutine object. This path
            # needs access to the TTS websocket (not available here) and an
            # event-loop hand-off, e.g. asyncio.run_coroutine_threadsafe.
            t = Thread(target=dialogue, args=(user_content,))
            t.start()



async def dialogue(user_content, tts_ws):
    """Handle one user turn: record it, get the assistant's reply, speak it.

    Appends *user_content* to the shared conversation history, requests a
    streamed reply from the sales assistant, pipes the stream through the
    TTS websocket via send_text, and appends the finished reply text to the
    history as an assistant turn.

    Args:
        user_content: The user's utterance (plain text).
        tts_ws: Connected TTS websocket (as returned by start_tts).
    """
    global conversation_history
    conversation_history.append({"role": "user", "content": user_content})
    sales = TeleWechatAssistant()
    is_stream = True
    reply_result = await sales.async_reply(conversation_history, sessionId, base_info, is_stream)
    # send_text streams fragments to TTS and returns the concatenated reply.
    result = await send_text(tts_ws, reply_result)
    conversation_history.append({"role": "assistant", "content": result})
    print('回复话术：{}'.format(result))



async def start_tts():
    """Open the TTS websocket and perform the StartTTS handshake.

    Connects to the streaming TTS endpoint, sends the StartTTS control
    message, and waits for the TTSStarted acknowledgement.

    Returns:
        The connected websocket on success, or None when the server
        responds with a non-200 status in the acknowledgement header.
    """
    uri = "ws://116.196.96.191/speech_editing_huize/streaming"  # public endpoint
    # uri = "ws://116.196.120.31:8800/speech_editing_huize/streaming"  # alternate public endpoint
    tts_ws = await websockets.connect(uri)
    await tts_ws.send(json.dumps(start_msg, ensure_ascii=False))
    msg_dict = json.loads(await tts_ws.recv())
    # Expected ack shape:
    # {'header': {'status': 200, 'status_message': 'ok', 'event_name': 'TTSStarted'},
    #  'payload': {'audio_sample_rate': 8000, 'audio_encoding': 'PCM_S',
    #              'bits_per_sample': 16, 'num_channels': 1}}
    print(msg_dict)
    if msg_dict["header"]["status"] != 200:
        print("TTS handshake failed: {}".format(msg_dict["header"]))
        return None
    return tts_ws



async def recv_voice(tts_ws):
    """Drain synthesized audio from the TTS socket until a text frame arrives.

    Binary frames carry raw PCM: each chunk is played immediately on the
    module-level output stream and also accumulated.  The first non-binary
    frame is the end-of-audio JSON control message; it is printed and the
    collected PCM bytes are returned.
    """
    chunks = []
    while True:
        frame = await tts_ws.recv()
        if not isinstance(frame, bytes):
            # End-of-audio control message — a JSON text frame.
            print(json.loads(frame))
            break
        chunks.append(frame)
        output_stream.write(frame)
    return b"".join(chunks)
async def start_audio_connect_asr():
    """Capture microphone audio and stream it to the ASR service.

    Opens a capture stream at RATE Hz and forwards CHUNK-sized frames to the
    ASR websocket until the module-level `semaphore` flag is cleared or the
    stream yields an empty read.  Recognition results are delivered through
    SaleAsrCallback.on_message.  The audio device is always released on exit.
    """
    global semaphore
    global voice_user_input

    ws_addr = "ws://180.184.36.44/asr/v0.8"

    callback = SaleAsrCallback()
    asr_client = LxAsrOnline(ws_addr, callback)
    asr_client.set_sample_rate(RATE)
    asr_client.set_enable_punctuation(False)
    # asr_client.set_vad()

    # Use a dedicated PyAudio handle for capture so terminating it here
    # does not touch the module-level `p` that owns the playback stream.
    pa = PyAudio()
    stream = pa.open(format=FORMAT,
                     channels=CHANNELS,
                     rate=RATE,
                     input=True,
                     frames_per_buffer=CHUNK)
    try:
        asr_client.start()
        print("start asr，可以开始说话了:")

        while semaphore:
            data = stream.read(CHUNK, exception_on_overflow=False)
            if data == b"":
                break
            asr_client.send(data)
            # Yield between frames instead of time.sleep, which would block
            # the whole event loop inside this coroutine.
            await asyncio.sleep(0.1)

        asr_client.stop()
        asr_client.stats.cal()
        asr_client.stats.log()
        print("close asr")
    finally:
        # Release the audio device even if the ASR client raises mid-loop.
        stream.stop_stream()
        stream.close()
        pa.terminate()

async def send_text(tts_ws, reply_result):
    """Forward a streamed LLM reply to the TTS socket fragment by fragment.

    Iterates the async completion stream, sending every delta fragment as a
    Text event as soon as it arrives, then closes the utterance with the
    StopTTS control message.

    Args:
        tts_ws: Connected TTS websocket.
        reply_result: Async iterator of OpenAI-style streaming chunks
            (each with choices[0]["delta"], optionally containing "content").

    Returns:
        The full concatenated reply text.
    """
    message = ""

    async for data in reply_result:
        delta = data['choices'][0]["delta"]
        if "content" in delta:
            fragment = delta["content"]
            message += fragment
            msg = {"header": {"event_name": "Text"}, "payload": {"text": fragment}}
            await tts_ws.send(json.dumps(msg, ensure_ascii=False))

    print(message)
    # Signal end of utterance so the server flushes remaining audio.
    await tts_ws.send(json.dumps(stop_msg, ensure_ascii=False))
    return message

async def start_dialogue(tts_ws):
    """Generate and speak the assistant's opening turn.

    Requests a streamed reply from the sales assistant based on the current
    conversation history, pipes it through TTS via send_text, and appends
    the finished reply to the shared history as an assistant turn.
    """
    global semaphore, conversation_history

    assistant = TeleWechatAssistant()
    is_stream = True
    stream_reply = await assistant.async_reply(
        conversation_history, sessionId, base_info, is_stream)
    spoken_text = await send_text(tts_ws, stream_reply)
    conversation_history.append({"role": "assistant", "content": spoken_text})

async def main():
    """Drive the voice-bot session: opening turn, then a text-input loop.

    Speaks the assistant's opening line first (text streaming and audio
    playback run concurrently as tasks), then repeatedly reads user text
    from stdin — "exit" quits — generating and playing each reply.  The
    module-level `semaphore` flag gates whether the loop is active.
    """
    tts_ws = await start_tts()
    # Run text-send and audio-receive concurrently: the server streams PCM
    # back while fragments are still being sent.
    dialogue_task = asyncio.create_task(start_dialogue(tts_ws))
    voice_task = asyncio.create_task(recv_voice(tts_ws))
    await dialogue_task
    await voice_task
    while 1:
        if semaphore:
            tts_ws = await start_tts()
            # NOTE(review): input() blocks the event loop; acceptable here
            # since nothing else is scheduled while waiting for the operator.
            user_input = input("请输入命令：")
            if user_input == "exit":
                break
            dialogue_task = asyncio.create_task(dialogue(user_input, tts_ws))
            voice_task = asyncio.create_task(recv_voice(tts_ws))
            await dialogue_task
            await voice_task
            # await start_audio_connect_asr()
        else:
            # Yield without stalling the loop (time.sleep would block every
            # coroutine on this event loop).
            await asyncio.sleep(0.1)








if __name__ == '__main__':
    # set_start_method('fork')
    
    # Run the async entry point; blocks until main() returns (user typed "exit").
    asyncio.run(main())

