# -*- coding:utf-8 -*-

# @Time    : 2023/10/22 14:56
# @Author  : zengwenjia
# @Email   : zengwenjia@lingxi.ai
# @File    : voice_sales.py
# @Software: LLM_internal

# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
import os
import sys
from multiprocessing import Process, Queue, set_start_method

sys.path.append('../../')
from bot.bot import Bot
from common.log import logger
import asyncio
from common import constants
from bot.financial_sales.asr import LxAsrCallback, LxAsrOnline, play_audio
from bot.financial_sales.sales import Sales, SaleGPT
import pandas as pd
from util_tool import utils
from openai.openai_object import OpenAIObject
import wave
import time
from pydub import AudioSegment
from pyaudio import PyAudio, paInt16
from multiprocessing import Semaphore
from threading import Thread

import asyncio
import random
import uuid

# Shared state for one voice-sales session.
queue = Queue()
conversation_history = []

# Candidate customer profiles; one value of each is sampled per session.
name_list = ['王女士', '李先生', '张女士', '吴先生']
age_list = [35, 23, 45, 21]
rights_list = ['已获得优惠券', '已获得降息', '无权益']
from random import choice

name, age, right = choice(name_list), choice(age_list), choice(rights_list)

base_info = {'用户姓名': name, '用户年龄': str(age), '初始用户权益': right}

# Unique identifier for this conversation session.
sessionId = str(uuid.uuid4())

# Plain int flag shared between the ASR capture loop and the reply thread:
# 1 = speech recognition may run, 0 = reply audio is playing.
semaphore = 1

class VoiceSales(Sales):
    """Voice-channel sales agent bound to the financial Colossal-7B model."""

    def __init__(self):
        # Delegate to Sales with the fixed model identifier for this channel.
        super().__init__("Colossal-7B-financial-1026")



# Buffer for the latest recognized user utterance — declared `global` in
# start_audio_connect_asr() but never read or written there; appears to be
# an unused placeholder (NOTE(review): confirm before removing).
voice_user_input = ''



class SaleAsrCallback(LxAsrCallback):
    """ASR callback that forwards each completed sentence to the dialogue loop."""

    def on_message(self, message: dict) -> None:
        # Only act on finished sentences; ignore partial recognition events.
        if message['header']['event_name'] != 'SentenceEnd':
            return
        user_content = message['payload']['result']
        print('接收到语音输入：{}'.format(user_content))
        # Run the reply off the callback thread so ASR delivery is not blocked.
        worker = Thread(target=dialogue, args=(user_content,))
        worker.start()



def dialogue(user_content):
    """Run one dialogue turn.

    Records the user utterance in the shared conversation history, queries
    the sales model for a reply, records and prints the reply, then plays
    the reply audio. While the audio plays, the module-level ``semaphore``
    flag is cleared so the capture loop in start_audio_connect_asr() stops
    streaming microphone frames.

    :param user_content: recognized user speech for this turn.
    """
    global semaphore, conversation_history, name, sessionId, base_info
    conversation_history.append({"role": "user", "content": user_content})
    sales = VoiceSales()
    reply_result = asyncio.run(sales.async_reply(name, conversation_history, sessionId, base_info))
    result = reply_result['reply_text']
    conversation_history.append({"role": "assistant", "content": result})
    print('回复话术：{}'.format(result))
    voice_no = reply_result['voice_record']
    # Pause speech recognition while the reply audio is playing.
    semaphore = 0
    play_audio(voice_no)
    time.sleep(0.1)
    # Resume speech recognition.
    semaphore = 1

def start_audio_connect_asr():
    """Open the microphone and stream audio to the online ASR service.

    Frames are sent until the module-level ``semaphore`` flag is cleared
    (a reply is being played) or the stream yields no data. The PyAudio
    stream and device are always released, even if connecting, reading,
    or sending fails mid-session — the original code leaked them on error.
    """
    global semaphore
    # 8 kHz, mono, 16-bit PCM in 800-sample (100 ms) chunks.
    CHUNK = 800
    FORMAT = paInt16
    CHANNELS = 1
    RATE = 8000

    ws_addr = "ws://180.184.36.44/asr/v0.8"
    callback = SaleAsrCallback()
    asr_client = LxAsrOnline(ws_addr, callback)
    asr_client.set_sample_rate(RATE)
    asr_client.set_enable_punctuation(False)

    p = PyAudio()
    stream = p.open(format=FORMAT,
                    channels=CHANNELS,
                    rate=RATE,
                    input=True,
                    frames_per_buffer=CHUNK)
    try:
        asr_client.start()
        print("start asr，可以开始说话了:")

        while semaphore:
            data = stream.read(CHUNK, exception_on_overflow=False)
            if data == b"":
                break
            asr_client.send(data)
            time.sleep(0.1)

        asr_client.stop()
        asr_client.stats.cal()
        asr_client.stats.log()
        print("close asr")
    finally:
        # Release the audio device even when the ASR session failed.
        stream.stop_stream()
        stream.close()
        p.terminate()

def start_dialogue():
    """Speak the opening sales line and seed the conversation history."""
    global semaphore, conversation_history
    agent = VoiceSales()

    reply = asyncio.run(agent.async_reply(name, conversation_history, sessionId, base_info))
    text = reply['reply_text']
    recording = reply['voice_record']
    play_audio(recording)
    # Record the opening line as the assistant's first turn.
    conversation_history.append({"role": "assistant", "content": text})




if __name__ == '__main__':
    print("注意提示才能开始说话！！！")

    # Speak the opening line first, then alternate forever: capture speech
    # while the semaphore flag is set, otherwise wait for the reply audio
    # playback to finish and hand the flag back.
    start_dialogue()
    while True:
        if semaphore:
            start_audio_connect_asr()
        else:
            time.sleep(0.1)
