# -*- coding: utf-8 -*-
import sounddevice as sd
import vosk
import queue
import json
from gtts import gTTS
import os
import time
import re
import pygame
from pygame import mixer
from openai import OpenAI
import subprocess
import dashscope
from dashscope.audio.tts import SpeechSynthesizer

# SECURITY(review): API key hardcoded in source — should be read from an
# environment variable and this key rotated, since it is now committed.
dashscope.api_key='sk-184cb40367364fd4af5c99befb9c2766'

# Initialize pygame's audio mixer for TTS playback.
mixer.init()

# Raw microphone chunks produced by the sounddevice callback, consumed by
# the recognition loop below.
q = queue.Queue()

# Local Vosk speech-recognition model (small Chinese model, expected to be
# unpacked next to this script).
model = vosk.Model(r"./vosk-model-small-cn-0.22")
# NOTE(review): vosk.Model raises on load failure rather than returning None,
# so this branch appears unreachable — confirm against the vosk API.
if model is None:
    print("模型加载失败!")
# OpenAI-compatible client for the DeepSeek model hosted on SiliconFlow.
# SECURITY(review): second hardcoded API key — same concern as above.
client = OpenAI(
base_url="https://api.siliconflow.cn/v1",
api_key="sk-xhxtzzohvbudznzltlnscdebfsirmsslannnbwnsjyqfptos"
)

# Global gate: 1 = the mic callback enqueues audio, 0 = audio is dropped
# (used to mute recognition while the assistant itself is speaking).
listen_flag = 1

def remove_think_tags(text):
    """Strip every ``<think>...</think>`` section (newlines included) from *text*.

    Reasoning models emit their chain-of-thought inside these tags; only the
    remaining answer should be spoken aloud.
    """
    return re.sub(r'<think>.*?</think>', '', text, flags=re.DOTALL)

# Text-to-speech via DashScope's sambert voice, played back through pygame.
def speak(text):
    """Synthesize *text* to a WAV file with DashScope and play it.

    Sets the module-level ``listen_flag`` to 0 for the duration of synthesis
    and playback so the sounddevice callback stops queueing microphone audio
    (otherwise the recognizer would hear the assistant's own voice), then
    restores it to 1.
    """
    # BUG FIX: the original assignments lacked this declaration, so they
    # created a function-local ``listen_flag`` and the global gate read by
    # the mic callback was never actually toggled.
    global listen_flag
    listen_flag = 0
    try:
        print("speak")
        result = SpeechSynthesizer.call(model='sambert-zhiwei-v1',
                                        voice='sambert-zhiwei-v1',
                                        text=text,
                                        sample_rate=44100,
                                        format='wav')

        # Only write/play when the service actually returned audio bytes.
        if result.get_audio_data() is not None:
            output_path = os.path.expanduser('~/ai-helper/output.wav')
            with open(output_path, 'wb') as f:
                f.write(result.get_audio_data())
            mixer.music.load(output_path)
            mixer.music.play()
            clock = pygame.time.Clock()   # hoisted: one Clock, not one per poll
            while mixer.music.get_busy():
                clock.tick(10)            # poll playback state ~10 times/second
            print("ok")
    except Exception as e:
        # Best-effort: log and fall through so listening always resumes.
        print(f"语音合成出错: {str(e)}")
    finally:
        # Restore the mic gate even if something non-Exception propagates.
        listen_flag = 1


# sounddevice input-stream callback: forwards raw mic bytes to the queue.
def callback(indata, frames, time, status):
    """Enqueue captured audio unless listening is muted.

    When the module-level ``listen_flag`` is 0 (assistant is speaking) the
    chunk is silently dropped instead of being queued for recognition.
    """
    global listen_flag
    if not listen_flag:
        return
    q.put(bytes(indata))


# Debug aid: list audio devices so the hardcoded device index below can be verified.
print(sd.query_devices()) 
# NOTE(review): device=1 and samplerate=44100 are hardcoded; the Vosk model is
# "small-cn-0.22" — confirm it performs acceptably with 44.1 kHz input on the
# target hardware.
with sd.RawInputStream(device=1,samplerate=44100, blocksize=8000, dtype='int16', channels=1, callback=callback):
    recognizer = vosk.KaldiRecognizer(model, 44100)
    print("请开始说话...")

    # Outer loop: passive listening for a wake word.
    while True:
        listen_flag = 1
        data = q.get()
        if recognizer.AcceptWaveform(data):
            result = recognizer.Result()
            text = json.loads(result)["text"]
            print("你说的是: " + text)
            # Easter egg: respond whenever "龙" (dragon) is recognized.
            if "龙" in text:
                speak("我是奶龙，我才是奶龙")
                
            # Wake words "你好"/"您好" ("hello") start a conversation session.
            if "你好" in text or "您好" in text:
                listen_flag = 0
                print("我在")
                speak("我在")
                
                count_none = 0  # consecutive empty recognition results (silence)
                # Inner loop: active conversation until farewell or silence.
                while True:
                    listen_flag = 1
                    data = q.get()
                    if recognizer.AcceptWaveform(data):
                        result = recognizer.Result()
                        text = json.loads(result)["text"]
                        # Vosk inserts spaces between Chinese tokens; strip them.
                        text = text.replace(" ", "")
                        print("你说的是: " + text)
                        listen_flag = 0
                        # NOTE(review): count_none is never reset on non-empty
                        # speech; harmless today only because every non-empty
                        # branch below breaks out of the loop — confirm intent.
                        if text == "":
                            count_none += 1
                    
                            
                        # Two silent results in a row: sign off and go back to sleep.
                        if count_none == 2:
                            
                            speak("我先下了，有事叫我，白白")
                            break

                        elif text == "再见":
                            speak("白白")
                            break
                        else:
                            speak("我正在思考")
                            # Ask DeepSeek-V3 (via SiliconFlow's OpenAI-compatible
                            # API) for a short, speakable answer; the prompt asks
                            # for <=150 chars, no brackets/extra content.
                            response = client.chat.completions.create(
                            model="deepseek-ai/DeepSeek-V3",
                            messages=[{"role": "user", "content": "接下来提问者要问你个问题，或跟你说句话,150字以内,不要其他内容,读出来的形式，不要括号,问题:"+text, "name": "user"}])
                            
                            print(response.choices[0].message.content)
                            # Strip any <think>...</think> reasoning before speaking.
                            continuation_text = remove_think_tags(response.choices[0].message.content)
                            speak(continuation_text)
                            # One answer per session, then return to wake-word mode.
                            break
