#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
语音对话助手
整合语音识别与DeepSeek大模型，实现全语音交互
通过空格键控制录音，自动识别并回复
"""

import asyncio
import json
import os
import queue
import subprocess
import sys
import tempfile
import threading
import time
from concurrent.futures import ThreadPoolExecutor

import edge_tts
from openai import OpenAI

# =============== Speech-recognition setup ===============

# Import the required third-party libraries, failing fast with an
# install hint when one is missing.
try:
    import pyaudio
except ImportError:
    print("错误：未找到PyAudio库。请安装：pip install pyaudio")
    sys.exit(1)

try:
    from vosk import Model, KaldiRecognizer, SetLogLevel
    # NOTE(review): 0 is Vosk's default verbosity, not errors-only;
    # use -1 to fully suppress Vosk log output — confirm intent.
    SetLogLevel(0)
except ImportError:
    print("错误：未找到Vosk库。请安装：pip install vosk==0.3.45")
    sys.exit(1)

# msvcrt is Windows-only; its availability decides whether space-bar
# push-to-talk is used (non-Windows falls back to Enter-key prompts).
try:
    import msvcrt
    WINDOWS = True
except ImportError:
    WINDOWS = False
    print("提示：非Windows系统，键盘控制功能可能受限")
# =============== Text-to-speech configuration ===============

# Directory where synthesized MP3 files are written.
TEMP_DIR = tempfile.gettempdir()

# Commonly used Chinese Edge-TTS voices, keyed by menu number
# (used by the "/voice <n>" console command).
CHINESE_VOICES = {
    "1": "zh-CN-XiaoxiaoNeural",   # female (default)
    "2": "zh-CN-YunxiNeural",      # male
    "3": "zh-CN-XiaoyiNeural",     # female (gentle)
    "4": "zh-CN-YunjianNeural",    # male (steady)
    "5": "zh-CN-XiaohanNeural",    # female (lively)
    "6": "zh-CN-XiaomoNeural",     # female (mature)
    "7": "zh-CN-XiaoxuanNeural",   # female (warm)
    "8": "zh-CN-YunyangNeural"     # male (mature)
}

# Voice used until the user picks another one.
DEFAULT_VOICE = CHINESE_VOICES["1"]

# =============== 系统音效 ===============

async def play_startup_tone():
    """Generate and play a short start-up cue via Edge TTS.

    Synthesizes a very short utterance ("."), saves it to a timestamped
    temp MP3 and hands it to the system player.

    Returns:
        True on success, False when synthesis or playback failed.
    """
    timestamp = int(time.time())
    sound_file = os.path.join(TEMP_DIR, f"startup_tone_{timestamp}.mp3")

    # rate/volume are constructor parameters of edge_tts.Communicate;
    # the original assigned them as plain attributes after construction,
    # which bypasses the documented API and its validation.
    communicate = edge_tts.Communicate(
        ".",
        "zh-CN-XiaoxiaoNeural",
        rate="+150%",    # speed up so the cue stays short
        volume="+0%",    # normal volume
    )

    try:
        # Synthesize and save the cue, then start playback.
        await communicate.save(sound_file)
        play_audio(sound_file)

        # Give the external player a moment before the caller
        # tears the event loop down.
        await asyncio.sleep(0.5)

        return True
    except Exception as e:
        print(f"播放启动提示音时出错: {e}")
        return False

# This variant uses edge_tts to generate the tone, which is more reliable.
def play_startup_sound():
    """Synchronous wrapper that plays the start-up cue.

    Uses asyncio.run, which creates, runs and *closes* a fresh event
    loop — the original built a loop with new_event_loop() and never
    closed it, leaking one loop (and its selector) per call.
    """
    try:
        asyncio.run(play_startup_tone())
    except Exception as e:
        print(f"播放启动音效时出错: {e}")

# =============== Speech-recognition module ===============

# Name of the bundled Vosk model directory (small Chinese model).
MODEL_NAME = "vosk-model-small-cn-0.22"
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))

# Support the two common layouts produced when unzipping the model archive.
MODEL_PATHS = [
    os.path.join(CURRENT_DIR, MODEL_NAME),                # flat: model files directly inside
    os.path.join(CURRENT_DIR, MODEL_NAME, MODEL_NAME)     # nested: archive extracted into a same-named folder
]

# Audio capture parameters: 16-bit mono PCM at 16 kHz,
# matching the rate handed to KaldiRecognizer below.
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 16000
CHUNK = 4000

def check_model_path():
    """Return the first candidate model directory that is usable.

    A directory is usable when it exists and contains the 'conf', 'am'
    and 'graph' subdirectories.  Exits the process when no candidate
    in MODEL_PATHS qualifies.
    """
    required_subdirs = ('conf', 'am', 'graph')

    for candidate in MODEL_PATHS:
        print(f"检查模型路径: {candidate}")
        if not os.path.exists(candidate):
            continue

        missing = [sub for sub in required_subdirs
                   if not os.path.exists(os.path.join(candidate, sub))]
        if missing:
            print(f"✗ 模型路径存在但缺少必要子目录: {', '.join(missing)}")
        else:
            print(f"✓ 找到有效的模型路径: {candidate}")
            return candidate

    print("错误: 无法找到有效的模型路径")
    print(f"请确保 {MODEL_NAME} 文件夹存在，并包含必要的子目录")
    sys.exit(1)

class VoiceSpeechRecognizer:
    """Push-to-talk speech recognizer: PyAudio capture + Vosk decoding.

    A PyAudio stream callback enqueues raw PCM chunks while ``recording``
    is True; a worker thread drains the queue into a KaldiRecognizer and
    collects finalized utterances in ``results`` until ``stop_recording``
    joins them into one transcription string.
    """

    def __init__(self):
        self.audio = pyaudio.PyAudio()
        self.model_path = check_model_path()
        self.audio_queue = queue.Queue()   # raw PCM chunks from the callback
        self.recording = False             # gates both the callback and the worker loop
        self.results = []                  # finalized utterances of the current take
        self.recognizer = None             # KaldiRecognizer, rebuilt per take
        self.stream = None
        self.process_thread = None
        self.last_result = ""              # last complete transcription handed out

        # Load the Vosk model; retry with forward slashes because some
        # Windows setups fail on backslash paths.
        try:
            self.model = Model(self.model_path)
            print("模型加载成功")
        except Exception as e:
            print(f"模型加载失败: {e}")
            try:
                alt_path = self.model_path.replace('\\', '/')
                self.model = Model(alt_path)
                print("使用替代路径格式成功加载模型")
            except Exception as retry_err:  # was shadowing the outer `e`
                print(f"所有模型加载尝试均失败: {retry_err}")
                sys.exit(1)

    def list_audio_devices(self):
        """Print all input-capable audio devices and return their IDs."""
        print("\n可用的音频输入设备:")
        print("=" * 50)
        print(f"{'ID':<4}| {'设备名称':<40}")
        print("-" * 50)

        input_devices = []
        for i in range(self.audio.get_device_count()):
            device_info = self.audio.get_device_info_by_index(i)
            if device_info.get('maxInputChannels') > 0:
                print(f"{i:<4}| {device_info.get('name')}")
                input_devices.append(i)

        print("=" * 50)
        return input_devices

    def get_default_device(self):
        """Return the default input device ID.

        Falls back to the first input-capable device when the host-API
        query fails; returns None when no input device exists at all.
        """
        try:
            info = self.audio.get_host_api_info_by_index(0)
            return info.get('defaultInputDevice')
        except Exception:
            # Was a bare `except:` — narrowed so KeyboardInterrupt and
            # SystemExit are no longer swallowed here.
            for i in range(self.audio.get_device_count()):
                device_info = self.audio.get_device_info_by_index(i)
                if device_info.get('maxInputChannels') > 0:
                    return i
            return None

    def audio_callback(self, in_data, frame_count, time_info, status):
        """PyAudio stream callback: queue the chunk while recording."""
        if self.recording:
            self.audio_queue.put(in_data)
        return (in_data, pyaudio.paContinue)

    def process_audio_data(self):
        """Worker loop: feed queued audio to Vosk and collect results.

        Exits once recording has stopped AND the queue has drained, so
        no captured audio is dropped at shutdown.
        """
        self.recognizer = KaldiRecognizer(self.model, RATE)

        while True:
            if not self.recording and self.audio_queue.empty():
                break

            try:
                # Timeout so the loop can re-check the exit condition
                # instead of blocking forever on an empty queue.
                data = self.audio_queue.get(block=True, timeout=0.5)

                if self.recognizer.AcceptWaveform(data):
                    result = json.loads(self.recognizer.Result())
                    text = result.get('text', '').strip()
                    if text:
                        print(f"\n识别结果: {text}")
                        self.results.append(text)
                else:
                    # Show the in-progress hypothesis on one console line.
                    partial = json.loads(self.recognizer.PartialResult())
                    partial_text = partial.get('partial', '')
                    if partial_text:
                        print(f"\r当前识别: {partial_text}", end='', flush=True)

                self.audio_queue.task_done()

            except queue.Empty:
                pass  # nothing queued yet; loop and re-check

    def start_recording(self, device_id):
        """Open an input stream on ``device_id`` and start the worker.

        No-op when a recording is already in progress.
        """
        if self.recording:
            return

        # Fresh state for this take.
        self.results.clear()
        self.audio_queue = queue.Queue()

        try:
            self.stream = self.audio.open(
                format=FORMAT,
                channels=CHANNELS,
                rate=RATE,
                input=True,
                input_device_index=device_id,
                frames_per_buffer=CHUNK,
                stream_callback=self.audio_callback
            )

            self.recording = True

            # Daemon so a stuck worker cannot block interpreter exit.
            self.process_thread = threading.Thread(target=self.process_audio_data)
            self.process_thread.daemon = True
            self.process_thread.start()

            print("\n✓ 开始录音...")

        except Exception as e:
            print(f"\n错误: 无法打开音频设备 (ID: {device_id})")
            print(f"详细信息: {str(e)}")
            self.recording = False

    def stop_recording(self):
        """Stop capture, flush the decoder and return the transcription.

        Returns "" when nothing was recognized or recording was inactive.
        """
        if not self.recording:
            return ""

        print("\n✓ 停止录音...")
        self.recording = False

        # Close the input stream first so no new chunks arrive.
        if self.stream is not None:
            self.stream.stop_stream()
            self.stream.close()
            self.stream = None

        # Let the worker drain the queue (bounded wait).
        if self.process_thread is not None:
            self.process_thread.join(timeout=2.0)
            self.process_thread = None

        # Flush whatever audio the decoder still buffers.
        if self.recognizer is not None:
            final_result = json.loads(self.recognizer.FinalResult())
            final_text = final_result.get('text', '').strip()
            if final_text:
                print(f"最终识别: {final_text}")
                self.results.append(final_text)

        if self.results:
            self.last_result = " ".join(self.results)
            return self.last_result

        return ""

    def close(self):
        """Stop any active recording and release PyAudio."""
        self.stop_recording()
        self.audio.terminate()

# =============== 文本转语音模块 ===============

async def text_to_speech(text, output_file, voice=DEFAULT_VOICE):
    """Synthesize ``text`` with the given Edge-TTS voice and save it
    to ``output_file`` as MP3."""
    await edge_tts.Communicate(text, voice).save(output_file)

def play_audio(file_path):
    """Play an audio file with the platform's default mechanism.

    The original shelled out via os.system with the path interpolated
    into the command string, which breaks on paths containing quotes and
    is shell-injectable.  os.startfile (Windows) and subprocess argument
    lists avoid the shell entirely.
    """
    if sys.platform == "win32":
        # Opens the file with its associated player; returns immediately,
        # like the original `start` command.
        os.startfile(file_path)
    elif sys.platform == "darwin":
        # afplay blocks until playback finishes, matching os.system's wait
        # (the single-worker executor relies on that to serialize clips).
        subprocess.run(["afplay", file_path])
    else:
        subprocess.run(["xdg-open", file_path])

async def process_tts(text, voice):
    """Synthesize ``text`` into a timestamped temp MP3, start playback,
    and return the path of the generated file."""
    audio_file = os.path.join(TEMP_DIR, f"ai_response_{int(time.time())}.mp3")

    # Render the speech, then hand the file to the system player.
    await text_to_speech(text, audio_file, voice)
    play_audio(audio_file)

    return audio_file

def run_async(coro):
    """Run ``coro`` to completion on a fresh event loop and return its result.

    The original created a loop with new_event_loop() and never closed
    it, leaking one loop (and its selector) per call — and this is called
    once per TTS chunk.  asyncio.run creates, runs and reliably closes
    the loop, and works on worker threads with no running loop.
    """
    return asyncio.run(coro)

def show_voice_options():
    """Print the numbered voice menu with a gender label per voice."""
    print("\n可用的语音选项：")
    for option, voice_name in CHINESE_VOICES.items():
        # "Xiao..." voices are female in this table, the rest male.
        label = "女声" if "Xiao" in voice_name else "男声"
        print(f"  {option}. {voice_name} ({label})")

# =============== 语音对话助手主类 ===============

class VoiceAssistant:
    """Top-level voice chat assistant.

    Wires the Vosk recognizer, the DeepSeek chat API (OpenAI-compatible)
    and Edge-TTS playback into a push-to-talk console loop.
    """

    # Seconds without a space auto-repeat event after which the key is
    # considered released and recording stops (Windows push-to-talk).
    SPACE_RELEASE_TIMEOUT = 0.5

    def __init__(self):
        # Speech-to-text front end.
        self.recognizer = VoiceSpeechRecognizer()

        # DeepSeek exposes an OpenAI-compatible endpoint.  The key is
        # read from the environment instead of being hard-coded: a key
        # committed to source is effectively public and must be rotated.
        api_key = os.environ.get("DEEPSEEK_API_KEY", "")
        if not api_key:
            print("警告: 未设置环境变量 DEEPSEEK_API_KEY，API请求将会失败")
        self.client = OpenAI(
            api_key=api_key,
            base_url="https://api.deepseek.com"
        )

        # Conversation history seeded with the system prompt.
        self.messages = [
            {"role": "system", "content": "你是一个友好的语音助手，能够用简洁易懂的中文回答用户问题。请保持回答简短明了，因为用户通过语音与你交互。"}
        ]

        # Microphone device ID, chosen during setup().
        self.device_id = None

        # Voice used for spoken replies.
        self.current_voice = DEFAULT_VOICE

        # Whether replies are spoken aloud (on by default).
        self.tts_enabled = True

        # Streaming state.  Previously these were created lazily inside
        # get_ai_response, risking AttributeError if read before the
        # first request.
        self.response_buffer = ""   # text accumulated since the last TTS flush
        self.streaming = False      # True while a stream thread is active

        # Single worker so TTS clips are synthesized and played in order.
        self.executor = ThreadPoolExecutor(max_workers=1)

    def setup(self):
        """Show the device list and let the user pick a microphone.

        Returns:
            True when a valid input device was selected, False otherwise.
        """
        print("\n===== 语音对话助手 =====")
        print("基于Vosk语音识别和DeepSeek AI")
        print("=" * 50)

        input_devices = self.recognizer.list_audio_devices()
        if not input_devices:
            print("错误：未找到任何输入设备")
            return False

        # Offer the default device when one is available; otherwise ask.
        default_device = self.recognizer.get_default_device()
        if default_device is not None and default_device in input_devices:
            print(f"\n默认使用设备 [{default_device}]")
            choice = input("按Enter使用默认设备，或输入设备ID选择其他设备: ").strip()
            if not choice:
                self.device_id = default_device
                return True
            raw_id = choice
        else:
            raw_id = input("\n请选择麦克风设备ID: ")

        # Shared validation path (the original duplicated this twice).
        try:
            self.device_id = int(raw_id)
        except ValueError:
            print("错误：请输入有效的数字ID")
            return False
        if self.device_id not in input_devices:
            print(f"错误：无效的设备ID {self.device_id}")
            return False
        return True

    def greet_user(self):
        """Print and speak the welcome message, waiting for playback."""
        greeting = "你好！欢迎使用AI语音助手。我可以帮你回答问题、提供信息或者陪你聊天。按住空格键开始说话，松开后我会听懂并回答你。随时说'帮助'可以获取更多指引。"
        print("\n" + greeting)

        print("\n正在播放问候语音...", end="", flush=True)

        # Synthesize + play on the worker so ordering matches later clips.
        future = self.executor.submit(run_async, process_tts(greeting, self.current_voice))

        # Wait for playback to finish, capped at 10 seconds.
        try:
            future.result(timeout=10)
            print(" 完成")
        except Exception as e:
            print(f"\n语音播放出现问题: {e}")

    def show_help(self):
        """Print usage instructions for voice and keyboard commands."""
        help_text = """
===== 语音助手使用说明 =====
1. 按住空格键开始说话，松开进行识别和回答
2. 语音命令：
   - "退出"或"再见"：结束对话
   - "帮助"：显示此帮助信息
   - "切换声音"：更改AI回复的语音
3. 键盘快捷键：
   - 空格键：按住说话，松开识别
   - ESC键：退出程序
   - H键：显示帮助
============================"""
        print(help_text)

    def get_ai_response(self, query):
        """Kick off a (streaming) reply to ``query``.

        Empty input and the exit phrases are answered synchronously.
        Real queries start a background stream (see process_stream) and
        return a placeholder immediately; while ``self.streaming`` is
        True the caller must NOT feed the returned string to TTS — the
        stream thread speaks the answer itself.
        """
        if not query.strip():
            return "抱歉，我没有听清您说的话。"

        # Exit phrases never reach the API or the history.
        if query in ["退出", "再见"]:
            return "再见，很高兴为您服务！"

        self.messages.append({"role": "user", "content": query})

        try:
            stream = self.client.chat.completions.create(
                model="deepseek-chat",
                messages=self.messages,
                max_tokens=500,
                stream=True
            )

            self.response_buffer = ""
            self.streaming = True

            # Daemon thread so a hung stream cannot block interpreter exit.
            threading.Thread(target=self.process_stream, args=(stream,), daemon=True).start()

            return "[开始生成回答...]"

        except Exception as e:
            return f"请求出错：{str(e)}"

    def process_stream(self, stream):
        """Consume a streaming completion: print, buffer and speak chunks.

        Runs on the background thread started by get_ai_response.  Text
        is flushed to TTS in ~30-character chunks so playback begins
        before the full answer has arrived; the complete answer is then
        appended to the conversation history.
        """
        full_response = ""
        try:
            for chunk in stream:
                content = chunk.choices[0].delta.content
                if content:
                    self.response_buffer += content
                    full_response += content

                    # Echo tokens to the console as they arrive.
                    print(content, end='', flush=True)

                    # Flush the buffer to TTS once it is long enough.
                    if len(self.response_buffer) >= 30:
                        if self.tts_enabled:
                            self.executor.submit(
                                run_async,
                                process_tts(self.response_buffer, self.current_voice)
                            )
                        self.response_buffer = ""

            # Speak whatever is left over.
            if self.response_buffer and self.tts_enabled:
                self.executor.submit(
                    run_async,
                    process_tts(self.response_buffer, self.current_voice)
                )

            self.messages.append({"role": "assistant", "content": full_response})

        except Exception as e:
            print(f"\n流式处理错误: {e}")
        finally:
            self.streaming = False

    def process_command(self, command):
        """Handle a typed console command.

        Currently supports ``/voice <n>`` to switch the TTS voice.
        Returns True when a command was successfully applied.
        """
        if command.startswith('/voice '):
            try:
                voice_option = command.split(' ')[1]
                if voice_option in CHINESE_VOICES:
                    self.current_voice = CHINESE_VOICES[voice_option]
                    voice_type = "女声" if "Xiao" in self.current_voice else "男声"
                    print(f"\n已切换到语音: {self.current_voice} ({voice_type})")

                    # Speak a short sample so the user hears the new voice.
                    test_text = "您好，这是新选择的语音。"
                    print(f"播放测试语音: '{test_text}'")
                    self.executor.submit(run_async, process_tts(test_text, self.current_voice))
                    return True
                else:
                    print(f"\n无效的语音选项: {voice_option}")
                    show_voice_options()
            except (IndexError, ValueError):
                print("\n语音切换命令格式错误，请使用 /voice <编号>")
                show_voice_options()
        return False

    def _respond_to(self, user_input):
        """Get an AI reply for recognized speech and speak it if appropriate."""
        response = self.get_ai_response(user_input)
        print(f"\nAI: {response}")

        # Streaming replies are spoken chunk-by-chunk inside
        # process_stream; submitting here would read the
        # "[开始生成回答...]" placeholder out loud (original bug).
        if self.tts_enabled and not self.streaming:
            self.executor.submit(run_async, process_tts(response, self.current_voice))

    def _windows_push_to_talk(self):
        """Run one push-to-talk interaction on Windows.

        Returns False when the user pressed Esc (caller should exit),
        True after one utterance (or help display) has been handled.

        msvcrt cannot observe key release, so release is inferred from
        keyboard auto-repeat: while space is held the console keeps
        receiving space characters; once none arrive for
        SPACE_RELEASE_TIMEOUT seconds the key is considered released.
        (The original tested `not msvcrt.kbhit() and not msvcrt.getch() == b' '`,
        which calls the *blocking* getch() exactly when no key is
        pending and therefore hangs.)
        """
        is_speaking = False
        last_space = 0.0

        while True:
            if msvcrt.kbhit():
                key = msvcrt.getch()

                # H — show help.
                if key in (b'h', b'H'):
                    self.show_help()

                # Space — start (or keep alive) the recording.
                elif key == b' ':
                    if not is_speaking:
                        is_speaking = True
                        self.recognizer.start_recording(self.device_id)
                        print("正在录音，松开空格键结束...", flush=True)
                    # Every auto-repeated space refreshes the hold timer.
                    last_space = time.time()

                # Esc — quit the program.
                elif key == b'\x1b':
                    print("\n退出程序...")
                    return False

            elif is_speaking and time.time() - last_space > self.SPACE_RELEASE_TIMEOUT:
                # No space events for a while: treat the key as released.
                user_input = self.recognizer.stop_recording()
                if user_input:
                    self._respond_to(user_input)
                return True

            # Short sleep to keep CPU usage low while polling.
            time.sleep(0.05)

    def run(self):
        """Main interactive loop (push-to-talk on Windows, Enter elsewhere)."""
        if not self.setup():
            return

        print("\n" + "=" * 50)
        print("🎤 AI语音对话助手已启动 🤖")
        print("=" * 50)

        play_startup_sound()
        self.greet_user()

        # Give the user a moment to read the greeting.
        time.sleep(1)

        self.show_help()

        try:
            while True:
                print("\n等待您按空格键开始说话...", flush=True)

                if WINDOWS:
                    if not self._windows_push_to_talk():
                        return  # Esc pressed; finally-block cleans up
                else:
                    # Fallback: Enter starts and stops recording.
                    print("按Enter开始录音...")
                    input()

                    self.recognizer.start_recording(self.device_id)
                    print("正在录音，按Enter结束...")
                    input()

                    user_input = self.recognizer.stop_recording()
                    if user_input:
                        self._respond_to(user_input)

                # Optional typed command between utterances.
                command = input("\n输入命令(或按Enter继续): ").strip()
                if command:
                    if command.lower() in ['exit', 'quit', '退出']:
                        print("\n感谢使用，再见！")
                        break

                    self.process_command(command)

        except KeyboardInterrupt:
            print("\n程序被中断")
        finally:
            # Always release audio resources and the TTS worker.
            self.recognizer.close()
            self.executor.shutdown()
    
def main():
    """Program entry point: build the assistant and run its console loop."""
    VoiceAssistant().run()


if __name__ == "__main__":
    main()