'''
1. 小悬浮图标-麦克风接收语音输入（单声道，16khz），支持拖动;
2. 麦克风持续接收语音输入，添加静音判断，静音数据不处理（连续10s静音，自动进入未唤醒状态）；
3. ws持续连接，发送语音数据，接收语音识别结果；
4. 两种状态：未唤醒，已唤醒；
5. 未唤醒状态下，接收语音识别结果，进行唤醒词判断，唤醒后进入已唤醒状态；
6. 已唤醒状态下，接收语音识别结果，进行语义理解，执行对应操作；
'''
import asyncio
import base64
import json
import threading
import pyaudio
import websockets
import sys
import numpy as np
import pystray
import tkinter as tk
from tkinter import Canvas
import math
import os
import queue
import logging
import platform
from PIL import Image, ImageDraw

# Logging: mirror every record to a log file and to stdout.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler("voice_assistant.log"),
        logging.StreamHandler(sys.stdout)
    ]
)
logger = logging.getLogger("VoiceAssistant")

# Configuration
WS_SERVER = "ws://192.168.2.5:8765"  # Backend WebSocket endpoint
WAKE_WORDS = ["小爱", "同学", "小爱同学"]  # Accepted wake-word variants
SAMPLE_RATE = 16000       # 16 kHz — standard sample rate for speech recognition
CHUNK_SIZE = 480          # 30 ms of audio per chunk (480 = 0.03 * 16000 samples)
FORMAT = pyaudio.paInt16  # 16-bit PCM
CHANNELS = 1              # Mono recording

MICROPHONE_INPUT = True   # Capture audio from the microphone
DEBUG_MODE = True         # Debug-mode flag
SILENCE_THRESHOLD = 300   # RMS level below which a frame counts as silence
AUDIO_GAIN = 1.8          # Audio gain factor
NOISE_THRESHOLD = 300     # Noise threshold
SILENCE_TIMEOUT = 10.0    # Seconds of continuous silence before de-waking

# Frame duration in milliseconds, derived from the audio constants instead of
# the previous hard-coded `480 / 16` (same value, but stays correct if
# CHUNK_SIZE or SAMPLE_RATE change).
_FRAME_DURATION_MS = CHUNK_SIZE / (SAMPLE_RATE / 1000)
MAX_SILENCE_FRAMES = int(SILENCE_TIMEOUT * 1000 / _FRAME_DURATION_MS)  # silent frames before timeout

# Reconnect strategy (exponential backoff)
RECONNECT_BASE_DELAY = 1.0  # Base reconnect delay (seconds)
MAX_RECONNECT_DELAY = 60.0  # Maximum reconnect delay (seconds)

# 状态管理
class AppState:
    """Shared mutable state used by the UI, the recorder, and the WebSocket client."""

    def __init__(self):
        # Connection / recording flags
        self.server_connected = False      # True while the WebSocket is connected
        self.is_recording = False          # True while audio capture is active
        # Wake-word state machine
        self.wake_word_detected = False    # set once a wake word has fired
        self.waiting_for_wake = True       # start in the waiting-for-wake state
        # Recognition output and executed commands
        self.current_transcript = ""       # latest transcript from the server
        self.active_commands = []          # history of command results for the UI
        # Audio pipeline
        self.audio_queue = queue.Queue()   # speech segments awaiting upload
        self.wave_bars = [5] * 30          # waveform bar heights for the UI
        self.silence_counter = 0           # consecutive silent frames observed
        # WebSocket bookkeeping
        self.reconnect_count = 0           # consecutive failed connect attempts
        self.ws_connection = None          # active WebSocket connection, if any

app_state = AppState()

# 指令处理函数
def process_command(command_text):
    """Dispatch a recognized transcript to its action.

    Matching is substring-based, checked in priority order; the outcome
    message is appended to ``app_state.active_commands`` for the UI.
    """
    logger.info(f"处理指令: {command_text}")

    if "时间" in command_text:
        from datetime import datetime
        current = datetime.now().strftime("%H:%M")
        app_state.active_commands.append(f"当前时间: {current}")
        return
    if "天气" in command_text:
        app_state.active_commands.append("已查询天气信息")
        return
    if any(word in command_text for word in ("暂停", "停止")):
        app_state.is_recording = False
        app_state.active_commands.append("已停止录音")
        return
    if any(word in command_text for word in ("开始", "继续")):
        app_state.is_recording = True
        app_state.active_commands.append("已开始录音")
        return
    # Nothing matched: record that the command was not recognized.
    app_state.active_commands.append("未识别指令")

# 创建悬浮麦克风窗口
class FloatingMicWindow:
    """Frameless, draggable, always-on-top Tk window showing microphone status.

    Shows a static microphone icon, a recording/connection indicator light,
    a crude text "waveform", and the latest executed command. Left-click
    toggles recording; left-drag moves the window.
    """

    def __init__(self, root):
        self.root = root
        root.title("Voice Assistant")
        root.attributes('-topmost', True)
        root.overrideredirect(True)  # borderless window
        root.geometry('120x120+50+50')  # initial size and position
        root.configure(bg='black')
        
        # Drag support: remember where inside the window the press happened
        self.drag_data = {"x": 0, "y": 0, "dragging": False}
        root.bind("<ButtonPress-1>", self.on_drag_start)
        root.bind("<ButtonRelease-1>", self.on_drag_stop)
        root.bind("<B1-Motion>", self.on_drag_motion)
        
        # Canvas hosting all drawing
        self.canvas = Canvas(root, width=120, height=120, bg='black', highlightthickness=0)
        self.canvas.pack()
        
        # Draw the static microphone icon
        self.draw_mic()
        
        # Text item used as a simple waveform visualization
        self.wave_id = self.canvas.create_text(60, 110, text="", fill="white", font=("Arial", 8))
        
        # Recording-state indicator light
        self.indicator_id = self.canvas.create_oval(90, 20, 100, 30, fill="red")
        
        # Server-connection indicator dot
        self.conn_id = self.canvas.create_text(20, 20, text="●", fill="gray", font=("Arial", 12))
        
        # Area showing the most recent command result
        self.command_label = self.canvas.create_text(60, 95, text="", fill="cyan", 
                                                   font=("Arial", 8), width=110)
        
        # Initial window transparency
        self.update_opacity(0.6)
        
        # Clicking the canvas toggles recording.
        # NOTE(review): this fires alongside the drag bindings above, so a
        # drag that starts on the canvas also toggles recording — confirm
        # this is intended.
        self.canvas.bind("<Button-1>", self.toggle_recording)
        
        # Kick off the periodic UI refresh loop
        self.update_ui()
    
    def draw_mic(self):
        """Draw the static microphone icon onto the canvas."""
        # Microphone body
        self.canvas.create_oval(40, 30, 80, 70, fill="gray", outline="white")
        self.canvas.create_oval(50, 40, 70, 60, fill="dark gray", outline="white")
        
        # Microphone stand
        self.canvas.create_rectangle(58, 25, 62, 30, fill="gray", outline="white")
        self.canvas.create_rectangle(50, 70, 70, 73, fill="gray", outline="white")
    
    def update_ui(self):
        """Refresh all indicators from app_state; reschedules itself every 100 ms."""
        try:
            # Indicator light: green = recording, blue = connected, red = idle
            if app_state.is_recording:
                indicator_color = "green"
            elif app_state.server_connected:
                indicator_color = "blue"
            else:
                indicator_color = "red"
            self.canvas.itemconfig(self.indicator_id, fill=indicator_color)
            
            # Connection indicator dot
            if app_state.server_connected:
                self.canvas.itemconfig(self.conn_id, fill="green", text="●")
            else:
                self.canvas.itemconfig(self.conn_id, fill="gray", text="●")
                
            # Command display
            if app_state.active_commands:
                # Show the most recent command only
                command_text = app_state.active_commands[-1][:20]  # first 20 chars
                self.canvas.itemconfig(self.command_label, text=command_text)
            else:
                self.canvas.itemconfig(self.command_label, text="")
            
            # Waveform: render the last 10 bar heights as runs of '|'
            wave_text = "".join(['|' * min(10, math.ceil(h/10)) for h in app_state.wave_bars[-10:]])
            self.canvas.itemconfig(self.wave_id, text=wave_text)
            
            # Window opacity reflects the current state
            if app_state.wake_word_detected:
                self.update_opacity(0.9)
            elif app_state.server_connected:
                self.update_opacity(0.7)
            else:
                self.update_opacity(0.6)
        except Exception as e:
            logger.error(f"UI更新错误: {str(e)}")
        
        # Schedule the next refresh in 100 ms
        self.root.after(100, self.update_ui)
    
    def update_opacity(self, alpha):
        """Set window transparency; silently ignored where '-alpha' is unsupported."""
        try:
            self.root.attributes('-alpha', alpha)
        except Exception:
            pass
    
    def toggle_recording(self, event):
        """Toggle recording on click; start the WebSocket client if not connected."""
        app_state.is_recording = not app_state.is_recording
        logger.info(f"手动{'开始' if app_state.is_recording else '停止'}录音")
        
        # If recording was started manually, make sure the WebSocket runs too
        if app_state.is_recording and not app_state.server_connected:
            threading.Thread(target=run_async_websocket, daemon=True).start()
    
    def on_drag_start(self, event):
        """Record the press offset and enter dragging mode."""
        self.drag_data["x"] = event.x
        self.drag_data["y"] = event.y
        self.drag_data["dragging"] = True
    
    def on_drag_stop(self, event):
        """Leave dragging mode."""
        self.drag_data["dragging"] = False
    
    def on_drag_motion(self, event):
        """Move the window so the original press point stays under the cursor."""
        if self.drag_data["dragging"]:
            x = self.root.winfo_x() - self.drag_data["x"] + event.x
            y = self.root.winfo_y() - self.drag_data["y"] + event.y
            self.root.geometry(f"+{x}+{y}")
# 语音录制和处理
class VoiceRecorder:
    """Continuously captures microphone audio and queues speech segments.

    A background thread reads 30 ms chunks, computes their RMS level, and
    buffers them. Once enough consecutive low-level frames follow speech,
    the buffered segment is pushed onto ``app_state.audio_queue`` for
    upload; buffers containing only silence are discarded. Long silence
    (``MAX_SILENCE_FRAMES``) while awake returns the app to the
    waiting-for-wake state.
    """

    def __init__(self):
        self.is_running = True         # cleared by stop() to end the thread
        self.audio = None              # PyAudio instance (None if init failed)
        self.stream = None             # open input stream, if any
        self.recording_thread = None

        # Initialize PyAudio; on failure leave self.audio as None so
        # record() skips microphone setup instead of crashing.
        try:
            self.audio = pyaudio.PyAudio()
        except Exception as e:
            logger.error(f"无法初始化PyAudio: {str(e)}")
            return

    def start_recording(self):
        """Start the background recording thread (no-op if already running)."""
        if self.recording_thread and self.recording_thread.is_alive():
            logger.info("录音线程已在运行")
            return
        
        self.recording_thread = threading.Thread(target=self.record, daemon=True)
        self.recording_thread.start()
        logger.info("录音线程已启动")
    
    def record(self):
        """Recording thread main function: capture, segment, and queue audio."""
        import time  # local import: only this worker thread needs it

        if MICROPHONE_INPUT and self.audio:
            try:
                # Pick the first device with input capability
                device_index = None
                for i in range(self.audio.get_device_count()):
                    dev_info = self.audio.get_device_info_by_index(i)
                    if dev_info["maxInputChannels"] > 0:
                        device_index = i
                        break
                
                if device_index is None:
                    logger.error("错误: 未找到音频设备!")
                    return
        
                self.stream = self.audio.open(
                    format=FORMAT,
                    channels=CHANNELS,
                    rate=SAMPLE_RATE,
                    input=True,
                    input_device_index=device_index,
                    frames_per_buffer=CHUNK_SIZE
                )
                logger.info("麦克风输入已启用")
            except Exception as e:
                logger.error(f"无法打开麦克风: {str(e)}")
                return
        
        # Reset silence tracking for this session
        app_state.silence_counter = 0
        audio_buffer = []              # chunks of the current speech segment
        consecutive_silence = 0        # consecutive low-level frames seen
        MAX_CONSECUTIVE_SILENCE = 30   # ~0.9 s of low level ends an utterance
        has_audio = False              # whether the buffer holds real speech
        
        logger.info("开始持续处理音频...")
        
        while self.is_running:
            try:
                if MICROPHONE_INPUT and self.stream:
                    # Read one chunk of audio
                    data = self.stream.read(CHUNK_SIZE, exception_on_overflow=False)
                    
                    if len(data) > 0:
                        audio_data = np.frombuffer(data, dtype=np.int16)
                        
                        # RMS level of the chunk (float math avoids int overflow)
                        if len(audio_data) > 0:
                            squared = np.square(audio_data.astype(np.float32))
                            mean_val = np.mean(squared)
                            rms = np.sqrt(mean_val) if mean_val > 0 else 0
                        else:
                            rms = 0
                        
                        # Store a 1-100 waveform value for the UI
                        waveform = min(100, max(1, int(rms / 30)))
                        app_state.wave_bars.append(waveform)
                        if len(app_state.wave_bars) > 30:
                            app_state.wave_bars.pop(0)
                        
                        # Buffer every chunk; silence-only buffers are dropped later
                        audio_buffer.append(data)
                    
                        # Speech detection drives the silence bookkeeping
                        speaking = rms > SILENCE_THRESHOLD
                        if speaking:
                            consecutive_silence = 0
                            has_audio = True  # buffer now contains real speech
                            app_state.silence_counter = 0
                        else:
                            consecutive_silence += 1
                            app_state.silence_counter += 1
                            
                            # Enough trailing silence: flush the current segment
                            if consecutive_silence >= MAX_CONSECUTIVE_SILENCE and audio_buffer:
                                if has_audio:
                                    # Send the whole utterance as one payload
                                    combined_data = b''.join(audio_buffer)
                                    app_state.audio_queue.put(combined_data)
                                else:
                                    # Drop pure-silence data
                                    logger.debug("清除静音数据")
                                    
                                # Reset the buffer and related flags
                                audio_buffer = []
                                consecutive_silence = 0
                                has_audio = False
                            
                            # Long silence while awake: fall back to waiting-for-wake
                            if app_state.silence_counter > MAX_SILENCE_FRAMES and app_state.wake_word_detected:
                                logger.info("检测到长时间静音，转为待唤醒状态")
                                app_state.is_recording = False
                                app_state.wake_word_detected = False
                                app_state.waiting_for_wake = True
                                app_state.silence_counter = 0
                else:
                    # No usable stream: sleep briefly instead of busy-spinning
                    time.sleep(0.05)
            except Exception as e:
                logger.error(f"音频处理错误: {str(e)}")
                # Avoid a tight error/log loop if the stream keeps failing
                time.sleep(0.1)
        
        logger.info("停止处理音频")
    
    def stop(self):
        """Stop recording and release audio resources (best effort)."""
        self.is_running = False
        if self.stream:
            try:
                self.stream.stop_stream()
                self.stream.close()
            except Exception:  # narrowed from bare except (keeps KeyboardInterrupt)
                pass
        if self.audio:
            try:
                self.audio.terminate()
            except Exception:
                pass
        logger.info("录音器已停止")
        
# WebSocket连接管理
def contains_wake_word(text, wake_words=None):
    """Return True if *text* contains any wake word.

    Args:
        text: Transcript text to scan; falsy values (None, "") return False.
        wake_words: Optional iterable of wake words. Defaults to the
            module-level WAKE_WORDS list, so existing callers are unchanged.

    Matching is a case-insensitive substring search.
    """
    if not text:
        return False
    if wake_words is None:
        wake_words = WAKE_WORDS
    text = text.lower().strip()
    return any(wake.lower() in text for wake in wake_words)

async def async_websocket_client():
    """Simplified async WebSocket client.

    Runs forever: connects to WS_SERVER, spawns the send/receive tasks, and
    on any failure reconnects with exponential backoff capped at
    MAX_RECONNECT_DELAY. Connection state is mirrored into app_state.
    """
    logger.info("启动异步WebSocket客户端...")
    
    while True:
        # Reset connection state before each attempt
        app_state.server_connected = False
        
        # Exponential-backoff delay (exponent capped at 10, total capped at max)
        reconnect_delay = min(
            RECONNECT_BASE_DELAY * (2 ** min(app_state.reconnect_count, 10)),
            MAX_RECONNECT_DELAY
        )
        
        # Only delay when this is a retry, not the first attempt
        if app_state.reconnect_count > 0:
            logger.info(f"重连尝试 {app_state.reconnect_count}，等待 {reconnect_delay:.1f}秒后重连...")
            await asyncio.sleep(reconnect_delay)
        
        try:
            # Connect to the WebSocket server
            logger.info(f"尝试连接到服务器: {WS_SERVER}")
            async with websockets.connect(WS_SERVER) as ws:
                app_state.ws_connection = ws
                app_state.server_connected = True
                app_state.reconnect_count = 0  # successful connect resets backoff
                logger.info(f"成功连接到服务器 {WS_SERVER}")
                
                # Run the receive and send loops concurrently
                receive_task = asyncio.create_task(receive_messages(ws))
                send_task = asyncio.create_task(send_audio_data(ws))
                
                # Block until the tasks finish (connection closed or error)
                await asyncio.gather(send_task, receive_task)
                
        except (ConnectionRefusedError, websockets.exceptions.InvalidURI, OSError) as e:
            logger.warning(f"服务器连接失败: {str(e)}")
            app_state.reconnect_count += 1
        except asyncio.TimeoutError:
            logger.warning(f"连接服务器超时")
            app_state.reconnect_count += 1
        except websockets.exceptions.ConnectionClosed as e:
            logger.warning(f"连接被关闭: {e.reason if e.reason else '未知原因'}")
            app_state.reconnect_count += 1
        except Exception as e:
            logger.error(f"意外连接错误: {str(e)}")
            app_state.reconnect_count += 1
        finally:
            # Always clear connection state before the next loop iteration
            app_state.server_connected = False
            app_state.ws_connection = None

async def receive_messages(ws):
    """Async task: receive server messages and drive the wake/command logic.

    Expects JSON messages. 'full_transcript' messages either trigger wake-word
    detection (while waiting for wake) or are dispatched to process_command
    (while awake). Runs until the connection closes or an error occurs.
    """
    logger.info("消息接收任务已启动")
    try:
        while True:
            message = await ws.recv()
            data = json.loads(message)
            # logger.info(f"收到消息: {data}")
            
            # Handle transcription results
            if data['type'] == 'full_transcript':
                app_state.current_transcript = data['text']
                logger.info(f"转写结果: {data['text']}")
                
                # Wake-state handling
                if app_state.waiting_for_wake:
                    if contains_wake_word(data['text']):
                        # Wake word found: switch to the awake state
                        logger.info("检测到唤醒词，触发唤醒")
                        app_state.wake_word_detected = True
                        app_state.waiting_for_wake = False
                        app_state.silence_counter = 0
                        app_state.is_recording = True
                        play_notification_sound()
                else:
                    if data['text'].strip():  # ignore empty transcripts
                        process_command(data['text'])
                        app_state.silence_counter = 0
    except websockets.exceptions.ConnectionClosed:
        logger.info("WebSocket连接已关闭")
    except Exception as e:
        logger.error(f"接收消息出错: {str(e)}")

async def send_audio_data(ws):
    """Async task: drain app_state.audio_queue and send segments to the server.

    The recorder thread fills the queue; this coroutine polls it with
    get_nowait() instead of the previous blocking get(timeout=0.05), which
    stalled the whole event loop (including receive_messages) for up to
    50 ms per idle cycle.
    """
    logger.info("音频发送任务已启动")
    
    try:
        while True:
            # Non-blocking poll of the thread-side queue
            try:
                data = app_state.audio_queue.get_nowait()
            except queue.Empty:
                # Nothing queued: yield to the event loop briefly, then retry
                await asyncio.sleep(0.01)
                continue
            
            try:
                if data:
                    # Send the segment as base64-encoded JSON
                    await ws.send(json.dumps({
                        'type': 'full_audio',
                        'data': base64.b64encode(data).decode('utf-8'),
                        'sample_rate': SAMPLE_RATE
                    }))
            except Exception as e:
                logger.error(f"音频数据发送失败: {str(e)}")
                app_state.server_connected = False
                break
    except Exception as e:
        logger.error(f"音频发送任务出错: {str(e)}")
        app_state.server_connected = False
        
def run_async_websocket():
    """Thread entry point: drive the WebSocket client on a dedicated event loop."""
    event_loop = asyncio.new_event_loop()
    asyncio.set_event_loop(event_loop)
    event_loop.run_until_complete(async_websocket_client())
        
def play_notification_sound():
    """Play a short notification sound without blocking the caller.

    This is invoked from the asyncio receive loop; winsound.Beep, afplay and
    aplay all block for the duration of the sound, so actual playback runs
    on a daemon thread to keep the event loop responsive.
    """
    def _play():
        try:
            logger.info("播放提示音")
            system = platform.system()
            # Windows
            if system == "Windows":
                import winsound
                winsound.Beep(1000, 200)  # 1000 Hz for 200 ms
            # macOS
            elif system == "Darwin":
                os.system('afplay /System/Library/Sounds/Ping.aiff')
            # Linux / other POSIX (assumes ALSA's aplay is available)
            else:
                os.system('aplay -q /usr/share/sounds/speech-dispatcher/test.wav')
        except Exception as e:
            logger.error(f"无法播放提示音: {str(e)}")

    threading.Thread(target=_play, daemon=True).start()

# 创建系统托盘图标
def create_tray_icon():
    """Build the pystray system-tray icon and menu; return None on failure."""
    try:
        def create_image():
            """Render the 64x64 tray-icon bitmap."""
            width, height = 64, 64
            # Dark gray background
            image = Image.new('RGB', (width, height), (40, 40, 40))
            dc = ImageDraw.Draw(image)
            
            # Draw the microphone icon
            microphone_color = (0, 200, 0)  # green microphone
            dc.ellipse((15, 15, 50, 50), fill=microphone_color)
            dc.rectangle((32, 10, 34, 25), fill="white")
            dc.rectangle((20, 45, 45, 48), fill="white")
            
            return image
        
        def exit_app():
            """Quit the application immediately (os._exit skips normal cleanup)."""
            tray_icon.stop()
            os._exit(0)
            
        # Tray menu: open log, restart connection, quit
        menu = (
            pystray.MenuItem('打开日志文件', lambda: os.startfile("voice_assistant.log") if platform.system() == "Windows" else os.system("open voice_assistant.log")),
            pystray.MenuItem('重启连接', lambda: threading.Thread(target=run_async_websocket, daemon=True).start()),
            pystray.MenuItem('退出', exit_app),
        )
        
        # Create the tray icon itself
        image = create_image()
        tray_icon = pystray.Icon("voice_assistant", image, "语音助手", menu)
        
        return tray_icon
    except Exception as e:
        logger.error(f"创建系统托盘失败: {str(e)}")
        return None

# 主函数
def main():
    """Application entry point: build the UI, start the recorder and WebSocket."""
    global root
    
    # Create the (hidden) Tk root window
    root = tk.Tk()
    root.withdraw()  # hide the main window; only the floating window is shown
    
    # Create the floating microphone window
    floating_window = FloatingMicWindow(tk.Toplevel(root))
    
    # Create the system-tray icon (runs on its own thread)
    tray_icon = create_tray_icon()
    if tray_icon:
        tray_thread = threading.Thread(target=tray_icon.run, daemon=True)
        tray_thread.start()
        logger.info("系统托盘图标已启动")
    
    # Start audio capture
    recorder = VoiceRecorder()
    recorder.start_recording()
    
    # Start the WebSocket connection (kept running for the app's lifetime)
    threading.Thread(target=run_async_websocket, daemon=True).start()
    
    logger.info("语音助手客户端已启动，等待唤醒...")
    
    # Enter the Tk main loop (blocks until the window is destroyed)
    root.mainloop()
    
    # Clean up resources on exit
    recorder.stop()

if __name__ == "__main__":
    logger.info("语音助手客户端启动中...")
    # Wake-word detection happens server-side (removed useless f-string prefix)
    logger.info("唤醒词通过服务端检测")
    logger.info(f"服务器地址: {WS_SERVER}")
    
    # On Windows, opt in to DPI awareness so Tk renders crisply on
    # high-DPI displays.
    if platform.system() == "Windows":
        try:
            import ctypes
            ctypes.windll.shcore.SetProcessDpiAwareness(1)
            logger.info("已设置DPI感知")
        except Exception as e:
            logger.error(f"DPI感知设置失败: {str(e)}")
    
    # Launch the application; exit code 0 on Ctrl-C, 1 on fatal errors.
    try:
        main()
    except KeyboardInterrupt:
        logger.info("\n程序已退出")
        sys.exit(0)
    except Exception as e:
        logger.error(f"致命错误: {str(e)}")
        import traceback
        logger.error(traceback.format_exc())
        sys.exit(1)