import base64
import hashlib
import hmac
import json
import os
# 添加导入generalRequest模块
import sys
import threading
import time
from urllib.parse import urlencode, quote

import pyaudio
from websocket import create_connection, WebSocketException  # 改用官方推荐的连接方式

from config import XFYUN_APP_ID, XFYUN_API_KEY, XFYUN_API_SECRET, VOICE_APP_ID, VOICE_API_KEY, \
    VOICE_API_SECRET

sys.path.append(os.path.join(os.path.dirname(__file__), 'voiceLLM'))
import Mapper.voiceDB_mapper as generalRequest

# 导入新的音频录制/播放工具
from utils.tts_player import speak_text
from utils.silero_vad import SileroVAD  # 导入Silero VAD

# 新增：导入Agent和工具相关模块
from controller.serviceController import create_websocket_tools, get_llm_response
from Tools.websocket_tool import set_websocket_server, set_voice_worker
from Tools.voice_management_tool import set_global_instances


class VoiceWorker:
    def __init__(self, pet_window, websocket_server):
        """Set up audio parameters, ASR/voiceprint config, VAD and Agent tools.

        :param pet_window: desktop-pet window; used to switch pet state animations
        :param websocket_server: WebSocket server used to push messages to the frontend
        """
        self.pet_window = pet_window
        self.websocket_server = websocket_server
        self.running = False
        self.current_user = None  # identity of the current speaker (None until recognized)
        self.user_authenticated = False  # whether the speaker passed voiceprint auth
        self.voice_verification_file = None  # path of the saved voiceprint audio clip

        # Audio parameters (must strictly match the Spark/iFlytek ASR requirements).
        self.RATE = 16000  # fixed 16 kHz sample rate
        self.CHANNELS = 1  # mono
        self.FORMAT = pyaudio.paInt16  # 16-bit samples
        self.FRAME_SIZE = 640  # 20 ms per frame (16000*20ms*16bit*1ch/8 = 640 bytes)

        # Recording / recognition control state.
        self.listening = False
        self.asr_result = ""  # final recognized text
        self.asr_final_received = threading.Event()
        self.is_connected = False  # WebSocket connection state
        self.ws = None  # WebSocket connection object
        self.session_id = None  # session id (returned by the server)
        self.last_interaction_time = time.time()  # timestamp of the last interaction

        # Event used to trigger an interaction round.
        self.interaction_event = threading.Event()

        # iFlytek speech-to-text credentials.
        self.xfyun_config = {
            'APPId': XFYUN_APP_ID,
            'APIKey': XFYUN_API_KEY,
            'APISecret': XFYUN_API_SECRET,
            'groupId': '001'  # default group id
        }

        # iFlytek voiceprint-service credentials.
        self.voice_test = {
            'APPId': VOICE_APP_ID,
            'APIKey': VOICE_API_KEY,
            'APISecret': VOICE_API_SECRET,
            'groupId': '001'
        }

        # Silero VAD instance used for voice-activity detection.
        self.vad = SileroVAD()

        # Playback state flag (guarded by speaking_lock) for echo suppression.
        self.is_speaking = False
        self.speaking_lock = threading.Lock()

        # Initialize the Agent tool chain and hand the tools references
        # back to this worker and the WebSocket server.
        self.tools = create_websocket_tools(websocket_server)
        set_websocket_server(websocket_server)  # give tools the WebSocket server instance
        set_voice_worker(self)  # give tools this worker, e.g. to read the user identity
        set_global_instances(voice_worker=self)  # wire voice_management_tool to this worker

        # Expose generalRequest (voiceprint DB mapper) for tool use.
        self.generalRequest = generalRequest

    """启动语音监听线程"""

    def start(self):
        self.running = True
        # 直接启动语音交互循环，不再使用关键词唤醒
        voice_thread = threading.Thread(target=self._voice_interaction_loop, daemon=True)
        voice_thread.start()
        print("👂 语音监听已启动")

    """触发语音交互"""

    """持续语音交互循环"""

    def _voice_interaction_loop(self):
        print("🔊 语音交互循环已启动")
        while self.running:
            # 直接进入语音交互流程
            self._handle_voice_interaction()
            time.sleep(0.1)  # 降低CPU占用

    """语音交互（核心修改：用Agent替代原有发送逻辑）"""

    def _handle_voice_interaction(self):
        # 先检测是否有声音输入
        if not self._detect_voice_activity():
            # 没有声音输入则直接返回
            time.sleep(0.1)  # 短暂休眠
            return
            
        print("🗣️  检测到声音输入，开始处理语音交互")
        
        # 更新上次交互时间
        self.last_interaction_time = time.time()
        # 1. 切换到监听状态
        self.pet_window.safe_set_pet_state('listening')

        # 2. 实时语音转文字（保留原有核心流程）
        text = self._asr_streaming()
        if not text or not text.strip():  # 处理空结果或None
            self.pet_window.safe_set_pet_state('idle')
            self._tts_speak("对不起没有听清，请再说一次")
            print("❌ 语音转文字失败（无有效结果）")
            # 清理声纹验证文件
            self._cleanup_voice_files()
            return

        # 3. 进行声纹识别（使用已保存的音频文件）
        user_identity = None
        print(f"🔍 检查声纹验证文件: {self.voice_verification_file}")
        if self.voice_verification_file and os.path.exists(self.voice_verification_file):
            print(f"🔊 使用文件进行声纹识别: {self.voice_verification_file}")
            user_identity = self._perform_voice_recognition_from_file(self.voice_verification_file)
            if user_identity:
                print(f"👤 识别到用户: {user_identity}")
                # 更新当前用户信息
                self.current_user = user_identity
                self.user_authenticated = True
                # 向前端发送用户身份信息
                self._send_user_identity()
            else:
                print("👤 未识别到已注册用户")
                self.user_authenticated = False
        else:
            print("⚠️  声纹验证音频文件不存在")

        # 4. 切换到思考状态（Agent决策中）
        self.pet_window.safe_set_pet_state('thinking')
        print(f"🤖 正在调用Agent处理请求：{text}")

        # 5. 调用Agent处理用户请求（核心替换逻辑）
        try:
            # 构造完整prompt：包含用户身份信息+用户请求，帮助Agent决策
            user_info = "已认证用户" if self.user_authenticated else "未认证用户"
            
            # 获取用户详细身份信息（如果已认证）
            user_identity_details = "游客"
            if self.user_authenticated and self.current_user:
                try:
                    from Impl.user_title_impl import UserTitleImpl
                    from Impl.wav_impl import WavImpl
                    from config import DB_CONFIG
                    
                    # 首先获取用户ID
                    with WavImpl(DB_CONFIG) as wav_impl:
                        wav_files = wav_impl.get_wav_by_user_english_name(self.current_user)
                        if wav_files:
                            db_user_id = wav_files[0].id
                            
                            # 然后获取用户的所有业务身份
                            with UserTitleImpl(DB_CONFIG) as user_title_impl:
                                user_titles = user_title_impl.get_titles_by_user_id(db_user_id)
                                
                                if user_titles:
                                    # 如果用户有多个身份，组合显示
                                    roles = [f"{title.title_name}({title.level})" for title in user_titles]
                                    user_identity_details = "、".join(roles)
                                else:
                                    user_identity_details = "已认证用户"
                        else:
                            user_identity_details = "已认证用户"
                except Exception as e:
                    print(f"⚠️ 获取用户详细身份信息时出错: {e}")
                    user_identity_details = "已认证用户"
            
            full_prompt = f"""当前用户身份：{user_identity_details}
                              用户状态：{user_info}
                              用户的语音请求：{text}
                           """
            
            # 调用Agent，传入工具列表让其自主选择
            agent_response = get_llm_response(full_prompt, self.tools)

            # 6. 播报Agent处理结果
            self.pet_window.safe_set_pet_state("speaking")
            self._tts_speak(agent_response)

        except Exception as e:
            print(f"❌ Agent调用失败：{str(e)}")
            self.pet_window.safe_set_pet_state("speaking")
            self._tts_speak("处理请求时出现错误，请稍后重试")

        # 7. 恢复初始状态
        time.sleep(1)
        self.pet_window.safe_set_pet_state("idle")
        
        # 8. 清理声纹验证文件
        self._cleanup_voice_files()

    """检测是否有声音输入"""

    def _detect_voice_activity(self):
        """使用Silero VAD检测是否有声音输入"""
        try:
            # 检查是否正在播放音频，避免回声触发
            with self.speaking_lock:
                if self.is_speaking:
                    print("🗣️  正在播放音频，跳过语音检测")
                    return False
            
            # 使用Silero VAD检测语音活动
            speech_detected = self.vad.detect_voice_activity(timeout=5)
            print(f"{'✅' if speech_detected else '🔇'} 声音检测结果: {'检测到有效声音输入' if speech_detected else '未检测到有效声音输入'}")
            return speech_detected
        except Exception as e:
            print(f"❌ Silero VAD检测异常：{str(e)}")
            return False

    """星火ASR-LLM实时语音转写（核心逻辑，参考官方示例重构）"""

    def _asr_streaming(self):
        self.asr_result = ""
        self.asr_final_received.clear()
        self.listening = True  # 初始化时就开始监听
        self.is_connected = False
        self.ws = None
        self.session_id = None

        try:
            # 1. 构建鉴权URL
            url = self._build_xfyun_url()
            print(f"🌍 连接星火ASR-LLM服务器：{url.split('?')[0]}")

            # 2. 建立WebSocket连接（官方推荐create_connection）
            self.ws = create_connection(
                url,
                timeout=15,  # 连接超时时间
                enable_multithread=True  # 支持多线程并发
            )
            self.is_connected = True
            print("【连接成功】WebSocket握手完成，等待服务端就绪...")
            time.sleep(1.5)  # 关键：等待服务端初始化

            # 3. 启动独立接收线程（避免消息阻塞）
            recv_thread = threading.Thread(target=self._recv_msg, daemon=True)
            recv_thread.start()

            # 4. 发送音频流
            self._send_audio_stream()

            # 5. 等待最终结果（超时15秒）
            wait_timeout = 15
            if self.asr_final_received.wait(wait_timeout):
                print(f"【识别完成】最终结果：{self.asr_result}")
            else:
                print(f"【识别超时】超过{wait_timeout}秒未收到结果")

        except WebSocketException as e:
            print(f"❌ WebSocket错误：{str(e)}")
        except Exception as e:
            print(f"❌ 语音识别异常：{str(e)}")
        finally:
            # 6. 安全关闭连接
            self._close_connection()

        return self.asr_result.strip()  # 确保返回字符串

    def _recv_msg(self):
        """Receiver loop: parse server messages and accumulate ASR text.

        Runs on its own daemon thread until the final result arrives, a
        server error occurs, or the connection drops. Sets
        `asr_final_received` to unblock `_asr_streaming`.
        """
        while self.is_connected and self.ws:
            try:
                msg = self.ws.recv()
                if not msg:
                    print("【接收消息】服务端关闭连接")
                    self.listening = False
                    self.asr_final_received.set()
                    break

                if isinstance(msg, str):
                    data = json.loads(msg)

                    # Only treat it as an error when `code` is present and != 0.
                    error_code = data.get("code")
                    if error_code is not None and error_code != 0:
                        print(f"❌ 服务端错误：{data.get('desc')}（code={error_code}）")
                        self.listening = False
                        self.asr_final_received.set()
                        break

                    # Capture the server-issued session id (needed for the end signal).
                    if "sessionId" in data.get("data", {}):
                        self.session_id = data["data"]["sessionId"]

                    # Parse transcription results (official message format).
                    if data.get("msg_type") == "result" and data.get("res_type") == "asr":
                        result_data = data.get("data", {})
                        cn_data = result_data.get("cn", {})
                        st_data = cn_data.get("st", {})
                        rt_list = st_data.get("rt", [])

                        # NOTE(review): words are appended for every result message;
                        # this assumes the server sends incremental (not cumulative)
                        # segments — confirm against the ASR protocol docs.
                        for rt in rt_list:
                            ws_list = rt.get("ws", [])
                            for ws in ws_list:
                                cw_list = ws.get("cw", [])
                                for cw in cw_list:
                                    self.asr_result += cw.get("w", "")  # append recognized word

                        # ls == true marks the final result of the session.
                        if result_data.get("ls", False):
                            print(f"✅ 最终识别：{self.asr_result}")
                            self.listening = False
                            self.asr_final_received.set()

            except json.JSONDecodeError:
                print(f"【接收异常】非JSON消息：{msg[:50]}...")
            except WebSocketException as e:
                print(f"【接收异常】连接中断：{str(e)}")
                self.listening = False
                self.asr_final_received.set()
                break

    def _send_audio_stream(self):
        """Record from the microphone and stream PCM frames over the WebSocket.

        Records for up to 5 seconds (40 ms frames), keeps a copy of the audio
        for voiceprint verification, then sends the end-of-stream signal.
        """
        if not self.is_connected or not self.ws:
            print("【发送失败】连接未建立")
            return

        p = pyaudio.PyAudio()
        # 40 ms frames (1280 bytes at 16 kHz / 16-bit / mono).
        frame_size = 1280
        stream = p.open(
            format=self.FORMAT,
            channels=self.CHANNELS,
            rate=self.RATE,
            input=True,
            frames_per_buffer=frame_size
        )

        try:
            # Show the pet speaking while we prompt the user.
            if hasattr(self, 'pet_window') and self.pet_window:
                self.pet_window.set_state('speaking')

            self._tts_speak("我在，请说说你的需求")
            # Then switch to the thinking pose while recording.
            if hasattr(self, 'pet_window') and self.pet_window:
                self.pet_window.set_state('thinking')
            print("【音频发送】开始录音")
            start_time = time.time()

            # Keep the raw frames for voiceprint verification.
            voice_frames = []

            # Record for 5 seconds (enough for one utterance).
            while self.listening and (time.time() - start_time < 5):
                audio_data = stream.read(frame_size, exception_on_overflow=False)
                self.ws.send_binary(audio_data)
                # Save a copy for voiceprint verification.
                voice_frames.append(audio_data)
                time.sleep(0.04)  # pace at 40 ms per frame

            # Persist the recording for voiceprint verification.
            if voice_frames:
                self._save_voice_for_verification(voice_frames)

            # Send the end-of-stream marker (requires the server session id).
            if self.session_id:
                end_signal = json.dumps({
                    "end": True,
                    "sessionId": self.session_id
                })
                self.ws.send(end_signal)
                print("【音频发送】结束标识已发送")
            else:
                print("【发送警告】未获取到sessionId，无法发送结束标识")

        except Exception as e:
            print(f"【发送异常】{str(e)}")
        finally:
            # Always release the audio device.
            stream.stop_stream()
            stream.close()
            p.terminate()

    """安全关闭WebSocket连接"""

    def _close_connection(self):
        if self.is_connected and self.ws:
            self.is_connected = False
            self.listening = False
            try:
                if self.ws.connected:
                    self.ws.close(status=1000, reason="正常关闭")
                print("【连接关闭】WebSocket已安全关闭")
            except Exception as e:
                print(f"【关闭异常】{str(e)}")
        self.ws = None  # 释放连接对象

    def _save_voice_for_verification(self, voice_frames):
        """Persist recorded PCM frames for later voiceprint verification.

        Writes a timestamped WAV file under ./temp_audio, converts it to MP3
        (the format consumed by the voiceprint API) and stores the MP3 path
        in ``self.voice_verification_file``. Best-effort: failures are logged.

        :param voice_frames: list of raw PCM byte chunks captured by PyAudio
        """
        try:
            import wave
            import os

            # Make sure the temp_audio directory exists.
            temp_audio_dir = os.path.join(os.path.dirname(__file__), 'temp_audio')
            os.makedirs(temp_audio_dir, exist_ok=True)

            # Bug fix: the timestamped name was generated but never used —
            # every recording overwrote the same hard-coded file. Use it.
            filename = f"voice_verification_{time.strftime('%Y%m%d_%H%M%S')}"
            wav_filename = os.path.join(temp_audio_dir, f"{filename}.wav")

            # Save as WAV; the context manager guarantees the file is closed.
            with wave.open(wav_filename, 'wb') as wf:
                wf.setnchannels(self.CHANNELS)
                # Module-level helper avoids leaking a PyAudio instance.
                wf.setsampwidth(pyaudio.get_sample_size(self.FORMAT))
                wf.setframerate(self.RATE)
                wf.writeframes(b''.join(voice_frames))

            print(f"✅ 音频已保存用于声纹验证: {wav_filename}")

            # Convert to MP3 for the voiceprint recognition API.
            from pydub import AudioSegment
            audio = AudioSegment.from_wav(wav_filename)
            mp3_filename = wav_filename.replace('.wav', '.mp3')
            audio.export(mp3_filename, format="mp3")

            print(f"✅ 音频已转换为MP3格式: {mp3_filename}")

            # Remember the MP3 path for the recognition step.
            self.voice_verification_file = mp3_filename

        except Exception as e:
            print(f"❌ 保存音频用于声纹验证时发生错误: {e}")

    def _perform_voice_recognition_from_file(self, voice_file):
        """
        从文件执行声纹识别
        """
        try:
            if not voice_file:
                return None

            # 使用讯飞API进行声纹识别
            result = self.generalRequest.req_url(
                api_name='searchFea',
                APPId=self.voice_test['APPId'],
                APIKey=self.voice_test['APIKey'],
                APISecret=self.voice_test['APISecret'],
                file_path=voice_file,
                group_id=self.voice_test['groupId']
            )

            # 检查响应结果
            if result.get('header', {}).get('code') != 0:
                print(f"❌ 声纹识别错误: {result.get('header', {}).get('message', '未知错误')}")
                return None

            # 解析返回的结果
            payload = result.get('payload', {})
            search_fea_res = payload.get('searchFeaRes', {})
            text_content = search_fea_res.get('text')

            if not text_content:
                print("❌ 未找到声纹识别结果")
                return None

            # 解析scoreList
            import json
            score_data = json.loads(text_content)
            score_list = score_data.get('scoreList', [])

            if not score_list:
                print("❌ 声纹识别返回空列表")
                return None

            # 获取最高分的用户
            best_match = max(score_list, key=lambda x: x.get('score', 0))
            score = best_match.get('score', 0)
            feature_info = best_match.get('featureInfo', '')

            # 判断是否通过验证 (score > 0.6)
            if score >= 0.6:
                print(f"✅ 声纹验证通过，用户: {feature_info} (相似度: {score:.4f})")
                return feature_info
            else:
                print(f"❌ 声纹验证失败，最高相似度: {score:.4f}")
                return None

        except Exception as e:
            print(f"❌ 声纹识别过程中出现错误: {e}")
            return None

    def _cleanup_voice_files(self):
        """
        清理声纹验证音频文件
        """
        if self.voice_verification_file and os.path.exists(self.voice_verification_file):
            try:
                os.remove(self.voice_verification_file)
                wav_file = self.voice_verification_file.replace('.mp3', '.wav')
                if os.path.exists(wav_file):
                    os.remove(wav_file)
                self.voice_verification_file = None
            except Exception as e:
                print(f"⚠️  删除声纹验证音频文件时出错: {e}")

    def _build_xfyun_url(self):
        """Build the signed wss:// URL for the iFlytek ASR service.

        Percent-encodes the sorted query parameters, signs the joined string
        with HMAC-SHA1 (base64-encoded) using the API secret, and appends the
        signature as a query parameter.

        :return: full wss URL including the signature
        """
        import uuid
        import datetime

        app_id = XFYUN_APP_ID.strip()

        # Officially designated host and path.
        host = "office-api-ast-dx.iflyaisol.com"
        path = "/ast/communicate/v1"
        scheme = "wss"

        # Timestamp in UTC+8 (Beijing) with a %z offset suffix.
        # NOTE(review): the original comment said "UTC"; the value sent is
        # offset-qualified Beijing time — confirm against the API docs.
        beijing_tz = datetime.timezone(datetime.timedelta(hours=8))
        utc_time = datetime.datetime.now(beijing_tz).strftime("%Y-%m-%dT%H:%M:%S%z")

        # Auth parameters (only the required base fields; no domain/model).
        auth_params = {
            "accessKeyId": XFYUN_API_KEY.strip(),
            "appId": app_id,
            "uuid": uuid.uuid4().hex,
            "utc": utc_time,
            "audio_encode": "pcm_s16le",
            "samplerate": str(self.RATE),
            "lang": "autodialect"  # required: Chinese/English + dialects
        }

        # Drop empty values and sort by key (signature base must be sorted).
        sorted_params = dict(sorted([
            (k, v) for k, v in auth_params.items()
            if v is not None and str(v).strip() != ""
        ]))

        # Sign: percent-encode k=v pairs, join with '&', HMAC-SHA1, base64.
        base_str = "&".join([
            f"{quote(k, safe='')}={quote(v, safe='')}"
            for k, v in sorted_params.items()
        ])
        signature = hmac.new(
            XFYUN_API_SECRET.encode("utf-8"),
            base_str.encode("utf-8"),
            digestmod=hashlib.sha1
        ).digest()
        sorted_params["signature"] = base64.b64encode(signature).decode("utf-8")

        # Assemble the final URL.
        query = urlencode(sorted_params)
        url = f"{scheme}://{host}{path}?{query}"
        return url

    """语音播报"""

    def _tts_speak(self, text):
        with self.speaking_lock:
            self.is_speaking = True
        try:
            speak_text(text)
            time.sleep(1.0)  # 增加延迟到1秒以更好地减少回音干扰
        finally:
            with self.speaking_lock:
                self.is_speaking = False

    """启动浏览器（保留，供工具调用）"""

    """发送用户身份信息到前端（保留，供工具调用）"""

    def _send_user_identity(self):
        # 确定用户角色和ID
        user_id = self.current_user if self.current_user else "guest"
        user_role = "访客"

        # 如果用户已认证，从数据库获取详细身份信息
        if self.user_authenticated and self.current_user:
            try:
                from Impl.user_title_impl import UserTitleImpl
                from Impl.wav_impl import WavImpl
                from config import DB_CONFIG
                
                # 首先获取用户ID
                with WavImpl(DB_CONFIG) as wav_impl:
                    wav_files = wav_impl.get_wav_by_user_english_name(self.current_user)
                    if wav_files:
                        db_user_id = wav_files[0].id
                        
                        # 然后获取用户的所有业务身份
                        with UserTitleImpl(DB_CONFIG) as user_title_impl:
                            user_titles = user_title_impl.get_titles_by_user_id(db_user_id)
                            
                            if user_titles:
                                # 如果用户有多个身份，组合显示
                                roles = [f"{title.title_name}({title.level})" for title in user_titles]
                                user_role = "、".join(roles)
                            else:
                                user_role = "已认证用户"
                    else:
                        user_role = "已认证用户"
            except Exception as e:
                print(f"⚠️ 获取用户详细身份信息时出错: {e}")
                user_role = "已认证用户"
        elif self.user_authenticated:
            user_role = "已认证用户"

        # 发送用户身份信息
        identity_message = {
            "type": "user_info",
            "user_id": user_id,
            "user_role": user_role,
            "timestamp": time.time()
        }
        self.websocket_server.send_message(json.dumps(identity_message))
        print(f"📤 已向前端发送用户身份信息: {user_id} ({user_role})")

    """发送消息到前端（保留，供工具调用）"""

    def _send_message_to_frontend(self, text, user_id):
        # 确定用户角色
        user_role = "访客"
        if self.user_authenticated:
            user_role = "已认证用户"
        message = {
            "type": "query",
            "text": text,
            "user_id": user_id if user_id else "guest",
            "user_role": user_role,
            "timestamp": time.time()
        }
        self.websocket_server.send_message(json.dumps(message))


    """上传WAV文件到数据库"""
    def _upload_wav_to_database(self, wav_filename):
        try:
            from Mapper.wav_mapper import WavFile
            from config import DB_CONFIG
            from Impl.wav_impl import WavImpl
            import os
            import time
            
            # 读取WAV文件数据
            with open(wav_filename, 'rb') as f:
                wav_file_data = f.read()
            
            # 创建WavFile对象
            wav_file_obj = WavFile(
                user_english_name=self.current_user if self.current_user else 'guest',
                user_chinese_name='未知用户' if not self.current_user else self.current_user,
                user_identity='访客' if not self.user_authenticated else '已认证用户',
                audio_type='1',  # 语音验证
                file_data=wav_file_data,
                upload_time=time.strftime('%Y-%m-%d %H:%M:%S')
            )

            # 插入到数据库
            with WavImpl(DB_CONFIG) as wav_impl:
                wav_impl.insert_wav(wav_file_obj)
                print(f"✅ WAV文件已上传到数据库: {wav_filename}")
                
        except Exception as e:
            print(f"❌ 上传WAV文件到数据库时发生错误: {e}")