import hashlib
import hmac
import base64
import time
import json
import threading
import pyaudio
from websocket import create_connection
from urllib.parse import quote



def gen_signa(appid, apikey, ts):
    """Build the WebSocket handshake signature for the iFlytek RTASR service.

    The scheme is: base64( HMAC-SHA1(apikey, hex(MD5(appid + ts))) ).

    :param appid: iFlytek application id.
    :param apikey: iFlytek API key used as the HMAC secret.
    :param ts: current Unix timestamp as a decimal string.
    :return: base64-encoded signature string.
    """
    md5_hex = hashlib.md5((appid + ts).encode('utf-8')).hexdigest()
    mac = hmac.new(apikey.encode('utf-8'), md5_hex.encode('utf-8'), digestmod='sha1')
    return base64.b64encode(mac.digest()).decode('utf-8')


class XFRealtimeASR:
    def __init__(self, appid, apikey, vad_eos_ms=1500):
        self.appid = appid
        self.apikey = apikey
        self.vad_eos_ms = vad_eos_ms  # 静音结束阈值（单位毫秒）
        self.ws = None
        self.handshake_done = threading.Event()
        self.closed = False
        self.end_tag = "{\"end\":true}"
        self.result_text = ""  # 用于存储识别结果
        self.last_result_time = None  # 用于跟踪最后的识别结果时间
        self.record_start_time = None  # 记录开始计时的时间

    def connect(self):
        ts = str(int(time.time()))
        signa = gen_signa(self.appid, self.apikey, ts)
        url = (
            f"ws://rtasr.xfyun.cn/v1/ws?"
            f"appid={self.appid}&ts={ts}&signa={quote(signa)}"
            f"&vad_eos={self.vad_eos_ms}"
        )
        print("连接URL：", url)
        self.ws = create_connection(url)
        self.recv_thread = threading.Thread(target=self._recv_thread_func, daemon=True)
        self.recv_thread.start()

    def start_mic(self):
        """开始麦克风输入并返回完整转写结果"""
        if not self.handshake_done.wait(10):
            print("握手超时或失败，无法推送音频！")
            self.close()
            return ""

        CHUNK = 640
        FORMAT = pyaudio.paInt16
        CHANNELS = 1
        RATE = 16000

        pa = pyaudio.PyAudio()
        stream = pa.open(format=FORMAT, channels=CHANNELS, rate=RATE, input=True, frames_per_buffer=CHUNK)
        print("【开始说话，Ctrl+C终止】")

        # 初始化计时器
        self.last_result_time = None  # 最后识别结果时间未开始
        self.record_start_time = None  # 记录开始计时的时间

        try:
            while not self.closed:
                # 读取音频数据
                data = stream.read(CHUNK, exception_on_overflow=False)
                self.ws.send(data, opcode=0x2)

                # 检测最大录制时长和静音结束
                if self.record_start_time is not None:
                    elapsed_time = time.time() - self.record_start_time
                    # 检查是否超过10秒
                    if elapsed_time >= 10:
                        print("超过10秒录制时间，强制结束本句")
                        self.ws.send(self.end_tag.encode('utf-8'))  # 发送结束标记
                        break

                    # 检查是否超过1.5秒静音
                    if (time.time() - self.last_result_time) > (self.vad_eos_ms / 1000):
                        print("超过2.5秒静音，强制结束本句")
                        self.ws.send(self.end_tag.encode('utf-8'))  # 发送结束标记
                        break

                time.sleep(0.04)  # 控制循环频率
        except KeyboardInterrupt:
            print("用户手动停止！")
        finally:
            print(self.result_text)
            # 此处开始接入大模型


            stream.stop_stream()
            stream.close()
            pa.terminate()
            self.close()

            # 等待接收线程处理完数据
            time.sleep(0.2)

            # 此处开始接入语音合成模块

            # 返回识别的最终结果
            # print("\n【完整转写结果】")
            # print(self.result_text)
            return self.result_text

    def parse_data_text(self, data_str):
        """解析讯飞返回的data字段（JSON字符串）中的文本内容"""
        try:
            data_dict = json.loads(data_str)
            if 'cn' in data_dict and 'st' in data_dict['cn'] and 'rt' in data_dict['cn']['st']:
                words = []
                for rt_item in data_dict['cn']['st']['rt']:
                    for ws in rt_item['ws']:
                        if 'cw' in ws:
                            for cw in ws['cw']:
                                words.append(cw['w'])  # 提取识别出的词
                return ''.join(words)
            return ""
        except Exception as e:
            print(f"解析文本出错: {e}")
            return ""

    def _recv_thread_func(self):
        try:
            while self.ws.connected:
                result = self.ws.recv()
                if not result:
                    print("【识别结束】")
                    break
                result_dict = json.loads(result)
                if result_dict["action"] == "started":
                    print("连接握手成功，开始实时识别！")
                    self.handshake_done.set()
                elif result_dict["action"] == "result":
                    data_str = result_dict.get("data", "")
                    if data_str:
                        text = self.parse_data_text(data_str)
                        if text:
                            self.result_text += text  # 更新识别结果
                            self.last_result_time = time.time()  # 更新最后一次识别结果的时间
                            if self.record_start_time is None:  # 第一次识别结果出现
                                self.record_start_time = time.time()  # 启动计时
                            print(f"【识别结果】{text}")
                elif result_dict["action"] == "error":
                    print("【讯飞返回错误】", result_dict)
                    self.handshake_done.set()
                    self.closed = True
                    self.ws.close()
                    break
        except Exception as e:
            if not self.closed:
                print("【接收线程异常】", e)

    def close(self):
        self.closed = True
        try:
            if self.ws:
                self.ws.close()
                print("WebSocket关闭")
        except:
            pass


def get_speech_to_text(appid, apikey):
    """Run one microphone capture session and return the transcribed text.

    Blocks until the utterance ends (2.5 s silence cutoff, the max-duration
    cap, or Ctrl+C).

    :param appid: iFlytek application id.
    :param apikey: iFlytek RTASR API key.
    :return: the recognized text ("" on handshake failure).
    """
    recognizer = XFRealtimeASR(appid, apikey, vad_eos_ms=2500)  # 2.5 s of silence ends the utterance
    recognizer.connect()
    return recognizer.start_mic()



if __name__ == "__main__":
    import os

    # SECURITY: real credentials are hard-coded below as a local-testing
    # fallback. Prefer the XF_APPID / XF_APIKEY environment variables, never
    # commit live keys, and rotate these if they were ever pushed.
    appid = os.environ.get("XF_APPID", "9646138c")
    apikey = os.environ.get("XF_APIKEY", "82adf64eb83cf7b9ec6f4ae215943e0d")

    # Use the class directly: connect, then block on the microphone session.
    asr = XFRealtimeASR(appid, apikey, vad_eos_ms=1500)
    asr.connect()
    result = asr.start_mic()