import pyaudio
import collections
import sys
import threading
import time
import json
import os
import websocket
import base64
import gzip
import hmac
import uuid
from hashlib import sha256
from urllib.parse import urlparse, urlencode

import webrtcvad
# import nls # nls is now imported by ali_tts_agent
# from aliyunsdkcore.client import AcsClient # AcsClient is now imported by ali_tts_agent
# from aliyunsdkcore.request import CommonRequest # CommonRequest is now imported by ali_tts_agent

# Import AliTTSAgent
from tools.builtin.ali_tts_agent import AliTTSAgent

# Load configuration from settings.txt
def load_settings(path='settings.txt'):
    """Load the JSON configuration file.

    Args:
        path: Path of the settings file. Defaults to 'settings.txt' in the
            current working directory, preserving the original behavior.

    Returns:
        The parsed settings dict, or {} when the file is missing or does not
        contain valid JSON (an error message is printed in those cases so the
        caller can keep running with empty settings).
    """
    try:
        with open(path, 'r', encoding='utf-8') as f:
            return json.load(f)
    except FileNotFoundError:
        print(f"错误：{path} 未找到。")
        return {}
    except json.JSONDecodeError:
        print(f"错误：无法从 {path} 解码 JSON。")
        return {}

settings = load_settings()

# ASR configuration (Doubao)
ASR_FORMAT = pyaudio.paInt16
ASR_CHANNELS = 1
ASR_RATE = 16000
# VAD configuration
VAD_FRAME_DURATION_MS = 30 # webrtcvad supports 10, 20, or 30ms frames
VAD_SAMPLE_RATE = 16000
VAD_MODE = 3  # 0-3, 3 is most aggressive
VAD_PADDING_DURATION_MS = 300 # 300ms of silence before and after speech

ASR_CHUNK = int(VAD_SAMPLE_RATE * VAD_FRAME_DURATION_MS / 1000) # Match VAD frame size (480 samples)

# Doubao ASR Protocol Constants
# Binary frame header fields (each packed into 4-bit nibbles / 1 byte below).
PROTOCOL_VERSION = 0b0001
DEFAULT_HEADER_SIZE = 0b0001
# NOTE(review): the *_BITS constants below are documentation of the header
# layout only — _construct_asr_request uses hard-coded shifts, not these.
MESSAGE_TYPE_BITS = 4
MESSAGE_TYPE_SPECIFIC_FLAGS_BITS = 4
MESSAGE_SERIALIZATION_BITS = 4
MESSAGE_COMPRESSION_BITS = 4
RESERVED_BITS = 8

# Message Type:
CLIENT_FULL_REQUEST = 0b0001        # JSON start/end request
CLIENT_AUDIO_ONLY_REQUEST = 0b0010  # raw PCM audio frame
SERVER_FULL_RESPONSE = 0b1001       # recognition result
SERVER_ACK = 0b1011                 # acknowledgement, carries no text
SERVER_ERROR_RESPONSE = 0b1111

# Message Serialization
NO_SERIALIZATION = 0b0000
JSON = 0b0001

# Message Compression
NO_COMPRESSION = 0b0000
GZIP = 0b0001

# NOTE(review): this module-level variable appears unused — DoubaoAsrClient
# keeps its own last_final_text attribute. Confirm before removing.
last_final_text = ""

class DoubaoAsrClient:
    """Streaming speech-recognition client for the Doubao (Volcengine) ASR service.

    Implements the service's binary WebSocket framing (4-byte header, then an
    optional big-endian 4-byte payload size plus payload — see the protocol
    constants above): a JSON "full request" opens the session, raw PCM chunks
    are streamed as audio-only frames by a background sender thread, and a
    final JSON request with sequence=-1 ends the utterance.  Partial text is
    echoed to stdout as it arrives; ``close()`` hands the last transcript to
    consumers through ``recognized_text_queue``.
    """

    def __init__(self, app_id, access_token, secret_key):
        # Most recent running transcript (accumulated finals + current partial).
        self.last_final_text = ""
        # Credentials / endpoint configuration.
        self.app_id = app_id
        self.access_token = access_token
        self.secret_key = secret_key  # HMAC-SHA256 key for the auth header
        self.cluster = "volcengine_input_common"
        self.base_url = "wss://openspeech.bytedance.com/api/v2/asr"
        self.uid = str(uuid.uuid4())  # per-client user id sent in requests

        # Connection / streaming state.
        self.ws = None
        self.connected = False
        self.full_text_result = ""  # concatenation of finalized utterance texts
        self.audio_queue = collections.deque()        # PCM chunks awaiting send
        self.audio_queue_lock = threading.Lock()
        self.send_audio_thread = None                 # background sender thread
        self.recognized_text_queue = collections.deque()  # finished transcripts for the caller
        self.recognized_text_queue_lock = threading.Lock()
        # Manual-close counter: close() bumps it by 2 so the on_error/on_close
        # callbacks fired by an intentional shutdown do not print warnings.
        # NOTE(review): the else branches in those callbacks reset it to 1,
        # which can also suppress one later genuine warning — confirm intent.
        self.manul_close = 0


    def _construct_asr_request(self, msg_type, payload, serialization=JSON, compression=GZIP):
        """Build one binary protocol frame.

        Header layout (4 bytes): [version|header size], [msg type|flags],
        [serialization|compression], [reserved].  A non-empty ``payload`` is
        (optionally gzip-)compressed and appended after its 4-byte big-endian
        length.  Returns the complete frame as a bytearray.
        """
        header = bytearray(4)
        header[0] = (PROTOCOL_VERSION << 4) | DEFAULT_HEADER_SIZE
        header[1] = (msg_type << 4) | 0  # message-specific flags always 0
        header[2] = (serialization << 4) | compression
        header[3] = 0  # reserved

        if payload:
            if compression == GZIP:
                payload = gzip.compress(payload)
            header.extend(len(payload).to_bytes(4, 'big'))
            header.extend(payload)
        # NOTE(review): an empty payload produces a header-only frame with no
        # size field — confirm the server accepts that shape.

        return header

    def _generate_asr_auth_header(self, request_bytes):
        """Create the HTTP headers carrying the HMAC-SHA256 request signature.

        The signed input is the pseudo request line, the values of the custom
        headers listed in ``h=`` (here only 'Custom'), and the raw bytes of
        the initial request frame; the digest is URL-safe base64 encoded and
        embedded in the Authorization header.
        """
        header_dicts = {
            'Custom': 'auth_custom',
        }
        url_path = urlparse(self.base_url).path
        input_str = f'GET {url_path} HTTP/1.1\n'
        auth_headers = 'Custom'
        for header in auth_headers.split(','):
            input_str += f'{header_dicts[header]}\n'
        input_data = bytearray(input_str, 'utf-8')
        input_data.extend(request_bytes)

        signature = hmac.new(self.secret_key.encode('utf-8'), input_data, sha256).digest()
        signature_b64 = base64.urlsafe_b64encode(signature).decode('utf-8')

        header_dicts['Authorization'] = f'HMAC256; access_token="{self.access_token}"; mac="{signature_b64}"; h="{auth_headers}"'
        return header_dicts

    def _parse_asr_response(self, message):
        """Decode one binary frame from the server.

        Returns a (text, is_final) pair: ``text`` is the recognized text or
        None when the frame carries none; ``is_final`` is True for error and
        unknown frames and for results whose sequence number is negative
        (the server's end-of-utterance marker).
        """
        try:
            msg_type = message[1] >> 4

            if msg_type == SERVER_ERROR_RESPONSE:
                error_code = int.from_bytes(message[4:8], "big", signed=False)
                error_msg_size = int.from_bytes(message[8:12], "big", signed=False)
                error_msg_bytes = message[12:12 + error_msg_size]

                compression_type = (message[2] & 0x0f)
                try:
                    if compression_type == GZIP:
                        error_msg_bytes = gzip.decompress(error_msg_bytes)
                    error_msg = error_msg_bytes.decode('utf-8')
                except (gzip.BadGzipFile, UnicodeDecodeError) as e:
                    error_msg = f"Failed to decompress or decode error message: {e}. Raw bytes: {error_msg_bytes.hex()}"
                # error_code / error_msg are currently unused because the
                # diagnostic print below is commented out.
                #print(f"ASR 服务器错误: 错误码={error_code}, 消息={error_msg}, 原始数据: {message.hex()}")
                return None, True
            elif msg_type == SERVER_FULL_RESPONSE:
                payload_size = int.from_bytes(message[4:8], "big", signed=True)
                payload = message[8:8 + payload_size]

                compression_type = (message[2] & 0x0f)
                if compression_type == GZIP:
                    payload = gzip.decompress(payload)

                result = json.loads(payload)

                is_final = False
                text = ""
                if "result" in result and result["result"]:
                    # Concatenate the text of every utterance in the frame.
                    for utterance in result["result"]:
                        if "text" in utterance:
                            text += utterance["text"]

                    # A negative sequence number marks the final result.
                    if "sequence" in result and result["sequence"] < 0:
                        is_final = True
                return text, is_final
            elif msg_type == SERVER_ACK:
                # Acknowledgement frames carry no text and do not end the stream.
                return None, False
            else:
                print(f"收到未知 ASR 消息类型: {bin(msg_type)}")
                return None, True
        except (IndexError, json.JSONDecodeError, gzip.BadGzipFile) as e:
            print(f"Error parsing ASR response: {e}")
            print(f"Raw message: {message.hex()}")
            return None, True

    def on_message(self, ws, message):
        """WebSocketApp callback: parse a frame, echo partials, accumulate finals."""
        if isinstance(message, bytes):
            text, is_final = self._parse_asr_response(message)
            if text:
                # Overwrite the current console line with the running transcript.
                sys.stdout.write(f"\r>> {self.full_text_result}{text} ...")
                sys.stdout.flush()
                self.last_final_text = self.full_text_result + text
            if is_final and text:
                self.full_text_result += text
                print(f"\nFinal Recognized Text: {self.full_text_result}")
                #with self.recognized_text_queue_lock:
                #    self.recognized_text_queue.append(self.full_text_result)
                #self.full_text_result = ""
        else:
            print(f"收到来自 ASR 的意外文本消息: {message}")

    def on_error(self, ws, error):
        """WebSocketApp callback: warn unless the error came from a manual close."""
        self.manul_close -= 1
        if self.manul_close<0:
            print(f"WebSocket 错误 (豆包 ASR): 类型={type(error)}, 错误={error}")
            self.manul_close = 0
        else:
            self.manul_close =1

        self.connected = False

    def on_close(self, ws, close_status_code, close_msg):
        """WebSocketApp callback: warn unless the close was requested by close()."""
        self.manul_close -= 1
        if self.manul_close<0:
            print(f"WebSocket 已关闭 (豆包 ASR)，状态码: {close_status_code}, 消息: {close_msg}")
            self.manul_close = 0
        else:
            self.manul_close =1
        self.connected = False

    def on_open_with_initial_request(self, ws, initial_request_bytes):
        """On open, immediately send the pre-built JSON start request frame."""
        self.connected = True
        self.ws.send(initial_request_bytes, websocket.ABNF.OPCODE_BINARY)
        #print("ASR 启动请求已发送。")

    def _generate_asr_start_request_bytes(self):
        """Build the binary start-of-session frame (JSON full request, sequence=1)."""
        req = {
            "app": {
                "appid": self.app_id,
                "token": self.access_token,
                "cluster": self.cluster
            },
            "user": {
                "uid": self.uid
            },
            "audio": {
                "format": "pcm",
                "rate": ASR_RATE,
                "bits": 16,
                "channel": ASR_CHANNELS,
                "language": "zh-CN",
            },
            "request": {
                "reqid": str(uuid.uuid4()),
                "sequence": 1
            }
        }
        payload_bytes = json.dumps(req).encode('utf-8')
        full_request_bytes = self._construct_asr_request(CLIENT_FULL_REQUEST, payload_bytes)
        return full_request_bytes

    def connect(self):
        """Open the WebSocket on a daemon thread and wait (up to 10s) for it.

        Raises:
            Exception: if the connection is not established within the timeout.
        """
        initial_request_bytes = self._generate_asr_start_request_bytes()
        auth_header = self._generate_asr_auth_header(initial_request_bytes)

        self.ws = websocket.WebSocketApp(
            self.base_url,
            header=auth_header,
            on_open=lambda ws: self.on_open_with_initial_request(ws, initial_request_bytes),
            on_message=self.on_message,
            on_error=self.on_error,
            on_close=self.on_close
        )
        self.wst = threading.Thread(target=self.ws.run_forever, daemon=True)
        self.wst.start()
        # Poll until on_open flips self.connected, or give up after 10s.
        timeout = 10
        start_time = time.time()
        while not self.connected and time.time() - start_time < timeout:
            time.sleep(0.1)
        if not self.connected:
            raise Exception("连接豆包 ASR WebSocket 服务器失败。")

    def send_audio_stream(self):
        """Sender-thread loop: drain audio_queue into audio-only frames.

        Keeps running while connected, and continues after disconnect until
        the queue is empty (so buffered audio is not dropped silently).
        """
        while self.connected or len(self.audio_queue) > 0:
            audio_chunk = None
            with self.audio_queue_lock:
                if self.audio_queue:
                    audio_chunk = self.audio_queue.popleft()

            if audio_chunk:
                # Audio frames are sent raw: no JSON serialization, no gzip.
                audio_request = self._construct_asr_request(
                    CLIENT_AUDIO_ONLY_REQUEST,
                    audio_chunk,
                    serialization=NO_SERIALIZATION,
                    compression=NO_COMPRESSION
                )
                try:
                    self.ws.send(audio_request, websocket.ABNF.OPCODE_BINARY)
                except websocket.WebSocketConnectionClosedException:
                    print("无法发送音频，ASR 连接已关闭。")
                    break
            else:
                time.sleep(0.01)  # idle wait when the queue is empty

    def start_sending_audio(self):
        """Start the sender thread if it is not already running."""
        if self.send_audio_thread is None or not self.send_audio_thread.is_alive():
            self.send_audio_thread = threading.Thread(target=self.send_audio_stream, daemon=True)
            self.send_audio_thread.start()

    def send_audio(self, audio_data):
        """Queue one raw PCM chunk for the sender thread (thread-safe)."""
        with self.audio_queue_lock:
            self.audio_queue.append(audio_data)

    def send_asr_end_request(self):
        """Send the end-of-utterance frame (JSON full request, sequence=-1)."""
        if not self.connected:
            print("无法发送结束请求，ASR 未连接。")
            return
        req = {
            "app": {
                "appid": self.app_id,
                "token": self.access_token,
                "cluster": self.cluster
            },
            "user": {
                "uid": self.uid
            },
            "request": {
                "reqid": str(uuid.uuid4()),
                "sequence": -1
            }
        }
        payload_bytes = json.dumps(req).encode('utf-8')
        full_request = self._construct_asr_request(CLIENT_FULL_REQUEST, payload_bytes)
        try:
            self.ws.send(full_request, websocket.ABNF.OPCODE_BINARY)
        except websocket.WebSocketConnectionClosedException:
            print("无法发送 ASR 结束请求，连接已关闭。")

    def close(self):
        """Shut the session down: publish the transcript, end the stream, join threads.

        Bumps manul_close by 2 so the resulting on_error/on_close callbacks
        are treated as expected.  The current last_final_text (possibly empty)
        is appended to recognized_text_queue for the caller to pick up.
        """
        self.manul_close +=2
        with self.recognized_text_queue_lock:
            self.recognized_text_queue.append(self.last_final_text)
            self.last_final_text = ""
        self.send_asr_end_request()
        if self.send_audio_thread and self.send_audio_thread.is_alive():
            self.send_audio_thread.join(timeout=2)
        if self.ws and self.connected:
            self.ws.close()
        if hasattr(self, 'wst') and self.wst and self.wst.is_alive():
            self.wst.join(timeout=2)

class AudioRecorder:
    """Continuously captures microphone audio on a background thread.

    Each captured frame is a raw PCM byte string of ``chunk_size`` samples,
    appended to ``self.frames`` (a deque); consumers drain it with
    ``popleft()``.  Call ``terminate()`` once when the app shuts down.
    """

    def __init__(self, chunk_size, format, channels, rate):
        self.chunk_size = chunk_size  # samples per read (matches the VAD frame size)
        self.format = format          # pyaudio sample format (e.g. paInt16)
        self.channels = channels
        self.rate = rate              # sample rate in Hz
        self.audio = pyaudio.PyAudio()
        self.stream = None
        self.frames = collections.deque() # Use deque for efficient popleft
        self.is_recording = False
        self.record_thread = None

    def start_recording(self):
        """Open the input stream and start the capture thread (no-op if already recording)."""
        if self.is_recording:
            return

        self.frames.clear()
        self.stream = self.audio.open(format=self.format,
                                      channels=self.channels,
                                      rate=self.rate,
                                      input=True,
                                      frames_per_buffer=self.chunk_size)
        self.is_recording = True
        # Daemon thread so a blocked stream.read() can never keep the
        # interpreter alive — consistent with the other threads in this module.
        self.record_thread = threading.Thread(target=self._record_loop, daemon=True)
        self.record_thread.start()

    def _record_loop(self):
        """Capture-thread body: read fixed-size chunks until the flag is cleared."""
        while self.is_recording:
            try:
                data = self.stream.read(self.chunk_size, exception_on_overflow=False)
                self.frames.append(data)
            except IOError as e: # PyAudio surfaces device errors as IOError
                print(f"Error reading audio stream: {e}")
                break
            except Exception as e: # keep the thread from dying silently
                print(f"An unexpected error occurred in recording loop: {e}")
                break

    def stop_recording(self):
        """Stop the capture thread, then close the stream (no-op if not recording)."""
        if not self.is_recording:
            return

        self.is_recording = False
        if self.record_thread:
            # Reads return within one chunk (~30ms), so this join is short.
            self.record_thread.join()
        if self.stream:
            self.stream.stop_stream()
            self.stream.close()
            self.stream = None  # drop the closed stream so it can't be reused by mistake

    def terminate(self):
        """Release the PyAudio instance; the recorder is unusable afterwards."""
        self.audio.terminate()

class VoiceActivityDetector:
    """Thin wrapper around ``webrtcvad.Vad`` for fixed-size paInt16 frames."""

    def __init__(self, mode, sample_rate, frame_duration_ms):
        self.vad = webrtcvad.Vad(mode)
        self.sample_rate = sample_rate
        self.frame_duration_ms = frame_duration_ms
        # Samples contained in one frame of frame_duration_ms at sample_rate.
        self.frame_size = int(sample_rate * frame_duration_ms / 1000)

    def is_speech(self, audio_frame):
        """Return True if the raw paInt16 frame contains speech.

        Raises:
            ValueError: when the frame is not exactly frame_size samples long
                (two bytes per sample).
        """
        expected_bytes = self.frame_size * 2  # paInt16 -> 2 bytes per sample
        if len(audio_frame) != expected_bytes:
            raise ValueError(
                f"Audio frame must be {expected_bytes} bytes for "
                f"{self.frame_duration_ms}ms at {self.sample_rate}Hz (paInt16). "
                f"Got {len(audio_frame)} bytes."
            )
        return self.vad.is_speech(audio_frame, self.sample_rate)

def main():
    """Interactive push-to-talk loop.

    SPACE (or ENTER) toggles recording; captured frames are VAD-gated and
    streamed to Doubao ASR, and on stop (or after 30 seconds of silence) the
    final transcript is read back through Ali TTS.  The process exits via the
    exit flag (ESC hotkey, currently disabled) or Ctrl+C.
    """
    # Check if Doubao ASR API keys are configured
    # NOTE(review): the secret key is read from "asr_secret_key" while the
    # other two use the volcengine_voice_* prefix — confirm the settings keys.
    doubao_asr_app_id = settings.get("volcengine_voice_appid")
    doubao_asr_access_token = settings.get("volcengine_voice_access_token")
    doubao_asr_secret_key = settings.get("asr_secret_key")

    asr_client = DoubaoAsrClient(doubao_asr_app_id, doubao_asr_access_token, doubao_asr_secret_key)
    tts_agent = AliTTSAgent() # Initialize AliTTSAgent
    recorder = AudioRecorder(ASR_CHUNK, ASR_FORMAT, ASR_CHANNELS, ASR_RATE)
    vad_detector = VoiceActivityDetector(VAD_MODE, VAD_SAMPLE_RATE, VAD_FRAME_DURATION_MS)

    print("按空格键开始录音。再次按空格键停止录音并处理语音，或等待30秒静默超时。按 Ctrl + C 退出。")
    #tts_agent.synthesize_and_play("欢迎使用语音助手。")

    # VAD related variables
    ring_buffer = collections.deque(maxlen=VAD_PADDING_DURATION_MS // VAD_FRAME_DURATION_MS)
    triggered = False # True when speech is detected and ASR is active
    is_listening_for_speech = False # New flag to indicate if we are actively listening for speech (manual mode)
    last_speech_time = time.time() # To track silence for timeout

    # Thread for continuous audio processing
    audio_processing_thread = None
    stop_processing_event = threading.Event()
    timeout_occurred = False # New flag to indicate if silence timeout occurred

    def audio_processing_loop():
        """Drain recorder frames: VAD-gate until triggered, then stream every
        frame to ASR; enforces the 30s silence timeout in manual mode."""
        nonlocal triggered, is_listening_for_speech, last_speech_time, timeout_occurred
        while not stop_processing_event.is_set():
            if recorder.is_recording and recorder.frames:
                frame = recorder.frames.popleft()

                try:
                    is_speech = vad_detector.is_speech(frame)
                except ValueError as e:
                    print(f"VAD error: {e}")
                    continue # Skip this frame if it's malformed

                if not triggered:
                    ring_buffer.append((frame, is_speech))
                    # Count voiced frames in the ring buffer
                    num_voiced = sum(1 for _, speech in ring_buffer if speech)

                    # If enough speech is detected, start ASR
                    if num_voiced >= ring_buffer.maxlen * 0.9: # At least 90% of padding is speech
                        triggered = True
                        asr_client.connect() # Connect ASR when speech starts
                        asr_client.start_sending_audio()
                        # Send all frames in the ring buffer (including initial silence) to ASR
                        for f, _ in ring_buffer:
                            asr_client.send_audio(f)
                        ring_buffer.clear()
                else: # ASR is already triggered
                    asr_client.send_audio(frame) # Send current speech frame to ASR
                    if is_speech:
                        last_speech_time = time.time() # Update last speech time

                    # If we are in manual listening mode and 30 seconds of silence has passed
                    if is_listening_for_speech and (time.time() - last_speech_time > 30):
                        print("\n30 seconds of silence detected. Stopping ASR and processing.")
                        # (redundant: timeout_occurred is already declared nonlocal above)
                        nonlocal timeout_occurred
                        timeout_occurred = True
                        recorder.stop_recording() # Stop recording on timeout
                        stop_processing_event.set() # Signal to stop the audio processing loop
                        handle_asr_stop_and_tts() # Handle ASR stop and TTS on timeout
                        break # Exit the audio processing loop
            else:
                time.sleep(0.01) # Small delay if no audio or not recording

    def on_key_press_space():
        """Hotkey handler: first press starts recording + ASR; second press
        stops everything and plays the result back."""
        nonlocal triggered, is_listening_for_speech, last_speech_time, timeout_occurred

        if not recorder.is_recording:
            # --- Start path ---
            print()
            sys.stdout.write(f"\r>> （语音）...")
            sys.stdout.flush()
            recorder.start_recording()
            # Start audio processing thread if not already running
            nonlocal audio_processing_thread
            if audio_processing_thread is None or not audio_processing_thread.is_alive():
                stop_processing_event.clear()
                audio_processing_thread = threading.Thread(target=audio_processing_loop)
                audio_processing_thread.start()

            # Connect ASR and start sending audio
            asr_client.connect()
            asr_client.start_sending_audio()
            is_listening_for_speech = True
            last_speech_time = time.time() # Reset last speech time when starting
            triggered = True # ASR is now actively triggered by manual start
        else:
            # --- Stop path ---
            recorder.stop_recording()
            is_listening_for_speech = False # Stop listening for speech

            # Stop audio processing thread
            stop_processing_event.set()
            if audio_processing_thread and audio_processing_thread.is_alive():
                audio_processing_thread.join()
            audio_processing_thread = None

            # Call a new function to handle ASR stopping and TTS playing
            handle_asr_stop_and_tts()
            ring_buffer.clear()

    def handle_asr_stop_and_tts():
        """Close the ASR session, wait for the transcript, and speak it back."""
        nonlocal triggered, is_listening_for_speech, timeout_occurred
        if triggered: # Only close ASR if it was actually triggered
            triggered = False
            is_listening_for_speech = False # Ensure this is reset
            # close() publishes last_final_text to recognized_text_queue.
            asr_client.close()
            final_text = ""
            # Wait for ASR result to be processed
            wait_start_time = time.time()
            while not asr_client.recognized_text_queue and (time.time() - wait_start_time < 5): # Wait up to 5 seconds for final text
                time.sleep(0.1)

            with asr_client.recognized_text_queue_lock:
                if asr_client.recognized_text_queue:
                    final_text = asr_client.recognized_text_queue.popleft()

            print(f"\n> {final_text}")
            if final_text:
                tts_agent.synthesize_and_play(f"您说的是：{final_text}")
            else:
                tts_agent.synthesize_and_play("没有识别到内容。")
            timeout_occurred = False # Reset timeout flag after handling

    # Imported here so the module can load on systems where the 'keyboard'
    # package (or the privileges it needs) is unavailable until main() runs.
    import keyboard

    # This flag will be controlled by the ESC hotkey to signal exit.
    exit_app = False

    def on_esc_pressed():
        """ESC hotkey handler (currently not registered): request shutdown."""
        nonlocal exit_app
        print("\nESC pressed. Preparing to exit...")
        exit_app = True

    #tts_agent.synthesize_and_play("开始测试")
    # Register hotkeys
    keyboard.add_hotkey('space', on_key_press_space)
    keyboard.add_hotkey('enter', on_key_press_space)
    #keyboard.add_hotkey('esc', on_esc_pressed)

    print("Application is running. Press SPACE to record, Ctrl + C to exit.")

    # Main loop to keep the application alive.
    # It will only exit when the 'exit_app' flag is set to True by the ESC hotkey.
    while not exit_app:
        time.sleep(0.1) # A short sleep to prevent the loop from consuming 100% CPU.

    # --- Cleanup sequence ---
    print("Starting graceful shutdown...")

    # Unhook all hotkeys to prevent any further keyboard events.
    keyboard.unhook_all_hotkeys()

    # Signal the audio processing thread to stop.
    stop_processing_event.set()
    if audio_processing_thread and audio_processing_thread.is_alive():
        print("Waiting for audio processing thread to finish...")
        audio_processing_thread.join(timeout=2)

    # Ensure the recorder is stopped.
    if recorder.is_recording:
        print("Stopping active recording...")
        recorder.stop_recording()

    # Terminate the PyAudio instance.
    recorder.terminate()

    # Close the ASR client connection.
    print("Closing ASR client...")
    asr_client.close()

    print("Cleanup complete. Goodbye!")

# Script entry point.
if __name__ == "__main__":
    main()