#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
实时麦克风语音转文本模块
基于豆包语音识别API的实时语音转文本工具
"""

import asyncio
import aiohttp
import json
import struct
import gzip
import uuid
import logging
import threading
import queue
import time
from typing import Optional, List, Dict, Any, Tuple, AsyncGenerator, Callable
import pyaudio
import wave
import io
from dotenv import load_dotenv
import os
import subprocess

# Load environment variables from a .env file, if present
load_dotenv()

# Unified logging setup (writes to run.log and echoes to the console)
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('run.log'),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)

# ===================== Code inlined from the former voice_recognition_demo =====================
# Constants
DEFAULT_SAMPLE_RATE = 16000  # PCM sample rate in Hz, used by both recorder and ASR request

class ProtocolVersion:
    """Protocol version nibble (upper half of header byte 0)."""
    V1 = 0x1

class MessageType:
    """Message-type nibble values for the binary protocol header."""
    CLIENT_FULL_REQUEST = 0x1        # initial request carrying audio config
    CLIENT_AUDIO_ONLY_REQUEST = 0x2  # streaming audio frame
    SERVER_FULL_RESPONSE = 0x9       # normal server response
    SERVER_ERROR_RESPONSE = 0xF      # server-side error frame

class MessageTypeSpecificFlags:
    """Flag nibble accompanying the message type."""
    NO_SEQUENCE = 0x0        # frame carries no sequence number
    POS_SEQUENCE = 0x1       # positive sequence number present
    NEG_SEQUENCE = 0x2       # negative sequence (end-of-stream marker)
    NEG_WITH_SEQUENCE = 0x3  # final frame with a (negated) sequence number

class SerializationType:
    """Payload serialization method nibble."""
    NO_SERIALIZATION = 0x0  # raw bytes
    JSON = 0x1              # UTF-8 JSON

class CompressionType:
    """Payload compression method nibble."""
    GZIP = 0x1  # gzip-compressed payload

class Config:
    """Loads speech-recognition auth credentials from environment variables."""
    def __init__(self):
        # auth is a dict with "app_key" and "access_key" entries.
        self.auth = self._load_config()
    
    def _load_config(self):
        """Read VOICE_APP_KEY / VOICE_ACCESS_KEY from the environment.

        Returns:
            dict: {"app_key": ..., "access_key": ...}

        Raises:
            RuntimeError: if either variable is unset (the underlying
                ValueError is logged and wrapped).
        """
        try:
            app_key = os.getenv('VOICE_APP_KEY')
            access_key = os.getenv('VOICE_ACCESS_KEY')
            if not app_key:
                raise ValueError("VOICE_APP_KEY 环境变量未设置")
            if not access_key:
                raise ValueError("VOICE_ACCESS_KEY 环境变量未设置")
            return {"app_key": app_key, "access_key": access_key}
        except Exception as e:
            logger.error(f"Failed to load config: {e}")
            raise RuntimeError(f"Configuration loading failed: {e}")

    @property
    def app_key(self) -> str:
        return self.auth["app_key"]

    @property
    def access_key(self) -> str:
        return self.auth["access_key"]

# Module-level singleton; NOTE: raises at import time if the env vars are missing.
config = Config()

class CommonUtils:
    """Thin gzip helpers used for protocol payload (de)compression."""

    @staticmethod
    def gzip_compress(data: bytes) -> bytes:
        """Return *data* compressed in gzip format."""
        packed: bytes = gzip.compress(data)
        return packed

    @staticmethod
    def gzip_decompress(data: bytes) -> bytes:
        """Return the original bytes from gzip-compressed *data*."""
        unpacked: bytes = gzip.decompress(data)
        return unpacked

class AsrRequestHeader:
    """Fluent builder for the 4-byte binary request header.

    Wire layout: [version | header size] [message type | flags]
    [serialization | compression] [reserved byte].
    """

    def __init__(self):
        # Defaults match the initial full-client handshake request.
        self.message_type = MessageType.CLIENT_FULL_REQUEST
        self.message_type_specific_flags = MessageTypeSpecificFlags.POS_SEQUENCE
        self.serialization_type = SerializationType.JSON
        self.compression_type = CompressionType.GZIP
        self.reserved_data = b"\x00"

    def with_message_type(self, message_type: int) -> 'AsrRequestHeader':
        """Set the message-type nibble; returns self for chaining."""
        self.message_type = message_type
        return self

    def with_message_type_specific_flags(self, flags: int) -> 'AsrRequestHeader':
        """Set the flags nibble; returns self for chaining."""
        self.message_type_specific_flags = flags
        return self

    def with_serialization_type(self, serialization_type: int) -> 'AsrRequestHeader':
        """Set the serialization nibble; returns self for chaining."""
        self.serialization_type = serialization_type
        return self

    def with_compression_type(self, compression_type: int) -> 'AsrRequestHeader':
        """Set the compression nibble; returns self for chaining."""
        self.compression_type = compression_type
        return self

    def with_reserved_data(self, reserved_data: bytes) -> 'AsrRequestHeader':
        """Replace the reserved trailing byte(s); returns self for chaining."""
        self.reserved_data = reserved_data
        return self

    def to_bytes(self) -> bytes:
        """Serialize the header to its wire form."""
        packed = bytes((
            (ProtocolVersion.V1 << 4) | 1,  # version nibble + header size in 32-bit words
            (self.message_type << 4) | self.message_type_specific_flags,
            (self.serialization_type << 4) | self.compression_type,
        ))
        return packed + self.reserved_data

    @staticmethod
    def default_header() -> 'AsrRequestHeader':
        """Return a header pre-populated with the default field values."""
        return AsrRequestHeader()

class RequestBuilder:
    """Builds the HTTP auth headers and binary frames sent to the ASR service."""

    @staticmethod
    def new_auth_headers() -> Dict[str, str]:
        """Return auth headers with a freshly generated request id."""
        return {
            "X-Api-Resource-Id": "volc.bigasr.sauc.duration",
            "X-Api-Request-Id": str(uuid.uuid4()),
            "X-Api-Access-Key": config.access_key,
            "X-Api-App-Key": config.app_key
        }

    @staticmethod
    def new_full_client_request(seq: int) -> bytes:
        """Build the initial frame describing audio format and model options."""
        header = AsrRequestHeader.default_header() \
            .with_message_type_specific_flags(MessageTypeSpecificFlags.POS_SEQUENCE)

        # Key order is preserved by dict literals, so the serialized JSON
        # (and hence the wire bytes) stays stable.
        payload = {
            "user": {"uid": "demo_uid"},
            "audio": {
                "format": "wav",
                "codec": "raw",
                "rate": 16000,
                "bits": 16,
                "channel": 1
            },
            "request": {
                "model_name": "bigmodel",
                "enable_itn": True,
                "enable_punc": True,
                "enable_ddc": True,
                "show_utterances": True,
                "enable_nonstream": False
            }
        }

        body = CommonUtils.gzip_compress(json.dumps(payload).encode('utf-8'))

        # Frame: header | signed sequence | payload size | gzip(JSON)
        return (header.to_bytes()
                + struct.pack('>i', seq)
                + struct.pack('>I', len(body))
                + body)

    @staticmethod
    def new_audio_only_request(seq: int, segment: bytes, is_last: bool = False) -> bytes:
        """Build an audio frame; the final frame carries a negated sequence."""
        header = AsrRequestHeader.default_header()
        header.with_message_type(MessageType.CLIENT_AUDIO_ONLY_REQUEST)
        if is_last:
            header.with_message_type_specific_flags(MessageTypeSpecificFlags.NEG_WITH_SEQUENCE)
            seq = -seq
        else:
            header.with_message_type_specific_flags(MessageTypeSpecificFlags.POS_SEQUENCE)

        body = CommonUtils.gzip_compress(segment)

        # Frame: header | signed sequence | payload size | gzip(PCM/WAV)
        return (header.to_bytes()
                + struct.pack('>i', seq)
                + struct.pack('>I', len(body))
                + body)

class AsrResponse:
    """Mutable holder for the fields decoded from one server frame."""

    def __init__(self):
        self.code = 0                  # server status code; 0 means OK
        self.event = 0                 # optional event id from the flags
        self.is_last_package = False   # True when the server marks end-of-stream
        self.payload_sequence = 0      # sequence number echoed by the server
        self.payload_size = 0          # declared payload size in bytes
        self.payload_msg = None        # decoded JSON payload, if any

    def to_dict(self) -> Dict[str, Any]:
        """Return the response fields as a plain dict."""
        return {name: getattr(self, name) for name in (
            "code", "event", "is_last_package",
            "payload_sequence", "payload_size", "payload_msg")}

class ResponseParser:
    """Decodes binary server frames into AsrResponse objects."""

    @staticmethod
    def parse_response(msg: bytes) -> AsrResponse:
        """Parse one binary WebSocket message from the server.

        Wire layout (big-endian): a header of ``header_size`` 32-bit words,
        then optional sequence/event fields selected by the flag bits, then
        a size-prefixed payload that may be gzip-compressed JSON.

        Args:
            msg: raw binary frame received over the WebSocket.

        Returns:
            AsrResponse with whatever fields could be decoded; on a
            decompression or JSON error the partially filled response is
            returned and the error is logged.
        """
        response = AsrResponse()
        
        logger.debug(f"解析响应消息，长度: {len(msg)} 字节")
        logger.debug(f"原始消息前16字节: {msg[:16].hex() if len(msg) >= 16 else msg.hex()}")
        
        # Byte 0 low nibble: header size in 4-byte words.
        header_size = msg[0] & 0x0f
        # Byte 1: message type (high nibble) and its flags (low nibble).
        message_type = msg[1] >> 4
        message_type_specific_flags = msg[1] & 0x0f
        # Byte 2: serialization method (high nibble) and compression (low nibble).
        serialization_method = msg[2] >> 4
        message_compression = msg[2] & 0x0f
        
        logger.debug(f"解析头部: header_size={header_size}, message_type={message_type}, flags={message_type_specific_flags}, serialization={serialization_method}, compression={message_compression}")
        
        # Skip the whole header (header_size is counted in 32-bit words).
        payload = msg[header_size*4:]
        
        # Flag bits: 0x01 = sequence present, 0x02 = last package,
        # 0x04 = event id present. Order of the slices below matters.
        if message_type_specific_flags & 0x01:
            response.payload_sequence = struct.unpack('>i', payload[:4])[0]
            payload = payload[4:]
        if message_type_specific_flags & 0x02:
            response.is_last_package = True
        if message_type_specific_flags & 0x04:
            response.event = struct.unpack('>i', payload[:4])[0]
            payload = payload[4:]
            
        # Full responses carry a 4-byte size prefix; error responses carry
        # a 4-byte error code followed by the 4-byte size prefix.
        if message_type == MessageType.SERVER_FULL_RESPONSE:
            response.payload_size = struct.unpack('>I', payload[:4])[0]
            payload = payload[4:]
        elif message_type == MessageType.SERVER_ERROR_RESPONSE:
            response.code = struct.unpack('>i', payload[:4])[0]
            response.payload_size = struct.unpack('>I', payload[4:8])[0]
            payload = payload[8:]
            
        if not payload:
            return response
            
        # Decompress gzip payloads; on failure return what we have so far.
        if message_compression == CompressionType.GZIP:
            try:
                payload = CommonUtils.gzip_decompress(payload)
            except Exception as e:
                logger.error(f"Failed to decompress payload: {e}")
                return response
                
        # Deserialize JSON payloads; other serializations are left undecoded.
        try:
            if serialization_method == SerializationType.JSON:
                response.payload_msg = json.loads(payload.decode('utf-8'))
        except Exception as e:
            logger.error(f"Failed to parse payload: {e}")
            
        return response

# ===================== 内联结束，以下为原有实时语音转写逻辑 =====================

# 移除跨文件导入与路径注入（examples/voice_recognition_demo.py 已内联）
# 原有代码：
# import sys
# from pathlib import Path
# examples_path = Path(__file__).parent.parent.parent / "examples"
# sys.path.insert(0, str(examples_path))
# from voice_recognition_demo import (...)

class MicrophoneRecorder:
    """Background microphone capture.

    Reads fixed-size PCM chunks from PyAudio on a daemon thread and hands
    them to consumers through a thread-safe queue.
    """

    def __init__(self,
                 sample_rate: int = DEFAULT_SAMPLE_RATE,
                 channels: int = 1,
                 chunk_size: int = 1024,
                 format_type: int = pyaudio.paInt16):
        self.sample_rate = sample_rate    # sampling rate in Hz
        self.channels = channels          # 1 = mono
        self.chunk_size = chunk_size      # frames per stream read
        self.format_type = format_type    # PyAudio sample format (16-bit PCM)
        self.audio = None                 # pyaudio.PyAudio handle
        self.stream = None                # open input stream
        self.is_recording = False
        self.audio_queue = queue.Queue()  # raw PCM chunks from the capture thread
        self.record_thread = None

    def start_recording(self):
        """Open the input stream and start the capture thread (idempotent).

        Raises:
            Exception: re-raises whatever PyAudio raised; partially acquired
                resources are released first.
        """
        if self.is_recording:
            return

        try:
            self.audio = pyaudio.PyAudio()
            self.stream = self.audio.open(
                format=self.format_type,
                channels=self.channels,
                rate=self.sample_rate,
                input=True,
                frames_per_buffer=self.chunk_size
            )

            self.is_recording = True
            self.record_thread = threading.Thread(target=self._record_audio)
            self.record_thread.daemon = True
            self.record_thread.start()

            logger.info(f"开始录音: {self.sample_rate}Hz, {self.channels}声道")

        except Exception as e:
            logger.error(f"启动录音失败: {e}")
            # Bug fix: release any partially-acquired audio resources so a
            # failed start does not leak the stream or the PyAudio handle.
            if self.stream is not None:
                try:
                    self.stream.close()
                except Exception:
                    pass  # best effort; the original error is re-raised below
                self.stream = None
            if self.audio is not None:
                self.audio.terminate()
                self.audio = None
            raise

    def stop_recording(self):
        """Stop the capture thread and release PyAudio resources (idempotent)."""
        if not self.is_recording:
            return

        self.is_recording = False

        if self.record_thread:
            # The thread is a daemon; don't block forever on a stuck read.
            self.record_thread.join(timeout=1.0)

        if self.stream:
            self.stream.stop_stream()
            self.stream.close()
            self.stream = None  # prevent accidental reuse of a closed stream

        if self.audio:
            self.audio.terminate()
            self.audio = None

        logger.info("录音已停止")

    def _record_audio(self):
        """Capture-thread loop: push raw PCM chunks onto the queue."""
        while self.is_recording:
            try:
                # exception_on_overflow=False: silently drop overflowed frames
                # instead of raising when the consumer falls behind.
                data = self.stream.read(self.chunk_size, exception_on_overflow=False)
                self.audio_queue.put(data)
            except Exception as e:
                logger.error(f"录音数据读取错误: {e}")
                break

    def get_audio_data(self) -> Optional[bytes]:
        """Return the next captured chunk, or None if the queue is empty."""
        try:
            return self.audio_queue.get_nowait()
        except queue.Empty:
            return None

    def create_wav_header(self, data_length: int) -> bytes:
        """Build a 44-byte RIFF/WAVE header for 16-bit PCM data.

        Args:
            data_length: size in bytes of the PCM data that will follow.

        Returns:
            The serialized header bytes (little-endian fields per RIFF spec).
        """
        byte_rate = self.sample_rate * self.channels * 2  # 16-bit samples = 2 bytes
        block_align = self.channels * 2

        header = bytearray()
        header.extend(b'RIFF')
        header.extend(struct.pack('<I', 36 + data_length))  # total size - 8
        header.extend(b'WAVE')
        header.extend(b'fmt ')
        header.extend(struct.pack('<I', 16))  # fmt chunk size
        header.extend(struct.pack('<H', 1))   # audio format: PCM
        header.extend(struct.pack('<H', self.channels))
        header.extend(struct.pack('<I', self.sample_rate))
        header.extend(struct.pack('<I', byte_rate))
        header.extend(struct.pack('<H', block_align))
        header.extend(struct.pack('<H', 16))  # bits per sample
        header.extend(b'data')
        header.extend(struct.pack('<I', data_length))

        return bytes(header)

class RealtimeAsrClient:
    """Realtime speech-recognition client over the ASR WebSocket protocol.

    Captures microphone audio, streams it to the server in
    ``segment_duration``-millisecond WAV segments, and reports recognized
    text through the ``on_result`` callback. Intended to be used as an
    async context manager (``async with``).
    """
    
    def __init__(self, 
                 url: Optional[str] = None,
                 segment_duration: int = 200,
                 on_result: Optional[Callable[[str, bool], None]] = None,
                 on_error: Optional[Callable[[str], None]] = None):
        # Endpoint may be overridden via the VOICE_WEBSOCKET_URL env var.
        self.url = url or os.getenv("VOICE_WEBSOCKET_URL", "wss://openspeech.bytedance.com/api/v3/sauc/bigmodel_async")
        self.segment_duration = segment_duration  # segment length in milliseconds
        self.seq = 1  # protocol sequence number, incremented per frame sent
        self.conn = None  # aiohttp WebSocket connection
        self.session = None  # aiohttp ClientSession, created in __aenter__
        self.recorder = MicrophoneRecorder()
        self.is_running = False
        self.audio_buffer = bytearray()  # captured PCM not yet sent
        self.segment_size = 0  # bytes per segment, computed in start()
        
        # Callbacks (default to simple console printers)
        self.on_result = on_result or self._default_on_result
        self.on_error = on_error or self._default_on_error
        
    def _default_on_result(self, text: str, is_final: bool):
        """Default result handler: print the text with a final/partial tag."""
        status = "[最终]" if is_final else "[临时]"
        print(f"{status} {text}")
        
    def _default_on_error(self, error: str):
        """Default error handler: print the error."""
        print(f"错误: {error}")
        
    async def __aenter__(self):
        self.session = aiohttp.ClientSession()
        return self
        
    async def __aexit__(self, exc_type, exc, tb):
        # Stop streaming first, then dispose of the HTTP session.
        await self.stop()
        if self.session and not self.session.closed:
            await self.session.close()
            
    async def create_connection(self):
        """Open the WebSocket connection using the auth headers."""
        headers = RequestBuilder.new_auth_headers()
        try:
            self.conn = await self.session.ws_connect(self.url, headers=headers)
            logger.info(f"已连接到 {self.url}")
        except Exception as e:
            logger.error(f"WebSocket连接失败: {e}")
            raise
            
    async def send_full_client_request(self):
        """Send the initial full client request and validate the server reply.

        Raises:
            Exception: if the server returns a non-zero code or a
                non-binary handshake message.
        """
        request = RequestBuilder.new_full_client_request(self.seq)
        self.seq += 1
        
        try:
            await self.conn.send_bytes(request)
            logger.info(f"已发送完整客户端请求")
            
            # Wait for the handshake response
            msg = await self.conn.receive()
            if msg.type == aiohttp.WSMsgType.BINARY:
                response = ResponseParser.parse_response(msg.data)
                if response.code != 0:
                    raise Exception(f"服务器返回错误: {response.code}")
                logger.info("服务器响应正常")
            else:
                raise Exception(f"意外的消息类型: {msg.type}")
                
        except Exception as e:
            logger.error(f"发送完整客户端请求失败: {e}")
            raise
            
    def calculate_segment_size(self) -> int:
        """Return the number of PCM bytes in one segment_duration window."""
        # Bytes per second for 16-bit audio (2 bytes per sample per channel)
        bytes_per_second = self.recorder.sample_rate * self.recorder.channels * 2
        # Bytes per segment
        segment_bytes = bytes_per_second * self.segment_duration // 1000
        return segment_bytes
        
    async def start(self):
        """Run a realtime recognition session until it ends or fails."""
        if self.is_running:
            return
            
        try:
            # 1. Open the WebSocket connection
            await self.create_connection()
            
            # 2. Send the initial full client request
            await self.send_full_client_request()
            
            # 3. Compute the audio segment size
            self.segment_size = self.calculate_segment_size()
            logger.info(f"音频分段大小: {self.segment_size} 字节")
            
            # 4. Start microphone capture
            self.recorder.start_recording()
            
            # 5. Mark the session as running
            self.is_running = True
            
            # Run sender and receiver loops concurrently
            audio_task = asyncio.create_task(self._audio_processing_loop())
            response_task = asyncio.create_task(self._response_processing_loop())
            
            # Wait for both loops to finish (exceptions are collected, not raised)
            await asyncio.gather(audio_task, response_task, return_exceptions=True)
            
        except Exception as e:
            logger.error(f"启动实时语音识别失败: {e}")
            self.on_error(str(e))
            raise
        finally:
            await self.stop()
            
    async def stop(self):
        """Flush the final audio segment and shut everything down (idempotent)."""
        if not self.is_running:
            return
            
        self.is_running = False
        
        # Stop microphone capture
        self.recorder.stop_recording()
        
        # Send any buffered audio as the final (negative-sequence) frame
        if self.audio_buffer and self.conn:
            try:
                await self._send_final_audio_segment()
            except Exception as e:
                logger.error(f"发送最终音频段失败: {e}")
        
        # Close the WebSocket connection
        if self.conn and not self.conn.closed:
            await self.conn.close()
            
        logger.info("实时语音识别已停止")
        
    async def _audio_processing_loop(self):
        """Sender loop: drain the recorder queue and ship audio segments."""
        last_send_time = time.time()
        
        while self.is_running:
            try:
                # Pull captured audio from the recorder queue
                audio_data = self.recorder.get_audio_data()
                if audio_data:
                    self.audio_buffer.extend(audio_data)
                    
                # Send when a full segment has accumulated or the time window elapsed
                current_time = time.time()
                if (len(self.audio_buffer) >= self.segment_size or 
                    current_time - last_send_time >= self.segment_duration / 1000):
                    
                    if self.audio_buffer:
                        await self._send_audio_segment()
                        last_send_time = current_time
                        
                await asyncio.sleep(0.01)  # yield to the event loop; keep CPU low
                
            except Exception as e:
                logger.error(f"音频处理错误: {e}")
                self.on_error(f"音频处理错误: {e}")
                break
                
    async def _response_processing_loop(self):
        """Receiver loop: parse server frames and dispatch recognition results."""
        try:
            async for msg in self.conn:
                if not self.is_running:
                    break
                    
                if msg.type == aiohttp.WSMsgType.BINARY:
                    logger.debug(f"收到二进制响应，大小: {len(msg.data)} 字节")
                    response = ResponseParser.parse_response(msg.data)
                    logger.debug(f"解析响应: code={response.code}, is_last={response.is_last_package}")
                    await self._handle_response(response)
                    
                    # Stop on the last package or on a server error
                    if response.is_last_package or response.code != 0:
                        break
                        
                elif msg.type == aiohttp.WSMsgType.ERROR:
                    error_msg = f"WebSocket错误: {msg.data}"
                    logger.error(error_msg)
                    self.on_error(error_msg)
                    break
                    
                elif msg.type == aiohttp.WSMsgType.CLOSED:
                    logger.info("WebSocket连接已关闭")
                    break
                    
                elif msg.type == aiohttp.WSMsgType.TEXT:
                    logger.debug(f"收到文本消息: {msg.data}")
                    
        except Exception as e:
            logger.error(f"响应处理错误: {e}")
            self.on_error(f"响应处理错误: {e}")
            
    async def _send_audio_segment(self):
        """Send up to one segment_size worth of buffered audio."""
        if not self.audio_buffer:
            return
            
        # Wrap the PCM segment in a WAV container.
        # NOTE(review): a fresh WAV header is prepended to EVERY segment —
        # the server appears to accept this; confirm against the ASR API docs.
        segment_data = bytes(self.audio_buffer[:self.segment_size])
        wav_header = self.recorder.create_wav_header(len(segment_data))
        wav_data = wav_header + segment_data
        
        # Send the audio-only frame
        request = RequestBuilder.new_audio_only_request(
            self.seq, wav_data, is_last=False
        )
        
        await self.conn.send_bytes(request)
        logger.debug(f"已发送音频段: seq={self.seq}, size={len(segment_data)}")
        
        # Drop the bytes that were just sent
        self.audio_buffer = self.audio_buffer[self.segment_size:]
        self.seq += 1
        
    async def _send_final_audio_segment(self):
        """Send all remaining buffered audio as the final frame."""
        if not self.audio_buffer:
            return
            
        # Wrap the remaining PCM in a WAV container
        segment_data = bytes(self.audio_buffer)
        wav_header = self.recorder.create_wav_header(len(segment_data))
        wav_data = wav_header + segment_data
        
        # The final frame is flagged is_last (negated sequence number)
        request = RequestBuilder.new_audio_only_request(
            self.seq, wav_data, is_last=True
        )
        
        await self.conn.send_bytes(request)
        logger.info(f"已发送最终音频段: seq={self.seq}, size={len(segment_data)}")
        
        self.audio_buffer.clear()
        
    async def _handle_response(self, response: AsrResponse):
        """Extract recognized text from a parsed response and invoke callbacks."""
        try:
            logger.debug(f"处理响应: code={response.code}, payload_msg={response.payload_msg}")
            
            if response.code != 0:
                error_msg = f"识别错误: code={response.code}"
                logger.error(error_msg)
                self.on_error(error_msg)
                return
                
            # Expected payload shape: {"result": {"text": ...}} —
            # presumably per the ASR API schema; verify against server docs.
            if response.payload_msg and 'result' in response.payload_msg:
                result = response.payload_msg['result']
                logger.debug(f"识别结果: {result}")
                
                if 'text' in result:
                    text = result['text'].strip()
                    logger.debug(f"提取文本: '{text}', 长度: {len(text)}")
                    if text:
                        is_final = response.is_last_package
                        logger.info(f"识别到文本: '{text}' (final={is_final})")
                        self.on_result(text, is_final)
                    else:
                        logger.debug("文本为空，跳过")
                else:
                    logger.debug("结果中没有text字段")
            else:
                logger.debug("响应中没有result字段")
                        
        except Exception as e:
            logger.error(f"处理响应错误: {e}")
            self.on_error(f"处理响应错误: {e}")

class VoiceToTextApp:
    """Console front-end that wires RealtimeAsrClient to stdout."""

    def __init__(self):
        self.client = None       # active RealtimeAsrClient while running
        self.is_running = False  # True while a session is in progress

    def on_result(self, text: str, is_final: bool):
        """Print a timestamped recognition line; final lines end with a newline."""
        stamp = time.strftime("%H:%M:%S")
        label = "[完成]" if is_final else "[识别中]"
        line_end = "\n" if is_final else ""
        print(f"\r{stamp} {label} {text}", end=line_end)

    def on_error(self, error: str):
        """Print an error line."""
        print(f"\n❌ 错误: {error}")

    async def start(self):
        """Run one recognition session until interrupted or failed."""
        print("🎤 实时语音转文本工具")
        print("按 Ctrl+C 停止录音\n")

        try:
            async with RealtimeAsrClient(
                on_result=self.on_result,
                on_error=self.on_error
            ) as client:
                self.client = client
                self.is_running = True
                await client.start()

        except KeyboardInterrupt:
            print("\n\n👋 用户停止录音")
        except Exception as e:
            print(f"\n❌ 应用错误: {e}")
        finally:
            self.is_running = False
            
async def main():
    """Program entry point: run the voice-to-text app to completion."""
    await VoiceToTextApp().start()
    
if __name__ == "__main__":
    # Run the async entry point; Ctrl+C exits cleanly with a farewell message.
    try:
        asyncio.run(main())
    except KeyboardInterrupt:
        print("\n程序已退出")