#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
实时语音识别服务
提供完整的语音识别API服务，支持多种模型和实时流式识别
"""

import os
import argparse
import time
import threading
import queue
import json
import numpy as np
import sounddevice as sd
import onnxruntime as ort
import librosa
from flask import Flask, request, jsonify, render_template_string
from flask_socketio import SocketIO, emit
import base64
import io
import wave

# Command-line configuration.
# NOTE(review): parse_args() runs at import time, so importing this module
# under a WSGI server that supplies its own argv may fail — consider moving
# this into main(); confirm with the deployment setup.
parser = argparse.ArgumentParser(description='实时语音识别服务')
parser.add_argument('--model_path', type=str, default='./models/funasr_int8.onnx', help='模型路径')
parser.add_argument('--host', type=str, default='0.0.0.0', help='服务器地址')
parser.add_argument('--port', type=int, default=5000, help='服务器端口')
parser.add_argument('--sample_rate', type=int, default=16000, help='采样率')
parser.add_argument('--chunk_duration', type=float, default=2.0, help='音频块时长(秒)')
args = parser.parse_args()

# Flask application plus a Socket.IO layer for the realtime (websocket) API.
app = Flask(__name__)
app.config['SECRET_KEY'] = 'asr_service_secret_key'
socketio = SocketIO(app, cors_allowed_origins="*")

class ASREngine:
    """CTC-style speech-recognition engine backed by an ONNX model.

    Loads the model once in ``__init__`` and exposes :meth:`recognize`,
    which maps a 1-D float audio array to a transcription dict.
    """

    def __init__(self, model_path, sample_rate=16000):
        # Path to the ONNX model file and the sample rate the features assume.
        self.model_path = model_path
        self.sample_rate = sample_rate
        self.session = None    # ort.InferenceSession, created by _load_model()
        self.char_dict = {}    # token id -> character; id 0 is the CTC blank

        self._load_model()
        self._init_char_dict()

    def _load_model(self):
        """Create the ONNX Runtime session; re-raises on failure."""
        try:
            # Session tuning: bounded intra-op threads, full graph
            # optimization, and memory pattern/reuse enabled.
            options = ort.SessionOptions()
            options.intra_op_num_threads = 3
            options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL
            options.enable_mem_pattern = True
            options.enable_mem_reuse = True

            self.session = ort.InferenceSession(self.model_path, options)
            print(f"模型加载成功: {self.model_path}")

        except Exception as e:
            print(f"模型加载失败: {str(e)}")
            raise

    def _init_char_dict(self):
        """Build the token-id -> character lookup table.

        NOTE(review): this is a simplified, hard-coded vocabulary of common
        Chinese characters; a real deployment should load the vocabulary
        that matches the model so ids actually line up with its output.
        """
        common_chars = "的一是在不了有和人这中大为上个国我以要他时来用们生到作地于出就分对成会可主发年动同工也能下过子说产种面而方后多定行学法所民得经十三之进着等部度家电力里如水化高自二理起小物现实加量都两体制机当使点从业本去把性好应开它合还因由其些然前外天政四日那社义事平形相全表间样与关各重新线内数正心反你明看原又么利比或但质气第向道命此变条只没结解问意建月公无系军很情者最立代想已通并提直题党程展五果料象员革位入常文总次品式活设及管特件长求老头基资边流路级少图山统接知较将组见计别她手角期根论运农指几九区强放决西被干做必战先回则任取据处队南给色光门即保治北造百规热领七海口东导器压志世金增争济阶油思术极交受联什认六共权收证改清己美再采转更单风切打白教速花带安场身车例真务具万每目至达走积示议声报斗完类八离华名确才科张信马节话米整空元况今集温传土许步群广石记需段研界拉林律叫且究观越织装影算低持音众书布复容儿须际商非验连断深难近矿千周委素技备半办青省列习响约支般史感劳便团往酸历市克何除消构府称太准精值号率族维划选标写存候毛亲快效斯院查江型眼王按格养易置派层片始却专状育厂京识适属圆包火住调满县局照参红细引听该铁价严"

        # Token ids start at 1; 0 is reserved for the CTC blank.
        for i, char in enumerate(common_chars):
            self.char_dict[i + 1] = char
        self.char_dict[0] = ''  # blank token

    def extract_features(self, audio_data):
        """Convert raw audio into a normalized log-mel feature tensor.

        Parameters:
            audio_data: numpy float array; flattened to 1-D if needed.

        Returns:
            float32 array of shape (1, n_mels, frames). On failure a zero
            tensor of shape (1, 80, 500) is returned so the caller keeps
            running (deliberate best-effort behavior).
        """
        try:
            # Features expect mono 1-D audio.
            if audio_data.ndim > 1:
                audio_data = audio_data.flatten()

            # 80-bin mel spectrogram: 25 ms window / 10 ms hop at 16 kHz.
            mel_spec = librosa.feature.melspectrogram(
                y=audio_data,
                sr=self.sample_rate,
                n_fft=400,
                hop_length=160,
                win_length=400,
                n_mels=80,
                fmin=0,
                fmax=8000
            )

            # Log scale, then global mean/variance normalization
            # (epsilon guards against a zero std on silent input).
            log_mel_spec = librosa.power_to_db(mel_spec)
            mean = np.mean(log_mel_spec)
            std = np.std(log_mel_spec)
            normalized_features = (log_mel_spec - mean) / (std + 1e-5)

            # Add the batch dimension expected by the ONNX model.
            return np.expand_dims(normalized_features, axis=0).astype(np.float32)

        except Exception as e:
            print(f"特征提取出错: {str(e)}")
            # Best-effort fallback: empty features instead of raising.
            return np.zeros((1, 80, 500), dtype=np.float32)

    def decode_result(self, result):
        """Greedy CTC decode: per-frame argmax, collapse repeats, drop blanks.

        Parameters:
            result: logits of shape (frames, vocab) or (batch, frames, vocab).

        Returns:
            The decoded text, or '' on any decoding error.
        """
        try:
            if result.ndim == 3:
                # Batched output: decode only the first item.
                result = result[0]

            frame_ids = np.argmax(result, axis=1)

            # Collapse consecutive duplicates and remove the blank (id 0).
            # (Fix: loop variable renamed from `id`, which shadowed the builtin.)
            prev_id = -1
            text_ids = []
            for token_id in frame_ids:
                if token_id != prev_id and token_id != 0:
                    text_ids.append(token_id)
                prev_id = token_id

            # Unknown ids map to '' so an id outside the table is skipped.
            return ''.join(self.char_dict.get(int(token_id), '') for token_id in text_ids)

        except Exception as e:
            print(f"解码出错: {str(e)}")
            return ""

    def recognize(self, audio_data):
        """Run the full pipeline: features -> ONNX inference -> CTC decode.

        Returns a dict with keys 'text', 'inference_time', 'audio_length',
        'success' and, on failure, 'error'. Never raises.
        """
        try:
            features = self.extract_features(audio_data)

            # Time only the model inference, not feature extraction.
            start_time = time.time()
            input_name = self.session.get_inputs()[0].name
            result = self.session.run(None, {input_name: features})[0]
            inference_time = time.time() - start_time

            text = self.decode_result(result)

            return {
                'text': text,
                'inference_time': inference_time,
                'audio_length': len(audio_data) / self.sample_rate,
                'success': True
            }

        except Exception as e:
            return {
                'text': '',
                'inference_time': 0,
                'audio_length': 0,
                'success': False,
                'error': str(e)
            }

class StreamingASR:
    """Live microphone recognizer: captures audio, recognizes it in chunks.

    Audio is accumulated in a buffer by the sounddevice callback; whenever a
    full chunk is available it is recognized on a separate thread and the
    result is delivered through the user-supplied callback.
    """

    def __init__(self, asr_engine, chunk_duration=2.0, sample_rate=16000):
        self.asr_engine = asr_engine
        self.chunk_duration = chunk_duration
        self.sample_rate = sample_rate
        self.chunk_size = int(chunk_duration * sample_rate)  # samples per chunk

        # NOTE(review): the buffer is mutated from the audio callback thread
        # without a lock; confirm whether a lock is needed under load.
        self.audio_buffer = []
        self.is_recording = False
        self.recording_thread = None
        # Fix: define the callback up front so _process_chunk cannot raise
        # AttributeError if recording was never started.
        self.callback = None

    def start_recording(self, callback=None):
        """Start capturing from the microphone on a daemon thread.

        Parameters:
            callback: callable invoked with each non-empty recognition
                result dict (see ASREngine.recognize).
        """
        self.is_recording = True
        self.audio_buffer = []
        self.callback = callback

        self.recording_thread = threading.Thread(target=self._recording_worker)
        self.recording_thread.daemon = True
        self.recording_thread.start()

    def stop_recording(self):
        """Signal the capture thread to stop and wait for it briefly."""
        self.is_recording = False
        if self.recording_thread:
            self.recording_thread.join(timeout=1.0)

    def _recording_worker(self):
        """Capture thread body: open the input stream and feed the buffer."""

        # Fix: third parameter renamed from `time` (it shadowed the module).
        def audio_callback(indata, frames, time_info, status):
            if status:
                print(f"录音状态: {status}")

            if self.is_recording:
                # Append the new samples to the rolling buffer.
                self.audio_buffer.extend(indata.flatten())

                # Once a full chunk is buffered, recognize it.
                if len(self.audio_buffer) >= self.chunk_size:
                    chunk_data = np.array(self.audio_buffer[:self.chunk_size])
                    # Keep the second half of the chunk for 50% overlap.
                    self.audio_buffer = self.audio_buffer[self.chunk_size // 2:]

                    # Recognize asynchronously so the audio callback stays fast.
                    threading.Thread(target=self._process_chunk, args=(chunk_data,)).start()

        try:
            with sd.InputStream(
                samplerate=self.sample_rate,
                channels=1,
                dtype='float32',
                callback=audio_callback,
                blocksize=1024
            ):
                while self.is_recording:
                    time.sleep(0.1)

        except Exception as e:
            print(f"录音出错: {str(e)}")

    def _process_chunk(self, chunk_data):
        """Recognize one chunk; deliver non-empty results to the callback."""
        result = self.asr_engine.recognize(chunk_data)

        if self.callback and result['text'].strip():
            self.callback(result)

# Global engine instances, created in main() before the server starts.
asr_engine = None
streaming_asr = None

@app.route('/')
def index():
    """Serve the single-page web UI (live recording, file upload, results).

    The page connects back over Socket.IO and drives the
    start_recording / stop_recording / recognize_file events.
    """
    # The entire UI is an inline template string; it is runtime content and
    # must be kept verbatim.
    html_template = """
    <!DOCTYPE html>
    <html>
    <head>
        <title>实时语音识别服务</title>
        <meta charset="utf-8">
        <script src="https://cdnjs.cloudflare.com/ajax/libs/socket.io/4.0.1/socket.io.js"></script>
        <style>
            body { font-family: Arial, sans-serif; margin: 40px; }
            .container { max-width: 800px; margin: 0 auto; }
            .button { padding: 10px 20px; margin: 10px; font-size: 16px; cursor: pointer; }
            .start-btn { background-color: #4CAF50; color: white; border: none; }
            .stop-btn { background-color: #f44336; color: white; border: none; }
            .upload-btn { background-color: #008CBA; color: white; border: none; }
            .result { margin: 20px 0; padding: 15px; border: 1px solid #ddd; background-color: #f9f9f9; }
            .stats { margin: 10px 0; font-size: 14px; color: #666; }
            #status { font-weight: bold; margin: 10px 0; }
            #results { max-height: 400px; overflow-y: auto; }
        </style>
    </head>
    <body>
        <div class="container">
            <h1>实时语音识别服务</h1>
            
            <div>
                <h3>实时录音识别</h3>
                <button id="startBtn" class="button start-btn">开始录音</button>
                <button id="stopBtn" class="button stop-btn" disabled>停止录音</button>
                <div id="status">就绪</div>
            </div>
            
            <div>
                <h3>文件上传识别</h3>
                <input type="file" id="audioFile" accept="audio/*">
                <button id="uploadBtn" class="button upload-btn">上传识别</button>
            </div>
            
            <div>
                <h3>识别结果</h3>
                <div id="results"></div>
            </div>
        </div>

        <script>
            const socket = io();
            const startBtn = document.getElementById('startBtn');
            const stopBtn = document.getElementById('stopBtn');
            const uploadBtn = document.getElementById('uploadBtn');
            const audioFile = document.getElementById('audioFile');
            const status = document.getElementById('status');
            const results = document.getElementById('results');
            
            let isRecording = false;
            
            // 开始录音
            startBtn.addEventListener('click', function() {
                socket.emit('start_recording');
                isRecording = true;
                startBtn.disabled = true;
                stopBtn.disabled = false;
                status.textContent = '正在录音...';
            });
            
            // 停止录音
            stopBtn.addEventListener('click', function() {
                socket.emit('stop_recording');
                isRecording = false;
                startBtn.disabled = false;
                stopBtn.disabled = true;
                status.textContent = '录音已停止';
            });
            
            // 文件上传
            uploadBtn.addEventListener('click', function() {
                const file = audioFile.files[0];
                if (!file) {
                    alert('请选择音频文件');
                    return;
                }
                
                const reader = new FileReader();
                reader.onload = function(e) {
                    const audioData = e.target.result;
                    socket.emit('recognize_file', {
                        filename: file.name,
                        data: audioData
                    });
                    status.textContent = '正在识别文件...';
                };
                reader.readAsDataURL(file);
            });
            
            // 接收识别结果
            socket.on('recognition_result', function(data) {
                const resultDiv = document.createElement('div');
                resultDiv.className = 'result';
                
                const timestamp = new Date().toLocaleTimeString();
                const stats = `时间: ${timestamp} | 推理时间: ${(data.inference_time * 1000).toFixed(1)}ms | 音频长度: ${data.audio_length.toFixed(1)}s`;
                
                resultDiv.innerHTML = `
                    <div><strong>识别结果:</strong> ${data.text || '(无识别结果)'}</div>
                    <div class="stats">${stats}</div>
                `;
                
                results.insertBefore(resultDiv, results.firstChild);
                
                if (!isRecording) {
                    status.textContent = '识别完成';
                }
            });
            
            // 连接状态
            socket.on('connect', function() {
                status.textContent = '已连接到服务器';
            });
            
            socket.on('disconnect', function() {
                status.textContent = '与服务器断开连接';
                isRecording = false;
                startBtn.disabled = false;
                stopBtn.disabled = true;
            });
        </script>
    </body>
    </html>
    """
    return render_template_string(html_template)

@app.route('/api/recognize', methods=['POST'])
def api_recognize():
    """REST endpoint: transcribe an uploaded audio file.

    Expects multipart form-data with an 'audio' file field. Responds with
    the engine's result dict as JSON, or an error payload with an
    appropriate status code.
    """
    try:
        # Fix: the engine is only created in main(); guard against the
        # endpoint being hit before (or without) initialization.
        if asr_engine is None:
            return jsonify({'error': 'ASR engine not initialized'}), 503

        if 'audio' not in request.files:
            return jsonify({'error': '没有音频文件'}), 400

        audio_file = request.files['audio']

        # Decode the upload, resampling to the service's sample rate.
        audio_data, sr = librosa.load(io.BytesIO(audio_file.read()), sr=args.sample_rate)

        result = asr_engine.recognize(audio_data)

        return jsonify(result)

    except Exception as e:
        return jsonify({'error': str(e)}), 500

@app.route('/api/status')
def api_status():
    """REST endpoint: report service health and the active configuration."""
    payload = {
        'status': 'running',
        'model_path': args.model_path,
        'sample_rate': args.sample_rate,
        'chunk_duration': args.chunk_duration,
    }
    return jsonify(payload)

@socketio.on('start_recording')
def handle_start_recording():
    """Socket.IO handler: begin live microphone recognition."""

    def result_callback(result):
        # Fix: this callback fires on a recognition worker thread, outside
        # the Socket.IO request context, where the context-bound emit()
        # raises. Use the server-level socketio.emit() instead.
        socketio.emit('recognition_result', result)

    streaming_asr.start_recording(callback=result_callback)
    emit('recording_started')

@socketio.on('stop_recording')
def handle_stop_recording():
    """Socket.IO handler: stop the live recording session, then ack."""
    streaming_asr.stop_recording()
    emit('recording_stopped')

@socketio.on('recognize_file')
def handle_recognize_file(data):
    """Socket.IO handler: transcribe a client-uploaded audio file.

    ``data['data']`` is expected to be a data URL
    ("data:audio/...;base64,....") produced by FileReader.readAsDataURL,
    but a bare base64 string is also accepted.
    """
    try:
        payload = data['data']
        # Fix: tolerate a payload without the "data:...;base64," prefix;
        # the original split(',')[1] raised IndexError in that case.
        if ',' in payload:
            payload = payload.split(',', 1)[1]
        audio_bytes = base64.b64decode(payload)

        # Decode the audio, resampling to the service's sample rate.
        audio_data, sr = librosa.load(io.BytesIO(audio_bytes), sr=args.sample_rate)

        result = asr_engine.recognize(audio_data)

        emit('recognition_result', result)

    except Exception as e:
        # Report failures in the same result shape the client already handles.
        emit('recognition_result', {
            'text': '',
            'inference_time': 0,
            'audio_length': 0,
            'success': False,
            'error': str(e)
        })

def main():
    """Build the engines, then run the web server (blocks until shutdown)."""
    global asr_engine, streaming_asr

    print("初始化语音识别服务...")

    # Recognition engine first, then the streaming wrapper on top of it.
    asr_engine = ASREngine(args.model_path, args.sample_rate)
    streaming_asr = StreamingASR(asr_engine, args.chunk_duration, args.sample_rate)

    # Startup banner.
    for line in (
        f"服务启动成功！",
        f"模型: {args.model_path}",
        f"访问地址: http://{args.host}:{args.port}",
        f"API文档: http://{args.host}:{args.port}/api/status",
    ):
        print(line)

    socketio.run(app, host=args.host, port=args.port, debug=False)

# Script entry point: only start the service when run directly.
if __name__ == "__main__":
    main()