import gc
import io
import json
import wave

import numpy as np
from flask import Flask, Response, stream_with_context, request
from funasr import AutoModel

app = Flask(__name__)

# Streaming ASR model configuration.
# chunk_size = [0, 10, 5] drives the streaming Paraformer's chunked attention;
# the middle value (10 frames) corresponds to a 600 ms decoding chunk.
# NOTE(review): exact [0, 10, 5] semantics come from funasr's streaming
# Paraformer recipe — confirm against the funasr documentation.
chunk_size = [0, 10, 5]  # 600 ms per chunk
encoder_chunk_look_back = 4  # encoder attends back over this many past chunks
decoder_chunk_look_back = 1  # decoder attends back over this many encoder chunks
model = AutoModel(model="paraformer-zh-streaming")

# Hard cap on buffered request audio (100 MB) to bound memory per request.
MAX_BUFFER_SIZE = 100 * 1024 * 1024

@app.route('/speech-to-text', methods=['POST'])
def speech_to_text():
    """Transcribe a WAV request body, streaming results as Server-Sent Events.

    The raw POST body is buffered (capped at MAX_BUFFER_SIZE), parsed as WAV,
    down-mixed to mono if needed, and fed to the streaming Paraformer model in
    600 ms chunks. Each partial transcript is yielded as an SSE ``data:`` event;
    a final JSON event carries the full text or an error description.

    Returns:
        Response: a ``text/event-stream`` response streaming the events above.
    """
    def generate():
        audio_buffer = None
        # Bound before the try-block so the finally-clause can always clear it
        # (previously `del cache` raised NameError if setup failed early).
        cache = {}  # streaming decoder state carried across chunks
        try:
            audio_buffer = io.BytesIO()
            chunk_stride = chunk_size[1] * 960  # 10 * 960 samples = 600 ms at 16 kHz
            results = []

            # Stream the request body into memory, enforcing the size cap.
            total_size = 0
            for chunk in request.stream:
                if chunk:
                    total_size += len(chunk)
                    if total_size > MAX_BUFFER_SIZE:
                        # json.dumps guarantees a well-formed JSON payload
                        # (the old f-string broke on embedded quotes).
                        payload = json.dumps(
                            {"status": "error",
                             "message": f"音频数据超过最大限制 {MAX_BUFFER_SIZE} 字节"},
                            ensure_ascii=False,
                        )
                        yield f"data: {payload}\n\n"
                        return
                    audio_buffer.write(chunk)

            # Rewind the buffer before parsing it as a WAV container.
            audio_buffer.seek(0)

            with wave.open(audio_buffer, 'rb') as wf:
                sample_rate = wf.getframerate()
                n_channels = wf.getnchannels()
                audio_data = np.frombuffer(wf.readframes(wf.getnframes()), dtype=np.int16)

                # Down-mix interleaved channels to mono; cast back to int16 so
                # the model always sees the same dtype (np.mean yields float64).
                if n_channels > 1:
                    audio_data = np.mean(
                        audio_data.reshape(-1, n_channels), axis=1
                    ).astype(np.int16)

                # NOTE(review): chunk_stride hard-codes a 16 kHz rate while
                # sample_rate is read and ignored — confirm callers always
                # send 16 kHz audio, or resample here.

                total_chunk_num = int((len(audio_data) - 1) / chunk_stride + 1)
                for i in range(total_chunk_num):
                    speech_chunk = audio_data[i * chunk_stride:(i + 1) * chunk_stride]
                    is_final = i == total_chunk_num - 1
                    res = model.generate(
                        input=speech_chunk,
                        cache=cache,
                        is_final=is_final,
                        chunk_size=chunk_size,
                        encoder_chunk_look_back=encoder_chunk_look_back,
                        decoder_chunk_look_back=decoder_chunk_look_back,
                    )
                    results.extend(res)

                    # Push the partial transcript to the client right away.
                    partial_text = ''.join(item['text'] for item in res if 'text' in item)
                    if partial_text:
                        yield f"data: {partial_text}\n\n"

                # Drop the decoded samples before building the final payload.
                del audio_data
                gc.collect()

            # Final event: properly escaped JSON, terminated with the "\n\n"
            # SSE event delimiter the partial events already use.
            final_text = ''.join(item['text'] for item in results if 'text' in item)
            payload = json.dumps({"status": "success", "text": final_text},
                                 ensure_ascii=False)
            yield f"data: {payload}\n\n"

        except Exception as e:
            payload = json.dumps({"status": "error", "message": str(e)},
                                 ensure_ascii=False)
            yield f"data: {payload}\n\n"
        finally:
            # Release the request buffer and the model's streaming state.
            if audio_buffer is not None:
                audio_buffer.close()
            cache.clear()
            gc.collect()

    return Response(stream_with_context(generate()), mimetype='text/event-stream')

# Entry point: run Flask's built-in server; threaded=True lets multiple
# SSE streams be served concurrently.
if __name__ == '__main__':
    app.run(debug=False, threaded=True, host='0.0.0.0', port=6000)