from funasr import AutoModel
import soundfile
import os
from flask import Flask, stream_with_context, jsonify, Response
import time
import difflib

app = Flask(__name__)

# Streaming chunking parameters for the Paraformer model:
# chunk_size [0, 10, 5] -> 600 ms per decoding chunk; [0, 8, 4] -> 480 ms.
chunk_size = [0, 10, 5]  # [0, 10, 5] 600ms, [0, 8, 4] 480ms
encoder_chunk_look_back = 4  # number of chunks to lookback for encoder self-attention
decoder_chunk_look_back = 1  # number of encoder chunks to lookback for decoder cross-attention

# model = AutoModel(model="paraformer-zh-streaming", model_revision="v2.0.4")
# Loads the model once at import time; this blocks startup until weights load.
# NOTE(review): hard-coded local Windows path — presumably should be made
# configurable (env var / CLI arg) before deploying elsewhere; confirm.
model = AutoModel(model="E:\\aimodel\\speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch",
                  model_revision="v2.0.4")
print("Model loaded")


@app.route('/stream', methods=['POST'])
def streamed_response():
    """Stream the ASR transcript of the bundled example WAV as plain text.

    Reads the example audio shipped with the model, decodes it in fixed-size
    chunks with the streaming Paraformer model, and yields each partial
    transcript as a UTF-8 chunk of a `text/plain` HTTP response.

    Returns:
        flask.Response: a streamed response whose body is the concatenation
        of the incremental transcription results.
    """
    # Join path components separately so the path is portable (the original
    # embedded a Windows-only backslash inside one component).
    wav_file = os.path.join(model.model_path, "example", "asr_example.wav")
    speech, sample_rate = soundfile.read(wav_file)
    chunk_stride = chunk_size[1] * 960  # 600ms of audio per chunk

    cache = {}  # streaming decoder state, carried across generate() calls
    # Ceiling division over the sample count.  The original expression
    # `int(len((speech) - 1) / chunk_stride + 1)` misplaced a parenthesis
    # (len(speech - 1) == len(speech) for a numpy array) and emitted one
    # extra empty chunk whenever the length was an exact multiple of the
    # stride.  Keep at least one chunk so is_final is always sent.
    total_chunk_num = max(1, (len(speech) + chunk_stride - 1) // chunk_stride)

    @stream_with_context
    def generate():
        for i in range(total_chunk_num):
            speech_chunk = speech[i * chunk_stride:(i + 1) * chunk_stride]
            is_final = i == total_chunk_num - 1  # flush decoder state on last chunk
            res = model.generate(input=speech_chunk, cache=cache,
                                 is_final=is_final, chunk_size=chunk_size,
                                 use_itn=True,
                                 encoder_chunk_look_back=encoder_chunk_look_back,
                                 decoder_chunk_look_back=decoder_chunk_look_back)
            if res and len(res) > 0:
                current_text = res[0].get("text", "")
                if current_text:
                    yield current_text.encode('utf-8')
            time.sleep(1)  # pace the stream so clients see incremental output

    return Response(generate(), mimetype='text/plain')


if __name__ == "__main__":
    # threaded=True lets Flask handle other requests while a client is
    # consuming the long-running /stream response.
    app.run(host="0.0.0.0", port=5000, threaded=True)
