import json

from langchain_core.language_models.llms import LLM
from typing import TYPE_CHECKING, Any, Dict, Generator, List, Mapping, Optional, Union, Iterator, AsyncGenerator, \
    AsyncIterator
from langchain_core.callbacks import CallbackManagerForLLMRun, AsyncCallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
import soundfile
from langchain_core.outputs import GenerationChunk
from modelscope import Pipeline
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks


class FunASRStreamingLLM(LLM):
    """LangChain LLM wrapper around a ModelScope streaming Paraformer ASR pipeline.

    The ``prompt`` passed to ``invoke``/``_call`` is treated as the audio
    input (a local wav path or a URL). An empty prompt falls back to the
    public demo clip, preserving the original behavior of ``invoke("")``.
    The recognition result is returned as a JSON string.
    """

    # ModelScope ASR pipeline instance, created in __init__.
    pipeline: Any
    # Extra keyword arguments captured at construction time (e.g. chunk_size,
    # cache, lookback settings). NOTE(review): currently stored but not
    # forwarded to the pipeline — confirm intended streaming usage.
    model_kwargs: Dict[str, Any]

    def __init__(self, **model_kwargs: Any):
        """Create the streaming ASR pipeline.

        Args:
            **model_kwargs: Arbitrary keyword arguments stored on the
                instance as ``model_kwargs``.

        Raises:
            ImportError: If ``funasr`` is not installed. It is not used
                directly here — presumably it is the backend required by the
                streaming Paraformer model; verify against modelscope docs.
        """
        try:
            from funasr import AutoModel  # noqa: F401 — availability check only
        except ImportError as e:
            raise ImportError(
                "Could not import AutoModel from funasr. Please install it"
                " with `pip install funasr`."
            ) from e
        super().__init__(model_kwargs=model_kwargs or {})
        self.pipeline = pipeline(
            task=Tasks.auto_speech_recognition,
            model='damo/speech_paraformer_asr_nat-zh-cn-16k-common-vocab8404-online',
            model_revision="v2.0.4",
            # vad_model='damo/speech_fsmn_vad_zh-cn-16k-common-pytorch',
            # vad_model_revision="v2.0.4",
            punc_model='iic/punc_ct-transformer_zh-cn-common-vad_realtime-vocab272727',
            punc_model_revision="v2.0.4",
            ngpu=1,
        )

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        """Run speech recognition and return the result as a JSON string.

        Args:
            prompt: Audio input for the pipeline (local path or URL).
                Falls back to the public demo clip when empty — this keeps
                the original ``invoke("")`` behavior intact.
            stop: Unused; accepted for LLM interface compatibility.
            run_manager: Unused; accepted for LLM interface compatibility.
            **kwargs: Ignored (kept for interface compatibility).

        Returns:
            The pipeline result serialized with ``ensure_ascii=False`` so
            Chinese text stays readable.
        """
        audio_input = prompt or (
            "https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav"
        )
        res = self.pipeline(input=audio_input)
        return json.dumps(res, ensure_ascii=False)

    @property
    def _llm_type(self) -> str:
        """Identifier used by LangChain for this LLM type."""
        return "funasr_streaming"

import asyncio
import soundfile
import os
if __name__ == '__main__':
    # Streaming chunk configuration: [0, 10, 5] -> 600ms, [0, 8, 4] -> 480ms.
    chunk_size = [5, 10, 5]
    encoder_chunk_look_back = 0  # number of chunks to lookback for encoder self-attention
    decoder_chunk_look_back = 0  # number of encoder chunks to lookback for decoder cross-attention

    # Audio file path, e.g. "path/to/audio.wav" (expected 16 kHz for this model).
    wav_file = "asr_example_zh.wav"
    speech, sample_rate = soundfile.read(wav_file)  # sample_rate currently unused

    chunk_stride = chunk_size[1] * 960  # samples per chunk: 600ms / 480ms at 16 kHz

    # Build the model ONCE. The original constructed a new FunASRStreamingLLM
    # (and thus a whole new ASR pipeline) inside the loop for every chunk.
    llm = FunASRStreamingLLM(
        cache={},
        chunk_size=chunk_size,
        encoder_chunk_look_back=encoder_chunk_look_back,
        decoder_chunk_look_back=decoder_chunk_look_back,
    )

    # Exact ceiling division. The original `int(len((speech) - 1) / chunk_stride + 1)`
    # was mis-parenthesized — it only worked because numpy broadcasting makes
    # `len(speech - 1) == len(speech)` — and emitted an extra empty chunk when
    # the signal length divides evenly by the stride.
    total_chunk_num = (len(speech) + chunk_stride - 1) // chunk_stride
    for i in range(total_chunk_num):
        speech_chunk = speech[i * chunk_stride:(i + 1) * chunk_stride]
        is_final = i == total_chunk_num - 1
        res = llm.invoke("", chunk_data=speech_chunk, is_final=is_final)
        print(res)
