from typing import Dict

import torch
import numpy as np

from torchaudio.io import StreamReader

from utils.logger import get_logger
from utils.timestamp_process import make_valid_ranges, merge_ranges, seconds_to_hms
from funasr import AutoModel

logger = get_logger()

class Transcriber:
    """Streaming speech transcriber with speaker diarization (FunASR).

    Audio is streamed from disk in one-second chunks, buffered until the
    next silence boundary, and each buffered segment is transcribed with
    sentence timestamps and per-sentence speaker labels.
    """

    def __init__(self, sr=16000, max_step=3600, merge_thr=0.9, batch_size_threshold_s=16):
        """
        Args:
            sr: Sample rate in Hz; also used as frames-per-chunk, so each
                streamed chunk corresponds to one second of audio.
            max_step: Maximum segment length, in one-second chunks, fed to
                a single ``model.generate()`` call.
            merge_thr: Speaker-clustering merge threshold written into the
                cam++ model config.
            batch_size_threshold_s: Forwarded to ``model.generate()``.
        """
        # from .auto_model import AutoModel
        self.model = AutoModel(model="paraformer-zh", vad_model="fsmn-vad", punc_model="ct-punc",
                               spk_model="cam++",
                               return_spk_res=True,
                               disable_pbar=True,
                               sentence_timestamp=True,
                               disable_update=True)
        self.model.cb_model.model_config = {"merge_thr": merge_thr}
        self.sr = sr
        self.max_step = max_step
        # Running offset so speaker ids stay unique across segments.
        self.spk_id = 0
        self.batch_size_threshold_s = batch_size_threshold_s

    def __call__(self, silent_points, audio_path, duration) -> Dict:
        """Transcribe ``audio_path``, splitting work at silence boundaries.

        Args:
            silent_points: Candidate silence split points (seconds).
            audio_path: Path/URL accepted by ``StreamReader``.
            duration: Total audio duration in seconds.

        Returns:
            A list of result dicts from :meth:`format_output`.
            NOTE(review): the ``-> Dict`` annotation does not match the
            list actually returned; kept unchanged for compatibility.
        """
        ranges = make_valid_ranges(silent_points, duration)
        merged_ranges = merge_ranges(ranges, self.max_step)
        # Sorted, de-duplicated boundary timestamps (seconds == chunk index).
        new_silent_points = np.unique(merged_ranges)
        streamer = StreamReader(src=audio_path)
        streamer.add_basic_audio_stream(
            frames_per_chunk=self.sr,  # one second of audio per chunk
            sample_rate=self.sr,
            format="s16p",
            decoder_option={"threads": "0"}
        )

        chunk_id = 0
        chunk_cache = []
        outputs = []
        cur_id = new_silent_points[0]
        cur_end = 0
        for chunk in streamer.stream():
            # A zero boundary is meaningless (nothing buffered yet) — skip it.
            if cur_id == 0:
                new_silent_points = new_silent_points[1:]
                cur_id = new_silent_points[0]
            if chunk is None:
                # Decoder gap: substitute one second of silence.
                data = torch.zeros([self.sr, 1], dtype=torch.float16)[:, 0]
            else:
                data = chunk[0].data[:, 0]
            if chunk_id == cur_id:
                # Reached a silence boundary: transcribe the buffered segment.
                chunk_cache, cur_end, outputs = self.process_chunk(chunk_cache, cur_id, cur_end, outputs)
                logger.debug("processed segment, cur_end=%s", cur_end)

                chunk_cache.clear()
                new_silent_points = new_silent_points[1:]
                if new_silent_points.any():
                    cur_id = new_silent_points[0]

            chunk_cache.append(data)
            chunk_id += 1

        # Flush whatever remains after the last boundary.
        if len(chunk_cache) > 0:
            logger.debug("flushing tail segment, cur_end=%s", cur_end)
            chunk_cache, cur_end, outputs = self.process_chunk(chunk_cache, cur_id, cur_end, outputs)

        self.spk_id = 0
        return outputs

    def process_chunk(self, chunk_cache, cur_id, cur_end, outputs):
        """Transcribe buffered chunks in slices of at most ``max_step`` seconds.

        Args:
            chunk_cache: List of one-second waveform tensors.
            cur_id: Chunk index (seconds) of the boundary ending this segment.
            cur_end: End time (seconds) of the previously processed slice.
            outputs: Accumulator list of formatted results (extended in place).

        Returns:
            Tuple of (remaining ``chunk_cache``, updated ``cur_end``, ``outputs``).
        """
        duration = len(chunk_cache)
        for i in range(0, len(chunk_cache), self.max_step):
            # chunk_cache is re-sliced at the bottom of the loop, so the
            # head always holds the next unprocessed slice.
            chunks = chunk_cache[:self.max_step]
            cur_start = cur_id - duration + i  # absolute start time, seconds
            sub_duration = len(chunks)
            cur_end = cur_start + sub_duration
            chunks = torch.cat(chunks, dim=0)

            chunks = chunks.numpy().tobytes()
            cache = []  # distinct speaker ids seen in this slice
            try:
                result = self.model.generate(chunks,
                                             batch_size_threshold_s=self.batch_size_threshold_s)
                if result:
                    sentence_info = result[0].get("sentence_info")
                    if sentence_info is None:
                        outputs.append(self.format_output("", cur_start,
                                                          sub_duration, None))
                    else:
                        for info in sentence_info:
                            text = info.get("text")
                            # Model timestamps are ms relative to the slice;
                            # shift by cur_start and truncate to whole seconds.
                            fake_start = int((info.get("timestamp")[0][0] + cur_start * 1000) / 1000)
                            fake_end = int((info.get("timestamp")[-1][1] + cur_start * 1000) / 1000)
                            spk_id = info.get("spk") + self.spk_id
                            if spk_id not in cache:
                                cache.append(spk_id)
                            outputs.append(
                                self.format_output(text, fake_start, fake_end - fake_start, spk_id))
                else:
                    outputs.append(self.format_output("", cur_start,
                                                      sub_duration, None))
            except Exception:
                # Was a bare `except:` that silently blamed OOM for every
                # failure; keep the placeholder output text but record the
                # real traceback so other errors are diagnosable.
                logger.exception("generate() failed for slice starting at %ss", cur_start)
                outputs.append(self.format_output("cuda memory out", cur_start,
                                                  sub_duration, None))
            chunk_cache = chunk_cache[self.max_step:]
            self.spk_id += len(cache)

        return chunk_cache, cur_end, outputs

    @staticmethod
    def format_output(text, start, duration, spk_id):
        """Build one result dict; ``spk_id`` is included only when known.

        Args:
            text: Transcribed sentence text (may be empty/placeholder).
            start: Absolute start time in seconds.
            duration: Length in seconds (fixes former ``duraion`` typo).
            spk_id: Global speaker id, or None when unavailable.
        """
        entry = {"text": text, "start": start, "end": start + duration,
                 "start_str": seconds_to_hms(start),
                 "end_str": seconds_to_hms(start + duration)}
        if spk_id is not None:
            entry["spk_id"] = spk_id
        return entry
