# encoding:utf-8
import os
import argparse
import numpy as np
import pandas as pd

from torchaudio.io import StreamReader
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
from modelscope.utils.logger import get_logger

from pathlib import Path
from typing import Union, Callable
from uuid import uuid4
import warnings

logger = get_logger()

class DatadirWriter:
    """Writer class to create kaldi like data directory.

    Examples:
        >>> with DatadirWriter("output") as writer:
        ...     # output/sub.txt is created here
        ...     subwriter = writer["sub.txt"]
        ...     # Write "uttidA some/where/a.wav"
        ...     subwriter["uttidA"] = "some/where/a.wav"
        ...     subwriter["uttidB"] = "some/where/b.wav"

    A node is either a directory (indexed via ``writer[name]``) or a file
    (assigned via ``writer[key] = value``) — never both.  Plain files get
    "key value" lines written immediately; paths ending in ``.csv`` are
    buffered in a pandas DataFrame and flushed on close().
    """

    def __init__(self, p: Union[Path, str]):
        self.path = Path(p)
        # Child writers keyed by sub-path name (fixed typo: was "chilidren").
        self.children = {}
        # File handle for text files, or a pandas DataFrame for .csv paths;
        # created lazily on the first __setitem__.
        self.fd = None
        self.has_children = False
        self.keys = set()
        self.data = None  # kept for backward compatibility; unused here

    def __enter__(self):
        return self

    def __getitem__(self, key: str) -> "DatadirWriter":
        """Return (creating on demand) the child writer for *key*."""
        if self.fd is not None:
            raise RuntimeError("This writer points out a file")

        if key not in self.children:
            self.children[key] = DatadirWriter(self.path / key)
            self.has_children = True

        return self.children[key]

    def __setitem__(self, key: str, value):
        """Record *value* under *key* in this node's file.

        Text files: appends a "key value" line and flushes.  CSV files:
        a list value defines the column header, a dict value appends one
        row, and a str value forces an immediate flush to disk.
        """
        if self.has_children:
            raise RuntimeError("This writer points out a directory")
        if key in self.keys:
            warnings.warn(f"Duplicated: {key}")

        if self.fd is None:
            self.path.parent.mkdir(parents=True, exist_ok=True)
            if str(self.path).endswith(".csv"):
                # Fixed: the original unconditionally called pd.read_csv,
                # which raised FileNotFoundError when the csv did not exist
                # yet (the common case on first write).
                if self.path.exists():
                    self.fd = pd.read_csv(self.path)
                elif isinstance(value, list):
                    # A list value declares the column header.
                    self.fd = pd.DataFrame(columns=value)
                else:
                    self.fd = pd.DataFrame()
            else:
                self.fd = self.path.open("w", encoding="utf-8")

        self.keys.add(key)
        if isinstance(self.fd, pd.DataFrame):
            if isinstance(value, dict):
                self.fd.loc[len(self.fd)] = value
            elif isinstance(value, str):
                # index=False: the original default wrote a spurious
                # unnamed index column that corrupted later round-trips.
                self.fd.to_csv(self.path, index=False)
        else:
            self.fd.write(f"{key} {value}\n")
            self.fd.flush()

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()

    def close(self):
        """Close all children (warning on mismatched id sets) or this file."""
        if self.has_children:
            prev_child = None
            for child in self.children.values():
                child.close()
                if prev_child is not None and prev_child.keys != child.keys:
                    warnings.warn(
                        f"Ids are mismatching between "
                        f"{prev_child.path} and {child.path}"
                    )
                prev_child = child

        elif self.fd is not None:
            if isinstance(self.fd, pd.DataFrame):
                # Fixed: the original never persisted the buffered
                # DataFrame here, silently losing all appended rows.
                self.fd.to_csv(self.path, index=False)
            else:
                self.fd.close()

def split_contigous_frame(arrs, min_silent_time: int):
    """Group consecutive sentences that are separated by short silences.

    Args:
        arrs: ASR result dict holding a "sentences" list; each sentence
            carries a ``ts_list`` of [start_ms, end_ms] pairs.
        min_silent_time: gaps (in ms) shorter than this merge neighbouring
            sentences into the same group.

    Returns:
        List of sentence groups (each group a list of sentence dicts).
        Returns [] for an empty sentence list (the original returned [[]],
        which crashed merge_text_info downstream).
    """
    sentences = arrs["sentences"]
    if not sentences:
        return []

    groups = []
    current = [sentences[0]]
    prev_end = sentences[0]["ts_list"][-1][-1]
    for sent in sentences[1:]:
        start = sent["ts_list"][0][0]
        # A new group starts whenever the silence gap is long enough.
        if abs(start - prev_end) < min_silent_time:
            current.append(sent)
        else:
            groups.append(current)
            current = [sent]
        prev_end = sent["ts_list"][-1][-1]
    groups.append(current)
    return groups


def seconds_to_hms(seconds_num: int) -> str:
    """Convert a duration in seconds to an "HH:MM:SS" string.

    Args:
        seconds_num: duration in whole seconds, e.g. 666.

    Returns:
        Zero-padded "HH:MM:SS" string, e.g. "00:11:06".  Hours may exceed
        two digits for very long durations.
    """
    hours, remainder = divmod(seconds_num, 3600)
    minutes, seconds = divmod(remainder, 60)
    return "%02d:%02d:%02d" % (hours, minutes, seconds)


def merge_text_info(text_infos, chunk_id):
    """Collapse each group of sentence fragments into one transcript row.

    Args:
        text_infos: list of sentence groups (as produced by
            split_contigous_frame); each sentence dict has a ``ts_list`` of
            [start_ms, end_ms] pairs and a ``text`` field.
        chunk_id: millisecond offset of the current stream chunk, added to
            the timestamps so the HH:MM:SS strings are absolute.

    Returns:
        Tuple (results, outputs): display rows keyed by the Chinese column
        names used in the output CSV, and raw {text, start, end} dicts with
        chunk-relative millisecond timestamps.
    """
    results = []
    outputs = []
    for text_info in text_infos:
        # Guard: the original crashed with start=None on an empty group.
        if not text_info:
            continue
        start = text_info[0]["ts_list"][0][0]
        end = text_info[-1]["ts_list"][-1][-1]
        text = "".join(info["text"] for info in text_info)
        results.append({"文本段": text,
                        "时间开头": seconds_to_hms(int((start + chunk_id) / 1000)),
                        "时间结尾": seconds_to_hms(int((end + chunk_id) / 1000)),
                        "是否为广告": 0})
        outputs.append({"text": text,
                        "start": start,
                        "end": end})

    return results, outputs


class FunasrInfer:
    """Chunked streaming ASR inference over an audio/video file.

    Decodes the input in fixed-size chunks via torchaudio's StreamReader,
    feeds each chunk's raw PCM bytes through the modelscope ASR pipeline and
    collects the sentence groups into a pandas DataFrame.
    """

    def __init__(self, pipeline: Callable, stride, interval, min_silent_time):
        """
        Args:
            pipeline: modelscope ASR pipeline; called with raw PCM bytes.
            stride: minimum number of samples a chunk must contain; one
                stride unit appears to correspond to 120 ms of audio
                (see _format_result) — TODO confirm against the caller.
            interval: number of stride units decoded per chunk.
            min_silent_time: silence gap (ms) used to split sentence groups.
        """
        self.stride = stride
        self.pipeline = pipeline
        self.interval = interval
        self.min_silent_time = min_silent_time
        self.sample_rate = 16000
        self._chunk_id = 0  # index of the chunk currently being decoded

    def infer(self, audio_in, writer=None):
        """Backward-compatible entry point: main() calls model.infer(...),
        which did not exist (only __call__ was defined) and raised
        AttributeError.  *writer* is accepted for compatibility but is
        currently unused — the transcript DataFrame is simply returned.
        """
        return self(audio_in)

    def __call__(self, audio_in):
        """Transcribe *audio_in* and return a DataFrame of text segments."""
        header = ["文本段", "时间开头", "时间结尾", "是否为广告"]
        df = pd.DataFrame(columns=header)
        streamer = StreamReader(src=audio_in)
        logger.info(streamer.get_src_stream_info(0))
        streamer.add_basic_audio_stream(
            frames_per_chunk=self.stride * self.interval,
            sample_rate=self.sample_rate,
            format="s16p",
            decoder_option={"threads": "0"}
        )
        for chunk in streamer.stream():
            data = chunk[0].data[:, 0]  # first channel only
            if data.shape[0] < self.stride:
                # Chunk too short to decode; record a marker row instead.
                value = self._format_result("{exception,输入数据时长小于120ms}", self._chunk_id)
                df.loc[len(df)] = value
            else:
                try:
                    audio_bytes = data.numpy().tobytes()
                    results = self.pipeline(audio_bytes, batch_size_token=1000)
                    if "sentences" in results:
                        text_infos = split_contigous_frame(results, self.min_silent_time)
                        # NOTE(review): the offset assumes 120 ms per stride
                        # unit per chunk — confirm against stride setup.
                        rows, _outputs = merge_text_info(text_infos, self._chunk_id * 120 * self.interval)
                        for row in rows:
                            df.loc[len(df)] = row

                except Exception as e:
                    # Keep going on per-chunk failures; log and record a
                    # marker row (was a bare print).
                    logger.error(e)
                    value = self._format_result("{exception,推理错误}", self._chunk_id)
                    df.loc[len(df)] = value
            self._chunk_id += 1

        self.reset()

        return df

    def _format_result(self, text, chunk_id):
        """Build a placeholder row spanning the chunk's full time range."""
        result = {"文本段": text,
                  "时间开头": seconds_to_hms(int(chunk_id * 120 * self.interval / 1000)),
                  "时间结尾": seconds_to_hms(int((chunk_id + 1) * 120 * self.interval / 1000)),
                  "是否为广告": 0}
        return result

    def reset(self):
        """Restart chunk numbering for the next input file."""
        self._chunk_id = 0


def main(model: "FunasrInfer", audio_dir, writer):
    """Transcribe every file in *audio_dir* and save one CSV per input.

    Args:
        model: callable inference wrapper; ``model(path)`` returns a
            pandas DataFrame of transcript rows.
        audio_dir: directory containing the input audio/video files.
        writer: DatadirWriter-like object; ``writer[name]`` yields a
            sub-writer whose ``path`` locates the per-file output.
    """
    for fname in os.listdir(audio_dir):
        print(fname)
        # NOTE(review): recordings containing "09-29" are deliberately
        # skipped; the reason is not visible in this file — confirm with
        # the data owner.
        if "09-29" in fname:
            continue
        audio_in = os.path.join(audio_dir, fname)
        ibest_writer = writer[os.path.basename(audio_in)]
        # Fixed: the original called model.infer(audio_in, ibest_writer),
        # but FunasrInfer defines only __call__, so this always raised
        # AttributeError; its header write also crashed DatadirWriter.
        # Call the model and persist the returned DataFrame directly.
        df = model(audio_in)
        csv_path = ibest_writer["转录.csv"].path
        csv_path.parent.mkdir(parents=True, exist_ok=True)
        df.to_csv(csv_path, index=False)



if __name__ == "__main__":
    # Command-line entry point: build the ASR pipeline, wrap it in
    # FunasrInfer and transcribe every file under --audio_in.
    parser = argparse.ArgumentParser(
        prog="ProgramName",
        description="What the program does",
        epilog="Text at the bottom of help",
    )
    parser.add_argument("--audio_in", type=str, default="test_video", help="audio_in")
    parser.add_argument("--output_dir", type=str, default=None, help="output_dir")
    parser.add_argument("--chunk_size", type=str, default="5, 10, 5", help="chunk")
    parser.add_argument("--chunk_interval", type=int, default=10, help="chunk")
    parser.add_argument("--interval", type=int, default=10000, help="timestamp interval")
    parser.add_argument("--min_silent_time", type=int, default=500, help="min silent time(ms)")

    args = parser.parse_args()
    # "5, 10, 5" -> [5, 10, 5]
    args.chunk_size = [int(x) for x in args.chunk_size.split(",")]
    print(args)

    # ASR pipeline loaded from a local modelscope model cache.
    inference_pipeline = pipeline(
        task=Tasks.auto_speech_recognition,
        model='/root/.cache/modelscope/hub/',
    )

    sample_rate = 16000
    # Samples per decoding step; presumably 120 ms of 16 kHz audio times a
    # width factor of 2 — TODO confirm the intent of the trailing *2.
    stride = int(60 * args.chunk_size[1] / args.chunk_interval / 1000 * sample_rate * 2)

    model = FunasrInfer(inference_pipeline, stride, args.interval, args.min_silent_time)
    writer = DatadirWriter(args.output_dir)
    main(model, args.audio_in, writer)