import os.path
import sys

import librosa
import torch
import numpy as np

from torchaudio.io import StreamReader
from torchaudio.transforms import MFCC
from utils.timer import Timer, get_time_str
from utils.logger import get_logger

# Module-level logger shared by this file (configured by the project's logger utility).
logger = get_logger()

class MFCCInfer:
    """Stream an audio source in one-second chunks, producing per-chunk mean
    MFCC embeddings and a list of candidate silent points detected via a
    zero zero-crossing rate."""

    def __init__(self):
        self.timer = Timer()
        # Decode target sample rate; also the chunk size, so each chunk is 1 s.
        self.sr = 16000
        # Seconds of audio per chunk; offsets the silence timestamps per chunk.
        self.step = 1
        self.mfcc = MFCC(sample_rate=self.sr, n_mfcc=40)

    def __call__(self, audio_in):
        """Run inference on `audio_in` (path/URL/file-like accepted by
        torchaudio StreamReader).

        Returns:
            (prediction, silent_points):
            prediction -- np.ndarray of shape (n_full_chunks, 40), the
                time-averaged MFCC embedding per full one-second chunk;
            silent_points -- list[int] of second offsets whose zero-crossing
                rate is exactly zero (candidate silence).
        """
        streamer = StreamReader(audio_in)
        streamer.add_basic_audio_stream(
            frames_per_chunk=self.sr,  # one-second chunks
            sample_rate=self.sr,
            decoder_option={"threads": "0"},  # let ffmpeg choose thread count
        )
        embs = []
        silent_points = []
        chunk_id = 0
        self.timer.tic()
        for chunk in streamer.stream():
            base_data = chunk[0].data  # (frames, channels) float tensor
            chunk_points = self.get_zero_crossing(base_data, chunk_id, self.sr, self.step)
            data = base_data.T  # (channels, frames) layout expected by MFCC
            if data.shape[1] < self.sr:
                # Short chunk (typically the trailing remainder): skip its
                # embedding and its silence points.
                # NOTE(review): chunk_id is deliberately not advanced here,
                # matching the original behavior -- confirm this is intended
                # if short chunks can occur mid-stream.
                logger.info(chunk_id)
                continue
            # Average over the time axis -> one 40-dim embedding per chunk.
            pred = self.mfcc(data).mean(2)
            embs.append(pred)
            chunk_id += 1
            silent_points.extend(chunk_points)
        time_diff = self.timer.toc(False)
        logger.debug(get_time_str(time_diff))
        logger.debug(self.timer.avg_time_str())
        # Guard: torch.cat raises on an empty list (input shorter than 1 s).
        if embs:
            prediction = torch.cat(embs).numpy()
        else:
            prediction = np.empty((0, 40), dtype=np.float32)
        return prediction, silent_points

    @staticmethod
    def get_zero_crossing(wavform, chunk_id, sample_rate, step):
        """Return sorted unique integer-second timestamps (offset by
        `chunk_id * step`) where the zero-crossing rate is exactly 0.0,
        i.e. candidate silent points within this chunk."""
        # assumes mono input of shape (frames, 1) -- squeeze drops the
        # channel dimension; TODO confirm multi-channel is never passed.
        wavform = wavform.squeeze(1).numpy().astype(np.float32)
        # NOTE(review): frame_length=2038 looks like a typo for 2048 -- confirm
        # before changing, as it alters frame boundaries.
        zcrs_init = librosa.feature.zero_crossing_rate(wavform, frame_length=2038, hop_length=512)
        _, zero_frames = np.where(zcrs_init == 0.0)
        zcrs_times = librosa.frames_to_time(np.arange(zcrs_init.shape[1]), sr=sample_rate, hop_length=512)
        silence_time = {int(zcrs_times[i] + chunk_id * step) for i in zero_frames}
        return sorted(silence_time)