import os
import torch
import onnxruntime
import torchaudio.compliance.kaldi as kaldi
import numpy as np
from speechbrain.pretrained import EncoderClassifier

# Pin every thread pool to a single thread so that multiple worker
# processes running inference on one machine do not oversubscribe cores.
# BUG FIX: os.environ values must be strings -- assigning the int 1
# raises ``TypeError: str expected, not int`` at import time.
os.environ["OMP_NUM_THREADS"] = "1"
torch.set_num_threads(1)
torch.set_num_interop_threads(1)
# Process-wide ONNX Runtime session, lazily created by WenetEncoder.__init__
# so the model is loaded at most once per process.
onnx_session = None


class WenetEncoder:
    """ONNX-based wenet speech encoder sharing one process-wide ORT session.

    All instances reuse the module-level ``onnx_session`` so the (large)
    model is deserialized at most once per process.
    """

    def __init__(self, encoder_path: str):
        """Create (or reuse) the process-wide ONNX inference session.

        Args:
            encoder_path: Filesystem path to the exported encoder ``.onnx``
                file; only used the first time a session is created.
        """
        global onnx_session
        if onnx_session is None:
            opts = onnxruntime.SessionOptions()
            # Match the single-threaded setup configured at module import.
            opts.inter_op_num_threads = 1
            opts.intra_op_num_threads = 1
            onnx_session = onnxruntime.InferenceSession(
                encoder_path,
                providers=["CPUExecutionProvider"],
                sess_options=opts,
            )
        self.session = onnx_session

    def _compute_fbank(
            self, waveform, num_mel_bins=80, frame_length=25, frame_shift=10, dither=1.0
    ):
        """Compute kaldi-style log-mel filterbank features for one waveform.

        Args:
            waveform: (1, num_samples) float tensor in [-1, 1]; assumed to be
                sampled at 16 kHz -- TODO confirm with upstream audio loading.
            num_mel_bins: Number of mel filterbank bins.
            frame_length: Frame length in milliseconds.
            frame_shift: Frame shift in milliseconds.
            dither: Dithering constant forwarded to ``kaldi.fbank``.

        Returns:
            (num_frames, num_mel_bins) feature tensor.
        """
        sample_rate = 16000
        # kaldi.fbank expects 16-bit integer sample range, not [-1, 1] floats.
        waveform = waveform * (1 << 15)
        mat = kaldi.fbank(
            waveform,
            num_mel_bins=num_mel_bins,
            frame_length=frame_length,
            frame_shift=frame_shift,
            dither=dither,
            energy_floor=0.0,
            sample_frequency=sample_rate
        )
        return mat

    def compute_batch_encoder_output(self, waveforms: torch.Tensor):
        """Run the ONNX encoder over a batch of equal-length waveforms.

        Args:
            waveforms: (batch, num_samples) tensor; every row must produce
                the same number of feature frames so they stack with vstack.

        Returns:
            numpy array holding the first output of the ONNX encoder.
        """
        batch_fbank = []
        for waveform in waveforms:
            waveform = waveform.unsqueeze(0)
            fbank = self._compute_fbank(waveform).unsqueeze(0).cpu().numpy()
            batch_fbank.append(fbank)
        batch_fbank = np.vstack(batch_fbank)
        # BUG FIX: ``shape`` is a tuple and must be indexed, not called.
        batch_size = batch_fbank.shape[0]
        length = batch_fbank.shape[1]
        ort_input = {
            "speech": batch_fbank,
            # BUG FIX: ONNX Runtime rejects plain Python lists as inputs;
            # feed a numpy array. wenet's CPU export declares
            # speech_lengths as int32 -- TODO confirm for this model.
            "speech_lengths": np.full(batch_size, length, dtype=np.int32),
        }
        # Use the session stored on the instance rather than the global,
        # consistent with how __init__ exposes it.
        encoder_output = self.session.run(None, ort_input)[0]
        return encoder_output


class MyClassifier(EncoderClassifier):
    """Language-ID classifier whose acoustic features come from a
    :class:`WenetEncoder` instead of speechbrain's built-in frontend."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.encoder = None  # WenetEncoder, injected via set_encoder()
        # BUG FIX: was 1000, but the wenet fbank pipeline hard-codes a
        # 16 kHz sample rate and ``self.sr`` converts seconds -> samples.
        # TODO confirm all callers feed 16 kHz audio.
        self.sr = 16000

    def set_encoder(self, encoder):
        """Inject the WenetEncoder used by :meth:`encode_batch`."""
        self.encoder = encoder

    def encode_batch(self, wavs: torch.Tensor, wav_lens: torch.Tensor = None):
        """Compute embeddings for a batch of waveforms.

        Args:
            wavs: (batch, num_samples) waveform tensor.
            wav_lens: Relative lengths in [0, 1]; defaults to all-ones.

        Returns:
            Embedding tensor produced by ``self.mods.embedding_model``.
        """
        assert self.encoder is not None, "call set_encoder() before encoding"
        if wav_lens is None:
            # BUG FIX: ``shape`` is a tuple -- index it, do not call it.
            wav_lens = torch.ones(wavs.shape[0], device=self.device)
        encoder_output = self.encoder.compute_batch_encoder_output(wavs)
        feats = torch.from_numpy(encoder_output).to(self.device)
        feats = self.mods.mean_var_norm(feats, wav_lens)
        embeddings = self.mods.embedding_model(feats, wav_lens)
        return embeddings

    def detect_sentenc_lid(
            self,
            waveform: torch.Tensor,
            max_window_secs: float = 5,
            min_window_secs: float = 1
    ) -> str:
        """Detect the dominant language id of *waveform* by majority vote.

        The signal is split into consecutive windows of at most
        ``max_window_secs``; each window is classified and the most
        frequent label wins.

        Args:
            waveform: (channels, num_samples) tensor.
            max_window_secs: Window length in seconds; a value <= 0 means a
                single window covering the whole signal.
            min_window_secs: Trailing windows shorter than this are skipped.

        Returns:
            The most common predicted language label.

        Raises:
            ValueError: If no window was long enough to classify.
        """
        assert waveform.dim() == 2
        sample_length = waveform.shape[1]
        if max_window_secs <= 0:
            window_samples = sample_length
        else:
            window_samples = int(max_window_secs * self.sr)
        min_window_samples = min_window_secs * self.sr
        langids = []
        for start in range(0, sample_length, window_samples):
            chunk = waveform[:, start:start + window_samples]
            # BUG FIX: ``shape`` is a tuple -- index it, do not call it.
            if chunk.shape[1] < min_window_samples:
                break
            out_prob, score, index, text_lab = self.classify_batch(chunk)
            langids.append(text_lab[0])
        # BUG FIX: ``lid`` was referenced without being bound when no window
        # qualified, raising UnboundLocalError; fail with a clear message.
        if not langids:
            raise ValueError("waveform too short for language detection")
        return max(set(langids), key=langids.count)
