import math
from typing import Optional, List
import torch
from funasr.models.sanm.encoder import SANMEncoder
from funasr.models.bicif_paraformer.cif_predictor import CifPredictorV3
from funasr.frontends.wav_frontend import WavFrontend
from funasr.models.specaug.specaug import SpecAugment


class AudioTokenizer:
    """Assembles the FunASR audio front-end stack for speech tokenization.

    From a single flat ``config`` object this builds:
      * a fbank/LFR wav frontend (``WavFrontend``),
      * a SpecAugment data-augmentation module (``SpecAugment``),
      * a SANM self-attention encoder (``SANMEncoder``),
      * a CIF predictor (``CifPredictorV3``) for frame-to-token alignment.

    NOTE(review): this class is not an ``nn.Module`` and defines no
    ``parameters()``/``to()`` — confirm whether the sub-modules are
    registered/moved elsewhere by the caller.
    """

    def __init__(self, config):
        """Build all sub-modules from ``config``.

        Args:
            config: flat namespace-like object; every hyperparameter is read
                from a prefixed attribute (``audio_encoder_*``, ``cif_*``,
                ``wav_frontend_*``, ``specaug_*``).
        """
        self.config = config

        # Self-attention memory (SANM) encoder over LFR fbank features.
        self.audio_encoder = SANMEncoder(
            output_size=config.audio_encoder_output_size,
            attention_heads=config.audio_encoder_attention_heads,
            linear_units=config.audio_encoder_linear_units,
            num_blocks=config.audio_encoder_num_blocks,
            dropout_rate=config.audio_encoder_dropout_rate,
            positional_dropout_rate=config.audio_encoder_positional_dropout_rate,
            attention_dropout_rate=config.audio_encoder_attention_dropout_rate,
            input_layer=config.audio_encoder_input_layer,
            pos_enc_class=config.audio_encoder_pos_enc_class,
            normalize_before=config.audio_encoder_normalize_before,
            kernel_size=config.audio_encoder_kernel_size,
            # NOTE(review): the config attribute is spelled "shfit" — this
            # matches FunASR's historical misspelling; confirm against the
            # config schema before "fixing" either side.
            sanm_shift=config.audio_encoder_sanm_shfit,
            selfattention_layer_type=config.audio_encoder_selfattention_layer_type,
        )

        # Continuous Integrate-and-Fire predictor: estimates token boundaries
        # from encoder output (Paraformer-style alignment).
        self.cif_predictor = CifPredictorV3(
            idim=config.cif_idim,
            threshold=config.cif_threshold,
            l_order=config.cif_l_order,
            r_order=config.cif_r_order,
            tail_threshold=config.cif_tail_threshold,
            smooth_factor2=config.cif_smooth_factor2,
            noise_threshold2=config.cif_noise_threshold2,
            upsample_times=config.cif_upsample_times,
            use_cif1_cnn=config.cif_use_cif1_cnn,
            upsample_type=config.cif_upsample_type,
        )

        # Waveform -> fbank features with low-frame-rate (LFR) stacking.
        self.wav_frontend = WavFrontend(
            fs=config.wav_frontend_fs,
            window=config.wav_frontend_window,
            n_mels=config.wav_frontend_n_mels,
            frame_length=config.wav_frontend_frame_length,
            frame_shift=config.wav_frontend_frame_shift,
            lfr_m=config.wav_frontend_lfr_m,
            lfr_n=config.wav_frontend_lfr_n,
        )

        # SpecAugment defaults originate from a FunASR YAML recipe
        # (e.g. freq_mask_width_range=[0, 30], time_mask_width_range=[0, 12],
        # lfr_rate=6); the actual values used here come from ``config``.
        self.spec_augment = SpecAugment(
            apply_time_warp=config.specaug_apply_time_warp,
            time_warp_window=config.specaug_time_warp_window,
            time_warp_mode=config.specaug_time_warp_mode,
            apply_freq_mask=config.specaug_apply_freq_mask,
            freq_mask_width_range=config.specaug_freq_mask_width_range,
            lfr_rate=config.specaug_lfr_rate,
            num_freq_mask=config.specaug_num_freq_mask,
            apply_time_mask=config.specaug_apply_time_mask,
            time_mask_width_range=config.specaug_time_mask_width_range,
            num_time_mask=config.specaug_num_time_mask,
        )

    def forward(self,
                speech: torch.Tensor,
                speech_lengths: torch.Tensor,
                text: torch.Tensor,
                text_lengths: torch.Tensor,
                ):
        """Forward pass of the audio tokenizer for doasr.

        NOTE(review): "doasr" reads like a project codename or a typo for
        "ASR" — confirm intended wording.

        Args:
            speech (torch.Tensor): (batch_size, max_speech_length)
            speech_lengths (torch.Tensor): (batch_size)
            text (torch.Tensor): (batch_size, max_text_length)
            text_lengths (torch.Tensor): (batch_size)

        Returns:
            None. Not yet implemented — this is a stub; callers currently
            receive ``None``.
        """
        # TODO: implement frontend -> specaug -> encoder -> CIF pipeline.
        pass
