import copy
from typing import Optional, Tuple, Union

import torch
import logging
import torch.nn.functional as F

# from espnet2.asr.specaug.specaug import SpecAug
from wenet.utils.mask import make_pad_mask


class OpenAIWhisperEncoder(torch.nn.Module):
    """Transformer-based Speech Encoder from OpenAI's Whisper Model:

    URL: https://github.com/openai/whisper

    Wraps a pretrained Whisper audio encoder behind a wenet-style encoder
    interface: ``forward`` takes padded raw waveforms plus lengths, computes
    Whisper's log-mel features, and runs the Whisper transformer encoder,
    returning ``(encoded, masks)`` — or ``(encoded, masks, kld_loss)`` when
    ``distill`` is enabled.
    """

    def __init__(
        self,
        input_size: int = 1,
        dropout_rate: float = 0.0,
        whisper_model: str = "small",
        download_dir: Optional[str] = None,
        use_specaug: bool = False,
        specaug_conf: Union[dict, None] = None,
        do_pad_trim: bool = False,
        use_lora: bool = False,  # add to use lora
        init: bool = True,
        distill: bool = False,
    ):
        """Build the encoder and load pretrained Whisper weights.

        Args:
            input_size: unused in this implementation; kept for interface
                compatibility with other wenet encoders.
            dropout_rate: dropout applied between encoder blocks (the original
                Whisper model uses no dropout).
            whisper_model: Whisper model size name (e.g. "small", "large").
            download_dir: NOTE(review): currently ignored — the download root
                below is hard-coded; wire this argument through.
            use_specaug: if True, applies SpecAug to the mel features.
                NOTE(review): the ``SpecAug`` import is commented out at the
                top of this file, so ``use_specaug=True`` raises NameError.
            specaug_conf: kwargs forwarded to ``SpecAug`` when enabled.
            do_pad_trim: if True, pad/trim raw audio to Whisper's fixed
                30-second window (``N_SAMPLES``) in ``forward``.
            use_lora: LoRA loading path — currently commented out.
            init: if True, load pretrained weights via ``whisper.load_model``.
            distill: enable the teacher/student distillation path.
                NOTE(review): the code that builds ``self.teacher_encoders``
                and ``self.KLD_loss`` is commented out below, so
                ``distill=True`` currently fails at runtime.
        """
        try:
            import whisper
            from whisper.audio import HOP_LENGTH, N_FFT, N_MELS, N_SAMPLES
        except Exception as e:
            print("Error: whisper is not properly installed.")
            print(
                "Please install whisper with: cd ${MAIN_ROOT}/tools &&",
                "./installers/install_whisper.sh",
            )
            raise e

        super().__init__()

        # Front-end STFT/mel parameters mirror Whisper's own constants.
        self.n_fft = N_FFT
        self.win_length = N_FFT
        self.hop_length = HOP_LENGTH
        self.n_mels = N_MELS

        # Callable: mel_filters(device, n_mels) -> filterbank matrix.
        self.mel_filters = whisper.audio.mel_filters

        self.distill = distill

        # note that originally Whisper doesn't use dropouts
        self.dropout = torch.nn.Dropout(dropout_rate)

        if not use_lora and not distill and init:
            assert whisper_model in whisper.available_models()
            # NOTE(review): hard-coded, user-specific download_root; the
            # ``download_dir`` constructor argument is ignored here.
            _model = whisper.load_model(whisper_model, download_root=f"/home/work_nfs5_ssd/pkchen/workspace/whisper", device='cpu')
            


        
        # if not init:
        #     from whisper import ModelDimensions
        #     from whisper import _ALIGNMENT_HEADS
        #     from whisper import Whisper

        #     checkpoint = torch.load(f"/home/work_nfs5_ssd/pkchen/workspace/whisper/{whisper_model}.pt", map_location='cpu')
        #     dims = ModelDimensions(**checkpoint["dims"])
        #     _model = Whisper(dims)
        #     _model.set_alignment_heads(_ALIGNMENT_HEADS[whisper_model])

        # add for lora
        # if use_lora:
        #     from espnet2.asr.whisper_lora import Whisper_lora
        #     from whisper import ModelDimensions
        #     from whisper import _ALIGNMENT_HEADS
        #     import loralib as lora

        #     device = "cuda" if torch.cuda.is_available() else "cpu"
        #     checkpoint = torch.load(f"/home/work_nfs5_ssd/pkchen/workspace/whisper/{whisper_model}.pt", map_location=device)
        #     dims = ModelDimensions(**checkpoint["dims"])
        #     _model = Whisper_lora(dims)
        #     _model.load_state_dict(checkpoint["model_state_dict"], strict=False)
        #     _model.set_alignment_heads(_ALIGNMENT_HEADS[whisper_model])
        #     lora.mark_only_lora_as_trainable(_model)
        # add for lora

        # add for distill
        # if distill:
        #     from wenet.transformer.whisper_distill import Whisper_distill
        #     from whisper import ModelDimensions
        #     from whisper import _ALIGNMENT_HEADS

        #     # device = "cuda" if torch.cuda.is_available() else "cpu"
        #     checkpoint = torch.load(f"/home/work_nfs5_ssd/pkchen/workspace/whisper/{whisper_model}.pt", map_location='cpu')

        #     dims = ModelDimensions(**checkpoint["dims"])
        #     _model = Whisper_distill(dims)
        #     _model.load_state_dict(checkpoint["model_state_dict"], strict=True)
        #     _model.set_alignment_heads(_ALIGNMENT_HEADS[whisper_model])
        #     self.teacher_encoders = copy.deepcopy(_model.encoder)
        #     self.teacher_encoders.requires_grad_(False)
        #     del _model

        #     dims = checkpoint["dims"]
        #     dims['n_audio_state'] = 640
        #     dims['n_text_state'] = 640
        #     dims = ModelDimensions(**dims)
        #     _model = Whisper_distill(dims)
        #     _model.set_alignment_heads(_ALIGNMENT_HEADS[whisper_model])
            
        #     self.KLD_loss = torch.nn.KLDivLoss(reduction="batchmean")  
        # add for distill

        # NOTE(review): ``_model`` is only assigned in the branch above; with
        # use_lora=True, distill=True, or init=False (all alternative loaders
        # are commented out) the next line raises NameError.
        self.encoders = copy.deepcopy(_model.encoder)
        # Overwrite the pretrained encoder with weights from a fine-tuned,
        # averaged wenet checkpoint.
        # NOTE(review): hard-coded absolute path — should come from config.
        checkpoint = torch.load("/home/work_nfs5_ssd/yzli/workspace/wenet/examples/aishell/s0/exp/whisper_large_v1/avg_3.pt", map_location='cpu')
        encoder_state_dict = {}
        main_state_dict = self.encoders.state_dict()
        for key in checkpoint.keys():
            if key.startswith('encoder'):
                # Drop the first two name components (presumably
                # "encoder.encoders.") so keys match this module's state dict
                # — TODO confirm against the checkpoint's actual key layout.
                new_key = key.split('.')[2:]
                new_key = '.'.join(new_key)
                # logging.info(f"{new_key}")
                encoder_state_dict[new_key] = checkpoint[key]
        # logging.info(f"blocks.31.mlp.0.weight {encoder_state_dict['blocks.31.mlp.0.weight']}")
        # for name, param in self.encoders.named_parameters():
        #     if name == "blocks.31.mlp.0.weight":
        #         logging.info(f"before: {name} {param}")
        # Merge into the full state dict so any keys missing from the
        # checkpoint keep their pretrained values; strict=False tolerates
        # leftover mismatches silently.
        main_state_dict.update(encoder_state_dict)
        self.encoders.load_state_dict(main_state_dict, strict=False)
        # for name, param in self.encoders.named_parameters():
        #     if name == "blocks.31.mlp.0.weight":
        #         logging.info(f"after: {name} {param}")
        self.encoders.train()

        del _model

        if use_specaug:
            # NOTE(review): ``SpecAug`` is not in scope (its import is
            # commented out at the top of the file) — NameError here.
            self.specaug = SpecAug(**specaug_conf)
        else:
            self.specaug = None

        self.do_pad_trim = do_pad_trim
        self.pad_samples = N_SAMPLES

    def output_size(self) -> int:
        """Return the encoder output dimension (width of the final LayerNorm)."""
        return self.encoders.ln_post.normalized_shape[-1]

    def pad_or_trim(
        self,
        array: torch.Tensor,
        length: int,
        axis: int = -1,
    ) -> torch.Tensor:
        """Pad or trim the audio array to N_SAMPLES.

        Used in zero-shot inference cases.
        """
        if array.shape[axis] > length:
            # Too long: keep only the first ``length`` samples along ``axis``.
            array = array.index_select(
                dim=axis, index=torch.arange(length).to(array.device)
            )

        if array.shape[axis] < length:
            # Too short: zero-pad at the end of ``axis``. F.pad expects a flat
            # pad list ordered from the last dimension backwards, hence the
            # reversed flattening of the per-dim (before, after) pairs.
            pad_widths = [(0, 0)] * array.ndim
            pad_widths[axis] = (0, length - array.shape[axis])
            array = F.pad(array, [pad for sizes in pad_widths[::-1] for pad in sizes])

        return array

    def log_mel_spectrogram(
        self,
        audio: torch.Tensor,
        ilens: Optional[torch.Tensor] = None,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        """Use log-mel spectrogram computation native to Whisper training.

        Args:
            audio: raw waveform batch, shape (batch, samples).
            ilens: per-utterance sample counts, or None.

        Returns:
            (log_spec, olens): normalized log-mel features of shape
            (batch, n_mels, frames) and approximate per-utterance frame
            counts (``ilens // hop_length``), or None when ``ilens`` is None.
        """
        window = torch.hann_window(self.win_length).to(audio.device)
        stft = torch.stft(
            audio, self.n_fft, self.hop_length, window=window, return_complex=True
        )

        # whisper deletes the last frame by default (Shih-Lun)
        magnitudes = stft[..., :-1].abs() ** 2

        filters = self.mel_filters(audio.device, self.n_mels)
        mel_spec = filters @ magnitudes

        # Clamp avoids log10(0) on silent frames.
        log_spec = torch.clamp(mel_spec, min=1e-10).log10()

        if ilens is not None:
            # NOTE(review): approximate; does not account for STFT centering
            # or the dropped last frame above — confirm against mask usage.
            olens = ilens // self.hop_length
        else:
            olens = None

        # Whisper's normalization: floor at 8 dB below the per-utterance
        # maximum, then rescale to roughly [-1, 1].
        log_spec = torch.maximum(
            log_spec,
            log_spec.view(audio.size(0), -1).max(dim=-1)[0][:, None, None] - 8.0,
        )
        log_spec = (log_spec + 4.0) / 4.0

        return log_spec, olens

    def whisper_encode(
        self,
        input: torch.Tensor,
        ilens: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        """Run the Whisper encoder stack over log-mel features.

        Args:
            input: log-mel features, (batch, n_mels, frames).
            ilens: per-utterance frame counts, or None (no masks returned).

        Returns:
            (x, masks) normally; (x, masks, kld_loss) when ``self.distill``.
        """
        # Whisper conv front-end: conv2 has stride 2, halving the frame rate.
        x = F.gelu(self.encoders.conv1(input))
        x = F.gelu(self.encoders.conv2(x))
        x = x.permute(0, 2, 1)  # -> (batch, frames, dim)

        n_frames = x.size(1)
        max_pos = self.encoders.positional_embedding.size(0)
        if n_frames <= max_pos:
            x = (x + self.encoders.positional_embedding[: x.size(1), :]).to(x.dtype)
        else:
            # due to positional encoding, audios >30 sec won't be accepted
            x = x[:, :max_pos, :] + self.encoders.positional_embedding

        x = self.dropout(x)
                      
        if self.distill:
            # NOTE(review): this branch needs ``self.teacher_encoders`` and
            # ``self.KLD_loss``, which are only created in commented-out
            # __init__ code — AttributeError as the file stands. It also
            # calls blocks with a ``distill=`` kwarg, which requires the
            # patched (Whisper_distill) block implementation.
            with torch.no_grad():
                t_x = F.gelu(self.teacher_encoders.conv1(input))
                t_x = F.gelu(self.teacher_encoders.conv2(t_x))
                t_x = t_x.permute(0, 2, 1)

                max_pos = self.teacher_encoders.positional_embedding.size(0)
                if n_frames <= max_pos:
                    t_x = (t_x + self.teacher_encoders.positional_embedding[: x.size(1), :]).to(x.dtype)
                else:
                    t_x = t_x[:, :max_pos, :] + self.teacher_encoders.positional_embedding
            loss_kld = 0.0
            i = 0
            for block, t_block in zip(self.encoders.blocks, self.teacher_encoders.blocks):
                x, w = block(x, distill=self.distill)
                with torch.no_grad():
                    t_x, t_w = t_block(t_x, distill=self.distill)
                i += 1
                # Only accumulate KLD over blocks 25+ — presumably the last 8
                # of a 32-block (large) model; matches the "/ 8" below. TODO
                # confirm this is intended for other model sizes.
                if i > 24:
                    loss_kld += self.KLD_loss(w.reshape(-1, w.shape[-1]).log(), t_w.reshape(-1, t_w.shape[-1]))
            del w, t_x, t_w
        else:  
            for layer, block in enumerate(self.encoders.blocks):
                x = block(x)
                # Dropout between blocks, but not after the last one.
                if layer < len(self.encoders.blocks) - 1:
                    x = self.dropout(x)

        x = self.encoders.ln_post(x)

        if ilens is not None:
            # Standard conv output-length formula for conv2 (stride 2).
            olens = (
                1
                + (
                    ilens
                    - self.encoders.conv2.kernel_size[0]
                    + 2 * self.encoders.conv2.padding[0]
                )
                // self.encoders.conv2.stride[0]
            )
            olens = torch.clamp(olens, max=max_pos)
            # NOTE(review): x is (batch, frames, dim) here, so size(2) is the
            # feature dim — the time axis is size(1). Verify whether this
            # should be ``x.size(1)``.
            T = x.size(2)
            masks = ~make_pad_mask(olens, T).unsqueeze(1)
        else:
            masks = None
        if self.distill:
            # Average over the 8 distilled blocks (hard-coded; see note above).
            return x, masks, loss_kld / 8
        return x, masks

    def forward(
        self,
        xs_pad: torch.Tensor,
        ilens: torch.Tensor,
        decoding_chunk_size: int = 0,
        num_decoding_left_chunks: int = -1,
    ) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]:
        """Encode padded raw waveforms.

        Args:
            xs_pad: padded raw audio, (batch, samples).
            ilens: per-utterance sample counts.
            decoding_chunk_size: unused; kept for wenet interface parity.
            num_decoding_left_chunks: unused; kept for wenet interface parity.

        Returns:
            (encoded, masks) — plus a KLD loss term when ``self.distill``.
        """
        if self.do_pad_trim:
            # Zero-shot inference: force audio to Whisper's 30 s window.
            xs_pad = self.pad_or_trim(xs_pad, self.pad_samples)
    
        feats, feats_lens = self.log_mel_spectrogram(xs_pad, ilens)
        raw_shape = xs_pad.shape  # NOTE(review): unused.
        mel_shape = feats.shape

        if self.specaug is not None and self.encoders.training:
            # SpecAug expects (batch, time, freq); mel features are
            # (batch, freq, time), hence the reshape round-trip.
            # NOTE(review): reshape is not transpose — confirm this actually
            # swaps axes as intended rather than scrambling the data.
            feats = feats.reshape(mel_shape[0], mel_shape[2], mel_shape[1])
            feats, feats_lens = self.specaug(feats, feats_lens)
            feats = feats.reshape(mel_shape[0], mel_shape[1], mel_shape[2])
        
        if self.distill:
            xs_pad, masks, loss_kld = self.whisper_encode(feats, feats_lens)
            return xs_pad, masks, loss_kld
        xs_pad, masks = self.whisper_encode(feats, feats_lens)
        return xs_pad, masks
