from typing import Optional, Tuple, Union

import torch
from transformers.modeling_outputs import Seq2SeqLMOutput
from transformers.models.whisper.modeling_whisper import WhisperDecoder, WhisperForConditionalGeneration


class MyWhisperDecoder(WhisperDecoder):
    """Whisper decoder that delegates to :meth:`WhisperDecoder.forward`.

    Exists as a hook point for customizing decoder behavior; currently it is a
    pure pass-through to the parent implementation.
    """

    def forward(
            self,
            input_ids=None,
            attention_mask=None,
            encoder_hidden_states=None,
            head_mask=None,
            cross_attn_head_mask=None,
            past_key_values=None,
            inputs_embeds=None,
            position_ids=None,
            use_cache=None,
            output_attentions=None,
            output_hidden_states=None,
            return_dict=None,
    ):
        """Delegate to the parent decoder and return its output.

        All parameters mirror ``transformers``'
        ``WhisperDecoder.forward``; see that method for their semantics.

        Returns:
            Whatever ``WhisperDecoder.forward`` returns (a
            ``BaseModelOutputWithPastAndCrossAttentions`` or tuple,
            depending on ``return_dict``).
        """
        # BUG FIX: the original called super().forward(...) without `return`,
        # so every caller received None. It also contained a leftover debug
        # print('hello'), removed here.
        # Arguments are passed by keyword so this delegation survives
        # parameter reordering/insertion across transformers versions.
        return super().forward(
            input_ids=input_ids,
            attention_mask=attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            head_mask=head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            position_ids=position_ids,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )


class MyWhisperForConditionalGeneration(WhisperForConditionalGeneration):
    """Whisper seq2seq model whose ``forward`` additionally accepts ``prompt``.

    The extra ``prompt`` keyword is accepted so callers (e.g. a generation
    loop) can thread it through without ``forward`` raising on an unexpected
    argument; everything else is delegated unchanged to the parent class.
    """

    def forward(
            self,
            input_features: Optional[torch.FloatTensor] = None,
            attention_mask: Optional[torch.LongTensor] = None,
            decoder_input_ids: Optional[torch.LongTensor] = None,
            decoder_attention_mask: Optional[torch.LongTensor] = None,
            head_mask: Optional[torch.Tensor] = None,
            decoder_head_mask: Optional[torch.Tensor] = None,
            cross_attn_head_mask: Optional[torch.Tensor] = None,
            encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
            past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
            decoder_inputs_embeds: Optional[Tuple[torch.FloatTensor]] = None,
            decoder_position_ids: Optional[Tuple[torch.LongTensor]] = None,
            labels: Optional[torch.LongTensor] = None,
            use_cache: Optional[bool] = None,
            output_attentions: Optional[bool] = None,
            output_hidden_states: Optional[bool] = None,
            return_dict: Optional[bool] = None,
            prompt: Optional[str] = None
    ) -> Union[Tuple, Seq2SeqLMOutput]:
        """Run the parent ``forward``, swallowing the extra ``prompt`` arg.

        All parameters except ``prompt`` mirror ``transformers``'
        ``WhisperForConditionalGeneration.forward``; see that method for
        their semantics.

        Args:
            prompt: Accepted but intentionally NOT forwarded to the parent,
                which does not know this argument.
                NOTE(review): ``prompt`` is currently unused inside this
                method — presumably it is meant to be consumed by a
                subclass or a custom generation path; confirm against
                callers before relying on it.

        Returns:
            ``Seq2SeqLMOutput`` or tuple, depending on ``return_dict``.
        """
        # Arguments are passed by keyword so this delegation survives
        # parameter reordering/insertion across transformers versions
        # (the original positional pass-through would silently misroute
        # arguments if the upstream signature changed).
        return super().forward(
            input_features=input_features,
            attention_mask=attention_mask,
            decoder_input_ids=decoder_input_ids,
            decoder_attention_mask=decoder_attention_mask,
            head_mask=head_mask,
            decoder_head_mask=decoder_head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            encoder_outputs=encoder_outputs,
            past_key_values=past_key_values,
            decoder_inputs_embeds=decoder_inputs_embeds,
            decoder_position_ids=decoder_position_ids,
            labels=labels,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
