#!/usr/bin/env python3
# Author: Armit
# Create Time: Fri 2025/08/22

# Encode audio with a pretrained Whisper model and inspect the encoding difference between two samples

from time import time
from pathlib import Path
from argparse import ArgumentParser

import torch
from torchvision.utils import make_grid
from transformers.modeling_outputs import BaseModelOutput
import matplotlib.pyplot as plt

from utils import MODELS, get_whisper_model, get_whisper_latent


@torch.inference_mode()   # call form is the documented decorator usage, portable across PyTorch versions
def run(args):
  '''Encode audio with a pretrained Whisper encoder and plot its hidden states and attention maps.

  Args:
    args: parsed CLI namespace with `model` (whisper size name), `input` (Path to
      audio file) and optional `input_ref` (Path to a second audio file to compare).
  '''
  model_path = f'openai/whisper-{args.model}'
  model, processor = get_whisper_model(model_path)

  def hidden_states_to_image(hidden_states):
    # Stack the per-layer [L, D] feature maps vertically into a single HxWxC image tensor
    N, L, D = hidden_states.shape
    return make_grid(hidden_states.reshape(N, 1, L, D), nrow=1, pad_value=255).permute(1, 2, 0).cpu()

  def attentions_to_image(attentions):
    # Tile every (layer, head) [L, L] attention map into one grid image
    N, H, L, _ = attentions.shape
    return make_grid(attentions.reshape(N*H, 1, L, L), pad_value=255).permute(1, 2, 0).cpu()

  def encode(fp):
    # Run the encoder; concat per-layer outputs along the batch dim.
    # Expected shapes (base model): hidden_states [N+1=7, L=64, D=512], attentions [N=6, H=8, L=64, L=64]
    encoder_outputs: BaseModelOutput = get_whisper_latent(fp, model, processor)
    hidden_states = torch.cat(encoder_outputs.hidden_states, dim=0)
    attentions = torch.cat(encoder_outputs.attentions, dim=0)
    return hidden_states, attentions

  def show_single(img, title):
    # One figure per call; plt.show() blocks until the window is closed
    plt.imshow(img)
    plt.suptitle(title)
    plt.show()

  def show_compare(img, img_ref, title):
    # Side-by-side comparison: input on the left, reference on the right
    plt.subplot(121) ; plt.imshow(img)
    plt.subplot(122) ; plt.imshow(img_ref)
    plt.suptitle(title)
    plt.show()

  hidden_states, attentions = encode(args.input)

  if args.input_ref:
    hidden_states_ref, attentions_ref = encode(args.input_ref)
    show_compare(hidden_states_to_image(hidden_states), hidden_states_to_image(hidden_states_ref), 'hidden_states')
    show_compare(attentions_to_image(attentions), attentions_to_image(attentions_ref), 'attentions')
  else:
    show_single(hidden_states_to_image(hidden_states), 'hidden_states')
    show_single(attentions_to_image(attentions), 'attentions')


if __name__ == '__main__':
  parser = ArgumentParser()
  parser.add_argument('-M', '--model', default='base', choices=MODELS, help='model name')
  parser.add_argument('-I', '--input', required=True, type=Path, help='input audio file')
  parser.add_argument('-R', '--input_ref', type=Path, help='ref input audio file')
  args = parser.parse_args()

  # validate inputs explicitly: `assert` is stripped under `python -O`,
  # and parser.error() gives a proper CLI error message + non-zero exit
  if not args.input.is_file():
    parser.error(f'input is not a file: {args.input}')
  print('>> Input:', args.input)
  if args.input_ref:
    if not args.input_ref.is_file():
      parser.error(f'input_ref is not a file: {args.input_ref}')
    print('>> Input-ref:', args.input_ref)

  ts_start = time()
  run(args)
  print(f'>> timecost: {time() - ts_start:.3f}s')
