import os
import json

import whisper
from whisper import ModelDimensions
from whisper import _ALIGNMENT_HEADS
from whisper import Whisper

import torch
import torchaudio
import torchaudio.compliance.kaldi as kaldi

# ---- Build the stock Whisper backbone and splice in fine-tuned weights ----
whisper_model = "large-v2"
checkpoint = torch.load(
    f"/home/work_nfs5_ssd/pkchen/workspace/whisper/{whisper_model}.pt",
    map_location='cpu',
)
dims = ModelDimensions(**checkpoint["dims"])
model = Whisper(dims)
# Alignment heads drive cross-attention word alignment; kept for parity with
# the stock checkpoint even though decoding below disables timestamps.
model.set_alignment_heads(_ALIGNMENT_HEADS[whisper_model])

exp = "exp/whisper_large_v1"
ckpt = "avg_3.pt"
# map_location='cpu' so a GPU-saved fine-tuned checkpoint still loads on a
# CPU-only host (the CPU fallback below expects that to be possible).
md = torch.load(f"{exp}/{ckpt}", map_location='cpu')
# The fine-tuning wrapper prefixed parameter names with "encoders."/"decoders.";
# strip those prefixes so the state dict matches stock Whisper module names.
nd = {k.replace("encoders.", "").replace("decoders.", ""): v for k, v in md.items()}
model.load_state_dict(nd)
try:
    model.cuda()
except Exception:
    # Best-effort GPU placement: fall back to CPU (e.g. no CUDA device or GPU
    # OOM). Narrowed from a bare `except:`, which also swallowed
    # KeyboardInterrupt/SystemExit.
    print("decode on cpu")
options = whisper.DecodingOptions(language="zh", without_timestamps=True)

# ---- Decode the test set, writing one "<key> <hypothesis>" line per utterance ----
test_set = "test_net"
fin = f"data/test/{test_set}/data.list"
fout = f"{exp}/whisper_reg/{ckpt}/{test_set}"
# exist_ok=True is idempotent and avoids the check-then-create race of
# os.path.exists() + os.makedirs().
os.makedirs(fout, exist_ok=True)
# Explicit UTF-8 so the Chinese hypotheses are written/read correctly
# regardless of the host locale.
with open(f"{fout}/text", 'w', encoding='utf-8') as fo, \
        open(fin, 'r', encoding='utf-8') as fi:
    for line in fi:
        # Each line is a JSON object expected to carry at least
        # {"key": <utterance id>, "wav": <audio path>}.
        obj = json.loads(line)
        assert 'key' in obj
        assert 'wav' in obj
        waveform, sample_rate = torchaudio.load(obj['wav'])
        # Rescale [-1, 1] floats to int16 range — presumably matching the
        # fine-tuning feature pipeline; NOTE(review): stock Whisper expects
        # audio in [-1, 1], so confirm this matches the training recipe.
        waveform = waveform * (1 << 15)

        # Pad/trim to Whisper's fixed 30 s window, then compute log-mel
        # features on the model's device; decode batches over channels.
        audio = whisper.pad_or_trim(waveform)
        mels = whisper.log_mel_spectrogram(audio).to(model.device)
        results = model.decode(mels, options)
        print(f"{obj['key']} {results[0].text}")
        fo.write(f"{obj['key']} {results[0].text}\n")