import sys, os
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import pandas as pd
import cv2, traceback, subprocess
import torch
from tqdm import tqdm
from glob import glob
import click
import whisper
from torch.nn import functional as F
import numpy as np
from ezds.ezdlearn.utils.load import load_video_by_frame

#%%
def whisper_encode(model, x):
    """Run a log-mel spectrogram through a Whisper audio encoder.

    The positional embedding is sliced to the actual number of frames
    (``h.shape[1]`` after the conv stack), so inputs shorter than the
    full Whisper window are accepted.

    Args:
        model: a Whisper ``AudioEncoder`` exposing ``conv1``, ``conv2``,
            ``positional_embedding``, ``blocks`` and ``ln_post``.
        x: mel tensor of shape (batch, n_mels, frames).

    Returns:
        Encoded features of shape (batch, frames_out, d_model).
    """
    h = F.gelu(model.conv1(x))
    h = F.gelu(model.conv2(h))
    # (batch, channels, time) -> (batch, time, channels)
    h = h.permute(0, 2, 1)
    h = (h + model.positional_embedding[: h.shape[1], :]).to(h.dtype)
    for layer in model.blocks:
        h = layer(h)
    return model.ln_post(h)

def split_array(arr, split_size):
    """Split *arr* into consecutive chunks of at most *split_size* items.

    Every chunk except possibly the last has exactly *split_size*
    elements; a trailing empty chunk (exact multiple of *split_size*)
    is discarded.  An input shorter than *split_size* yields a single
    chunk containing the whole array.
    """
    n_chunks = max(len(arr) // split_size, 1)
    boundaries = [split_size * k for k in range(1, n_chunks + 1)]
    chunks = np.array_split(arr, boundaries)
    return chunks[:-1] if len(chunks[-1]) == 0 else chunks
		
def process_audio_file(afile, model):
    """Encode one audio file with Whisper and save the embedding to disk.

    Loads *afile*, splits the waveform into 480000-sample windows
    (Whisper's fixed input length), encodes each window on the GPU,
    concatenates the results along the time axis, and writes
    ``{"embed": tensor}`` via ``torch.save``.

    NOTE(review): the output is always named ``audio.pkl`` in the wav's
    directory, so multiple wavs in one directory would overwrite each
    other — confirm there is exactly one wav per directory.
    """
    out_path = os.path.join(os.path.dirname(afile), "audio.pkl")
    waveform = whisper.load_audio(afile)
    chunks = split_array(waveform, 480000)
    with torch.no_grad():
        pieces = []
        for chunk in chunks:
            mel = whisper.log_mel_spectrogram(chunk)
            pieces.append(whisper_encode(model.encoder, mel[None, ...].cuda()))
        embed = torch.cat(pieces, dim=1)
    torch.save({"embed": embed.squeeze(0).cpu()}, out_path)


	
def mp_handler(args):
    """Worker wrapper: unpack one ``(audio_path, model)`` job and run it.

    A failure on a single file is logged and swallowed so one bad file
    does not kill the whole pool; Ctrl-C still aborts the run.
    """
    # Fix: the second tuple element is a whisper model (see the jobs
    # built in main), not a root path as the old name suggested.
    afile, model = args
    try:
        process_audio_file(afile, model)
    except KeyboardInterrupt:
        # Propagate a clean shutdown instead of the builtin exit() helper.
        sys.exit(0)
    except Exception:
        # Deliberate best-effort: report the error and keep going.
        traceback.print_exc()

@click.command()
@click.option("-d", "--data_root", type=str, required=True, help="wav data root path")
@click.option("-t", "--nthread", type=int, default=1, help="num threads")
def main(data_root, nthread):
    """Encode every ``*/*/*.wav`` under *data_root* with Whisper.

    One 'tiny' Whisper model is loaded per thread and the files are
    round-robined over the models so each thread works on its own copy.
    """
    models = [whisper.load_model('tiny').eval() for _ in range(nthread)]
    # Fix: honour the --data_root option; the glob previously used a
    # hard-coded 'preprocess_data' path and silently ignored it.
    filelist = glob(os.path.join(data_root, '*/*/*.wav'))
    jobs = [(afile, models[i % nthread]) for i, afile in enumerate(filelist)]
    # Context manager guarantees shutdown even if a result() raises.
    with ThreadPoolExecutor(nthread) as pool:
        futures = [pool.submit(mp_handler, job) for job in jobs]
        for fut in tqdm(as_completed(futures), total=len(futures)):
            fut.result()

if __name__ == "__main__":
    # Fix: delegate to the click entry point instead of duplicating its
    # body inline (the old copy also defined a dead data_root variable
    # that didn't match the path it actually globbed).  The arguments
    # reproduce the previous live behaviour: glob root 'preprocess_data'
    # and 3 worker threads.
    main(["--data_root", "preprocess_data", "--nthread", "3"])