import sys, os
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import pandas as pd
import cv2, traceback, subprocess
import torch
from tqdm import tqdm
from glob import glob
import click
import whisper
from torch.nn import functional as F
import numpy as np
from utils.deep_speech import DeepSpeech
from ezds.ezdlearn.utils.load import load_video_by_frame

#%%
# Path to the pretrained DeepSpeech frozen TensorFlow graph used by
# DeepSpeech.compute_audio_feature() below.  NOTE(review): hardcoded
# absolute path — verify it exists in the deployment environment.
deepspeech_model_path = "/app/git/DINet/asserts/output_graph.pb"

def split_array(arr, split_size):
    """Split *arr* into consecutive chunks of at most *split_size* elements.

    Every chunk except possibly the last has exactly ``split_size`` items;
    a trailing empty chunk (produced when ``len(arr)`` is an exact multiple
    of ``split_size``, or when ``len(arr) < split_size``) is dropped.
    Returns a list of numpy arrays.
    """
    # At least one boundary is always generated, even for short inputs.
    num_chunks = max(len(arr) // split_size, 1)
    boundaries = [split_size * k for k in range(1, num_chunks + 1)]
    chunks = np.array_split(arr, boundaries)
    # np.array_split appends the remainder after the last boundary; it can
    # be empty, in which case it is discarded.
    if len(chunks[-1]) == 0:
        return chunks[:-1]
    return chunks
		
def process_audio_file(afile, model):
    """Extract DeepSpeech audio features for *afile* and cache them on disk.

    The feature tensor is saved as ``dsp.pkl`` in the same directory as the
    input audio file, under the key ``"embed"``.  NOTE(review): requires a
    CUDA device — the tensor is moved to the GPU before saving.
    """
    out_path = os.path.join(os.path.dirname(afile), "dsp.pkl")
    features = model.compute_audio_feature(afile)
    features = torch.from_numpy(features).float().cuda()
    torch.save({"embed": features}, out_path)
	
def mp_handler(args):
    """Worker wrapper around process_audio_file for executor submission.

    Parameters
    ----------
    args : tuple
        ``(audio_file_path, deepspeech_model)`` — matches the jobs built in
        ``main`` / the ``__main__`` block.

    Errors from a single file are printed and swallowed so one bad file does
    not abort the whole batch; Ctrl-C still terminates the process.
    """
    # Fix: the original unpacked these as (vfile, preprocessed_root), which
    # was misleading — the second element is the DeepSpeech model instance.
    afile, model = args
    try:
        process_audio_file(afile, model)
    except KeyboardInterrupt:
        exit(0)
    # Fix: narrowed from a bare `except:` so SystemExit/GeneratorExit are
    # not silently swallowed.
    except Exception:
        traceback.print_exc()

@click.command()
@click.option("-d", "--data_root", type=str, required=True, help="wav data root path")
@click.option("-t", "--nthread", type=int, default=1, help="num threads")
def main(data_root, nthread):
    """Batch-extract DeepSpeech features for every wav under *data_root*.

    Builds one DeepSpeech model per thread, assigns files to models
    round-robin, and runs the extraction on a thread pool with a progress
    bar.  Per-file failures are reported by ``mp_handler``.
    """
    models = [DeepSpeech(deepspeech_model_path) for _ in range(nthread)]
    # Fix: the original globbed the hardcoded 'preprocess_data' directory,
    # leaving the required --data_root option unused.
    filelist = glob(os.path.join(data_root, '*/*/*.wav'))
    jobs = [(afile, models[i % nthread]) for i, afile in enumerate(filelist)]
    # Fix: context manager guarantees shutdown even if a result raises.
    with ThreadPoolExecutor(nthread) as pool:
        futures = [pool.submit(mp_handler, job) for job in jobs]
        for fut in tqdm(as_completed(futures), total=len(futures)):
            fut.result()

if __name__ == "__main__":
    data_root = 'preprocessed_data'
    nthread = 1
    models = [DeepSpeech(deepspeech_model_path) for _ in range(nthread)]
    filelist = glob(os.path.join('preprocess_data', 'samples/*/*.wav'))
    jobs = [(afile, models[int(i%nthread)]) for i, afile in enumerate(filelist)]
    p = ThreadPoolExecutor(nthread)
    futures = [p.submit(mp_handler, j) for j in jobs]
    _ = [r.result() for r in tqdm(as_completed(futures), total=len(futures))]
    p.shutdown()