import os

import torch
import torch.multiprocessing as mp
import torchaudio
import joblib

import threading
import math
import numpy as np
import itertools
from tqdm import tqdm
from pathlib import Path

import sys

sys.path.append('/home/work_nfs5_ssd/hfxue/UniSpeech/')
sys.path.append('/home/work_nfs5_ssd/hfxue/UniSpeech/WavLM/')
# sys.path.append('/home/work_nfs11/hfxue/fairseq_hubert')
# sys.path.append('/home/work_nfs8/xlgeng/new_workspace/fairseq')
from WavLM import WavLM, WavLMConfig
from fairseq.data.audio.audio_utils import get_features_or_waveform
import re

# Sub-directory name appended to both output roots below.
file_name = 'gxl_data'
# Seconds between QueueWatcher progress prints.
LOGGING_INTERVAL = 5
# First CUDA device index used by the workers (worker `rank` -> GPU OFFSET+rank).
OFFSET = 0
# Number of worker *processes* (despite the name) spawned in __main__.
NUM_THREADS = 8
# Audio files per batch sent to each worker.
BATCH_SIZE = 1
# Directory containing the Kaldi-style wav.scp consumed in __main__.
INPUT_DIR = Path('/home/work_nfs14/xlgeng/asr_data_shard/wenetspeech4tts')
# Path filter helper; NOTE(review): currently unused anywhere in this file.
input_guard = lambda path: "24k" in path.as_posix()
# Where per-utterance WavLM feature matrices (.npy) are written.
FEATURE_OUTPUT_DIR = Path('/home/work_nfs14/xlgeng/asr_data_raw/wenetspeech4tts/WavLMFeature') / file_name
# Where per-utterance k-means token sequences (.npy) are written.
TOKEN_OUTPUT_DIR = Path('/home/work_nfs14/xlgeng/asr_data_raw/wenetspeech4tts/TOKEN_4096') / file_name
# Append-only list of dispatched file paths (written at the end of __main__).
FILE_LIST = '/home/work_nfs14/xlgeng/asr_data_raw/wenetspeech4tts/filename_16k.lst'
# os.makedirs(FILE_LIST, exist_ok=True)
os.makedirs(FEATURE_OUTPUT_DIR, exist_ok=True)
os.makedirs(TOKEN_OUTPUT_DIR, exist_ok=True)


# Pre-trained k-means centroids (presumably 4096 clusters, per the filename
# and TOKEN_4096 output dir — confirm) for WavLM-Large features.
KMEANS_PATH = "/home/work_nfs8/xlgeng/new_workspace/checkpoint/wavlm_pt/WavLM-Large-4096.mdl"
# WavLM-Large model checkpoint.
WAVLM_PATH = "/home/work_nfs8/xlgeng/new_workspace/checkpoint/wavlm_pt/WavLM-Large.pt"
# Transformer layer whose hidden states are extracted as features.
OUTPUT_LAYER = 6


class ApplyKmeans(object):
    """Vectorised nearest-centroid lookup against a pre-trained k-means model.

    Loads sklearn cluster centres from ``km_path`` and keeps a copy on
    ``device`` so that feature frames can be tokenised on the GPU.
    """

    def __init__(self, km_path, device):
        self.km_model = joblib.load(km_path)
        # Store centres as (dim, n_clusters) so features multiply directly:
        # (n, dim) @ (dim, k) -> (n, k).
        self.C_np = self.km_model.cluster_centers_.transpose()
        self.Cnorm_np = (self.C_np ** 2).sum(0, keepdims=True)
        self.C = torch.from_numpy(self.C_np).to(device)
        self.Cnorm = torch.from_numpy(self.Cnorm_np).to(device)

    def __call__(self, x):
        """Return the index of the nearest centroid for every row of ``x``.

        Uses the expansion ||x - c||^2 = ||x||^2 - 2<x, c> + ||c||^2; the
        ||x||^2 term is constant per row, so the argmin is unaffected by it.
        """
        row_sq = x.pow(2).sum(1, keepdim=True)
        cross = torch.matmul(x, self.C)
        sq_dist = row_sq - 2 * cross + self.Cnorm
        return sq_dist.argmin(dim=-1)


def inference(rank, queue: mp.Queue):
    """Worker process: consume batches of audio paths from `queue`, extract
    WavLM layer-`OUTPUT_LAYER` features, quantise them with the k-means
    model, and save features and token ids as per-utterance .npy files.

    Args:
        rank: worker index; selects GPU `OFFSET + rank`.
        queue: producer queue of lists of `Path`s; `None` is the stop sentinel.
    """
    # def get_audio(path):
    #     sample, sr = torchaudio.load(path)
    #     sample = torchaudio.functional.resample(sample, sr, 16000).reshape(-1)
    #     return sample

    def get_audio(path):
        # Load (and resample to 16 kHz) through fairseq's audio helper.
        wav = get_features_or_waveform(path, need_waveform=True, use_sample_rate=16000)
        if wav.ndim == 2:
            # Down-mix multi-channel audio to mono by averaging.
            # NOTE(review): assumes channels are on the last axis — confirm
            # against get_features_or_waveform's return layout.
            wav = wav.mean(-1)
        wav = torch.FloatTensor(wav)
        return wav

    device = torch.device("cuda", OFFSET + rank)

    # LOAD WAVLM
    cpt = torch.load(WAVLM_PATH, map_location="cpu")
    cfg = WavLMConfig(cpt["cfg"])
    wavlm: torch.nn.Module = WavLM(cfg)
    wavlm.load_state_dict(cpt["model"])
    wavlm = wavlm.eval()
    wavlm = wavlm.requires_grad_(False)  # inference only: no gradients needed
    wavlm = wavlm.to(device)

    # LOAD KMEANS
    kmeans = ApplyKmeans(KMEANS_PATH, device)
    while True:
        paths = queue.get()
        if paths is None:
            break  # stop sentinel from the main process
        file_names = [path.stem for path in paths]
        samples = [get_audio(path) for path in paths]
        # 320 is presumably the WavLM frame hop in samples (16 kHz audio at a
        # 50 Hz feature rate); used below to trim padding frames — TODO confirm.
        lengths = [math.ceil(sample.shape[-1] / 320) for sample in samples]

        try:
            batched_samples = torch.nn.utils.rnn.pad_sequence(
                samples, batch_first=True
            ).to(device)
            features = wavlm.extract_features(
                batched_samples,
                output_layer=OUTPUT_LAYER,
                ret_layer_results=False,
            )[0]
            b, t, d = features.shape
            # Flatten (batch, time) so every frame is quantised in one call.
            tokens = kmeans(features.reshape(-1, d)).reshape(b, t)
            # print(features.shape, tokens.shape)
            for feature, token, file_name, length in zip(
                    features.cpu().numpy(), tokens.cpu().numpy(), file_names, lengths
            ):
                # pass
                # Trim frames that only exist because of batch padding.
                np.save(FEATURE_OUTPUT_DIR / f"{file_name}.npy", feature[:length, :])
                np.save(TOKEN_OUTPUT_DIR / f"{file_name}.npy", token[:length])

        except Exception as e:
            # Best-effort: report the failing batch and keep the worker alive.
            print(f"{e} in {paths} with longest length of {max(lengths)}")


def setInterval(interval):
    """Decorator factory: run the decorated function every `interval` seconds
    in a background daemon thread.

    Calling the decorated function starts the timer thread and returns a
    `threading.Event`; call `.set()` on it to stop the loop. The wrapped
    function's own return value is discarded.

    Args:
        interval: seconds to wait between invocations.
    """
    from functools import wraps  # local import: keeps module imports untouched

    def decorator(function):
        @wraps(function)  # fix: preserve the wrapped function's metadata
        def wrapper(*args, **kwargs):
            stopped = threading.Event()

            def loop():  # executed in another thread
                # Event.wait returns False on timeout and True once set, so
                # this keeps firing until someone calls stopped.set().
                while not stopped.wait(interval):
                    function(*args, **kwargs)

            t = threading.Thread(target=loop)
            t.daemon = True  # don't block interpreter exit
            t.start()
            return stopped

        return wrapper

    return decorator


# Queue depth observed at the previous tick; __main__ must set this to
# queue.qsize() before invoking QueueWatcher, or the first tick's
# subtraction below raises TypeError.
last_batches = None


@setInterval(LOGGING_INTERVAL)
def QueueWatcher(queue):
    """Periodic progress logger: every LOGGING_INTERVAL seconds, print how
    many batches remain in `queue` and the consumption rate since the
    previous tick (see `setInterval` for the threading behaviour; invoking
    this returns the stop Event).
    """
    global last_batches
    # NOTE: Queue.qsize() is approximate and unreliable on some platforms
    # (e.g. raises NotImplementedError on macOS) — fine for logging here.
    curr_batches = queue.qsize()
    print(
        f"Remain: {curr_batches} batches [ {(last_batches - curr_batches) / LOGGING_INTERVAL} batches/s ]"
    )
    last_batches = curr_batches


if __name__ == "__main__":
    # CUDA is unsafe across fork(); force spawn for the worker processes.
    mp.set_start_method('spawn', force=True)
    FEATURE_OUTPUT_DIR.mkdir(exist_ok=True)
    TOKEN_OUTPUT_DIR.mkdir(exist_ok=True)

    # Read audio paths from a Kaldi-style wav.scp ("<utt_id> <path>" per line);
    # the last whitespace-separated token on each line is the path.
    wav_path = []
    with open(INPUT_DIR / "wav.scp") as fr:
        for line in fr:
            parts = line.split()
            if parts:  # skip blank lines
                wav_path.append(parts[-1])

    # Smoke run: only process every 100th file for now.
    print('先处理1/100意思一下')
    wav_path = wav_path[::100]


    print(f"Running with {NUM_THREADS} threads and batchsize {BATCH_SIZE}")
    processes = []
    queue = mp.Queue()
    for rank in range(NUM_THREADS):
        p = mp.Process(target=inference, args=(rank, queue))
        p.start()
        processes.append(p)

    # Group paths into batches of BATCH_SIZE and feed them to the workers,
    # while recording every dispatched path for the FILE_LIST log.
    accum = []
    tmp_file = []
    for file in wav_path:
        file = Path(file)
        accum.append(file)
        if len(accum) == BATCH_SIZE:
            queue.put(accum.copy())
            accum.clear()
        tmp_file.append(file.as_posix() + '\n')

    # Fix: flush the final partial batch — it was previously dropped whenever
    # len(wav_path) was not an exact multiple of BATCH_SIZE.
    if accum:
        queue.put(accum.copy())
        accum.clear()

    # One stop sentinel per worker process.
    for _ in range(NUM_THREADS):
        queue.put(None)

    last_batches = queue.qsize()
    queue_watcher = QueueWatcher(queue)
    for p in processes:
        p.join()
    queue_watcher.set()  # stop the background logging thread

    # Append the list of dispatched files; `with` guarantees the handle is
    # closed and flushed (previously left to interpreter teardown).
    with open(FILE_LIST, 'a') as f_w:
        f_w.writelines(tmp_file)


