import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from typing import List, Optional
import re
import threading
import queue

from megatron.training.initialize import initialize_megatron
from megatron.training.training import setup_model_and_optimizer
from megatron.core.transformer.transformer_config import TransformerConfig
from megatron.core.models.speech.whisper_layer_specs import get_whisper_layer_with_transformer_engine_spec
from megatron.core.datasets.speech_dataset import AudioLLMDataset
from megatron.core.models.speech.whisper_model import WhisperMegatron
from tqdm import tqdm
import pdb


def model_provider(
    pre_process=True, post_process=True, add_encoder=True, add_decoder=True,
    parallel_output=True) -> WhisperMegatron:
    """Megatron model-provider hook: build the Whisper encoder model.

    The pre_process/post_process/add_encoder/add_decoder/parallel_output
    flags are part of Megatron's provider signature but are not consumed
    by this builder.
    """
    config = TransformerConfig(
        hidden_size=1280,
        num_attention_heads=20,
        num_layers=32,
        layernorm_epsilon=1e-5,
        apply_query_key_layer_scaling=True,
        tensor_model_parallel_size=1,
    )
    layer_spec = get_whisper_layer_with_transformer_engine_spec()
    return WhisperMegatron(config, layer_spec, vocab_size=4352)

def calculate_wer_async(ref_queue: queue.Queue, results_queue: queue.Queue):
    """Worker-thread loop: score (uttid, ref, hyp) items from ref_queue.

    For each item, computes WER via calculate_wer and puts
    (uttid, aligned_ref, aligned_hyp, wer_result) on results_queue.
    A None sentinel on ref_queue terminates the loop.

    task_done() is called in a finally block: in the original code an
    exception inside calculate_wer skipped task_done(), which would make
    the producer's ref_queue.join() block forever.
    """
    while True:
        item = ref_queue.get()
        if item is None:
            # Acknowledge the sentinel too, so the queue's unfinished-task
            # count stays balanced.
            ref_queue.task_done()
            break
        uttid, ref_str, hyp_str = item
        try:
            wer_result, aligned_ref, aligned_hyp = calculate_wer(ref_str, hyp_str)
            results_queue.put((uttid, aligned_ref, aligned_hyp, wer_result))
        finally:
            # Always ack the item, even on error, so join() cannot hang.
            ref_queue.task_done()

def calculate_wer(ref: str, hyp: str):
    """Compute word/character error rate between `ref` and `hyp`.

    Tokenization: if a string contains CJK characters or digits it is
    scored per character, otherwise it is whitespace-split into words.
    (Note: digits are deliberately lumped in with CJK, so "2021" is
    scored per character.)

    Returns a tuple (stats, aligned_ref_str, aligned_hyp_str) where stats
    is a dict with keys 'wer', 'substitutions', 'deletions', 'insertions',
    'hits', 'n'. Gaps in the alignment strings are rendered as "  ".
    """
    chinese_characters = re.compile(r'[一-鿿0-9]')
    ref_units = list(ref) if chinese_characters.search(ref) else ref.split()
    hyp_units = list(hyp) if chinese_characters.search(hyp) else hyp.split()

    n = len(ref_units)
    m = len(hyp_units)

    # Standard Levenshtein DP: d[i][j] = edit distance between
    # ref_units[:i] and hyp_units[:j].
    d = [[0] * (m + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        d[i][0] = i
    for j in range(1, m + 1):
        d[0][j] = j

    for i in range(1, n + 1):
        for j in range(1, m + 1):
            if ref_units[i - 1] == hyp_units[j - 1]:
                d[i][j] = d[i - 1][j - 1]
            else:
                d[i][j] = min(d[i - 1][j] + 1, d[i][j - 1] + 1, d[i - 1][j - 1] + 1)

    # Backtrace an optimal alignment. Branch order (match, substitution,
    # deletion, insertion) is kept identical to the original so the
    # tie-breaking — and hence the printed alignment — is unchanged.
    # Fix: the original used list.insert(0, ...) per step, which is O(n^2);
    # we append and reverse once instead.
    substitutions = 0
    deletions = 0
    insertions = 0
    aligned_ref = []
    aligned_hyp = []
    i, j = n, m
    while i > 0 or j > 0:
        if i > 0 and j > 0 and ref_units[i - 1] == hyp_units[j - 1]:
            aligned_ref.append(ref_units[i - 1])
            aligned_hyp.append(hyp_units[j - 1])
            i -= 1
            j -= 1
        elif i > 0 and j > 0 and d[i][j] == d[i - 1][j - 1] + 1:
            aligned_ref.append(ref_units[i - 1])
            aligned_hyp.append(hyp_units[j - 1])
            substitutions += 1
            i -= 1
            j -= 1
        elif i > 0 and d[i][j] == d[i - 1][j] + 1:
            aligned_ref.append(ref_units[i - 1])
            aligned_hyp.append("  ")
            deletions += 1
            i -= 1
        elif j > 0 and d[i][j] == d[i][j - 1] + 1:
            aligned_ref.append("  ")
            aligned_hyp.append(hyp_units[j - 1])
            insertions += 1
            j -= 1
    aligned_ref.reverse()
    aligned_hyp.reverse()

    hits = n - substitutions - deletions
    # WER can exceed 1.0 when there are many insertions; 0 for empty ref.
    wer = (substitutions + deletions + insertions) / n if n > 0 else 0

    aligned_ref_str = ' '.join(aligned_ref)
    aligned_hyp_str = ' '.join(aligned_hyp)

    return {
        'wer': wer,
        'substitutions': substitutions,
        'deletions': deletions,
        'insertions': insertions,
        'hits': hits,
        'n': n
    }, aligned_ref_str, aligned_hyp_str

def ctc_decode(idxs):
    """Greedy CTC post-processing: collapse runs of repeated token ids,
    then drop blanks (id 0).

    Equivalent to collapse-then-filter done in a single pass: a non-blank
    token survives iff it differs from the token immediately before it in
    the raw stream.
    """
    out = []
    prev = None
    for tok in idxs:
        if tok != prev and tok != 0:
            out.append(tok)
        prev = tok
    return out

def process_batch(model, batch, vocab, ref_queue):
    """Run one batch through the model and enqueue (uttid, ref, hyp)
    string triples on ref_queue for asynchronous WER scoring."""
    spectrogram = batch["spectrogram"].cuda()   # [batch, input_dim, seq_len]
    attn_mask = batch["padding_mask"].cuda()    # [batch, 1, 1, seq_len]

    logits = model(spectrogram, attn_mask)
    best_ids = torch.argmax(logits, dim=-1)
    # Zero out padded frames so they are treated as CTC blanks downstream.
    frame_mask = attn_mask.squeeze(1).squeeze(1)
    best_ids = best_ids * frame_mask

    for uttid, hyp_ids, ref_ids in zip(batch['uttid'], best_ids, batch["text"]):
        collapsed = ctc_decode(hyp_ids.tolist())  # remove repeats, then blanks
        hyp_str = ''.join(vocab[tok] for tok in collapsed if tok != 0)
        # ref ids use 0 / -1 as non-text markers (blank/ignore) and are skipped.
        ref_str = ''.join(vocab[tok.item()] for tok in ref_ids if tok != 0 and tok != -1)
        ref_queue.put((uttid, ref_str, hyp_str))

def print_statistics(results_queue):
    """Drain results_queue and print per-utterance plus aggregate WER stats.

    Each queue item is (uttid, aligned_ref, aligned_hyp, wer_result) as
    produced by calculate_wer_async. Must be called only after the worker
    thread has exited: Queue.empty() is reliable solely with no concurrent
    producers. Utterances are bucketed as Mandarin vs English using the
    same CJK/digit test as calculate_wer.
    """
    # Hoisted out of the loop: the original recompiled this regex per item.
    cjk_or_digit = re.compile(r'[一-鿿0-9]')

    total_n = total_c = total_s = total_d = total_i = 0
    mandarin_n = mandarin_c = mandarin_s = mandarin_d = mandarin_i = 0
    english_n = english_c = english_s = english_d = english_i = 0

    while not results_queue.empty():
        uttid, aligned_ref, aligned_hyp, wer_result = results_queue.get()
        n = wer_result['n']
        c = wer_result['hits']
        s = wer_result['substitutions']
        d = wer_result['deletions']
        i = wer_result['insertions']
        total_n += n
        total_c += c
        total_s += s
        total_d += d
        total_i += i

        if cjk_or_digit.search(aligned_ref):
            mandarin_n += n
            mandarin_c += c
            mandarin_s += s
            mandarin_d += d
            mandarin_i += i
        else:
            english_n += n
            english_c += c
            english_s += s
            english_d += d
            english_i += i

        wer = wer_result['wer'] * 100
        print(f"utt: {uttid} WER: {wer:.2f} % N={n} C={c} S={s} D={d} I={i}")
        print(f"lab: {aligned_ref}")
        print(f"rec: {aligned_hyp}")

    print("===========================================================================")
    overall_wer = (total_s + total_d + total_i) / total_n * 100 if total_n > 0 else 0
    print(f"Overall -> {overall_wer:.2f} % N={total_n} C={total_c} S={total_s} D={total_d} I={total_i}")
    mandarin_wer = (mandarin_s + mandarin_d + mandarin_i) / mandarin_n * 100 if mandarin_n > 0 else 0
    print(f"Mandarin -> {mandarin_wer:.2f} % N={mandarin_n} C={mandarin_c} S={mandarin_s} D={mandarin_d} I={mandarin_i}")
    english_wer = (english_s + english_d + english_i) / english_n * 100 if english_n > 0 else 0
    print(f"English -> {english_wer:.2f} % N={english_n} C={english_c} S={english_s} D={english_d} I={english_i}")
    print("===========================================================================")

# Example usage
if __name__ == "__main__":
    # Initialize Megatron and setup model
    initialize_megatron()
    model_type = "encoder_or_decoder"
    model, optimizer, opt_param_scheduler = setup_model_and_optimizer(model_provider, model_type)
    # setup_model_and_optimizer returns a list of model chunks; taking index 0
    # assumes a single chunk (no virtual pipeline) -- TODO confirm for other
    # parallel configurations.
    model = model[0]
    model.eval()
    # Define dataset and data loader (hard-coded cluster paths)
    whisper_path = "/apdcephfs_qy3/share_976139/users/joyounglv/pretrained_ckpt/whisper-large-v3"
    vocab_file = "/apdcephfs_qy3/share_976139/users/adrenzhou/data/aishell/dict/lang_char.txt"
    dataset = AudioLLMDataset("/apdcephfs_qy3/share_976139/users/joyounglv/audiollama/data/aishell1/test_asr_aishell1.json", whisper_path, vocab_file, ret_id=True, data_aug=False)
    # shuffle=False keeps utterance order deterministic across eval runs
    data_loader = DataLoader(dataset, batch_size=16, shuffle=False, num_workers=64)

    # Load vocabulary; each line is presumably "<token> <id>" -- the dict maps
    # id -> token for decoding model outputs (verify against lang_char.txt).
    with open(vocab_file, 'r', encoding='utf-8') as f:
        vocab = [line.strip() for line in f]
    vocab = {int(x.split()[1]): x.split()[0] for x in vocab}

    # Set up queues and threading for asynchronous WER calculation:
    # process_batch produces onto ref_queue, the worker consumes and puts
    # scored results onto results_queue.
    ref_queue = queue.Queue()
    results_queue = queue.Queue()
    wer_thread = threading.Thread(target=calculate_wer_async, args=(ref_queue, results_queue))
    wer_thread.start()

    # Iterate over dataset (inference only, so no gradient tracking)
    with torch.no_grad():
        for batch in tqdm(data_loader):
            process_batch(model, batch, vocab, ref_queue)

    # Wait for all WER calculations to complete: join() blocks until the worker
    # has task_done()'d every queued item; only then is the None sentinel
    # enqueued to let the worker thread exit cleanly.
    ref_queue.join()
    ref_queue.put(None)
    wer_thread.join()

    # Print statistics (safe to drain results_queue now -- worker has exited)
    print_statistics(results_queue)
