import torch
import re
import librosa
from datasets import load_dataset, load_metric, DownloadConfig
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
import warnings
import os

# Clear Intel OpenMP thread-affinity pinning — presumably to avoid KMP affinity
# warnings / core-pinning conflicts with torch's own threading; confirm if removed.
os.environ["KMP_AFFINITY"] = ""

# Common Voice language code and the fine-tuned Wav2Vec2 checkpoint to evaluate.
LANG_ID = "zh-CN"
MODEL_ID = "wav2vec2-large-chinese-zh-cn"
# Prefer GPU when available; fall back to CPU.
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# config = DownloadConfig(resume_download=True, max_retries=100)
# test_dataset = load_dataset("common_voice", LANG_ID, split="test", download_config=config)

# wer = load_metric("wer", download_config=config)
# cer = load_metric("cer", download_config=config)

# Load the feature-extractor/tokenizer pair and the CTC acoustic model,
# then move the model weights to the selected device.
processor = Wav2Vec2Processor.from_pretrained(MODEL_ID)
model = Wav2Vec2ForCTC.from_pretrained(MODEL_ID)
model.to(DEVICE)


# Preprocessing the datasets.
# We need to read the audio files as arrays
def speech_file_to_array_fn(batch):
    """Decode the audio file at batch["path"] and normalize the transcript.

    Stores the 16 kHz waveform under batch["speech"] and rewrites
    batch["sentence"] to keep only CJK ideographs and ASCII digits,
    with a single trailing space appended.
    """
    # librosa can emit resampling/deprecation warnings per file; silence them.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        waveform, _sr = librosa.load(batch["path"], sr=16_000)
    batch["speech"] = waveform

    # Strip everything outside the CJK Unified Ideographs block and digits 0-9.
    cleaned = re.sub("([^\u4e00-\u9fa5\u0030-\u0039])", "", batch["sentence"])
    batch["sentence"] = cleaned.lower() + " "
    return batch


# test_dataset = test_dataset.map(
#     speech_file_to_array_fn,
#     num_proc=15,
#     remove_columns=['client_id', 'up_votes', 'down_votes', 'age', 'gender', 'accent', 'locale', 'segment'],
# )


# Evaluation: run batched inference over the preprocessed dataset
# and decode the model's predictions into text.
def evaluate(batch):
    """Run batched CTC inference and attach greedy-decoded transcripts.

    Expects 16 kHz waveforms under batch["speech"]; writes the decoded
    strings to batch["pred_strings"].
    """
    features = processor(
        batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True
    )
    input_values = features.input_values.to(DEVICE)
    attention_mask = features.attention_mask.to(DEVICE)

    # Inference only — no gradient bookkeeping needed.
    with torch.no_grad():
        logits = model(input_values, attention_mask=attention_mask).logits

    # Greedy (argmax over vocabulary) CTC decoding.
    batch["pred_strings"] = processor.batch_decode(logits.argmax(dim=-1))
    return batch


# Fix: the dataset/metric loading above is commented out, leaving `test_dataset`,
# `wer` and `cer` undefined — the original script crashed with a NameError on the
# first line below. Restore the full pipeline here so evaluation runs end to end.
download_config = DownloadConfig(resume_download=True, max_retries=100)
test_dataset = load_dataset(
    "common_voice", LANG_ID, split="test", download_config=download_config
)

# Decode audio and normalize transcripts; drop metadata columns we never read.
test_dataset = test_dataset.map(
    speech_file_to_array_fn,
    remove_columns=[
        "client_id", "up_votes", "down_votes", "age",
        "gender", "accent", "locale", "segment",
    ],
)

# Batched greedy inference over the test split.
result = test_dataset.map(evaluate, batched=True, batch_size=8)

# Lower-case both sides for a fair comparison (references already normalized).
predictions = [x.lower() for x in result["pred_strings"]]
references = [x.lower() for x in result["sentence"]]

wer = load_metric("wer", download_config=download_config)
cer = load_metric("cer", download_config=download_config)
print(
    f"WER: {wer.compute(predictions=predictions, references=references, chunk_size=1000) * 100}"
)
print(f"CER: {cer.compute(predictions=predictions, references=references) * 100}")
