import torch
import librosa
import warnings
import os
import re
import json
import time
import numpy as np
import pandas as pd
from metrics import metric
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

CHARS_TO_IGNORE = [",", "?", "¿", ".", "!", "¡", ";", "；", ":", '""', "%", '"', "�", "ʿ", "·", "჻", "~", "՞",
                  "؟", "،", "।", "॥", "«", "»", "„", "“", "”", "「", "」", "‘", "’", "《", "》", "(", ")", "[", "]",
                  "{", "}", "=", "`", "_", "+", "<", ">", "…", "–", "°", "´", "ʾ", "‹", "›", "©", "®", "—", "→", "。",
                  "、", "﹂", "﹁", "‧", "～", "﹏", "，", "｛", "｝", "（", "）", "［", "］", "【", "】", "‥", "〽",
                  "『", "』", "〝", "〟", "⟨", "⟩", "〜", "：", "！", "？", "♪", "؛", "/", "\\", "º", "−", "^", "'", "ʻ", "ˆ"]

chars_to_ignore_regex = f"[{re.escape(''.join(CHARS_TO_IGNORE))}]"

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

model_path = r"wav2vec2-large-chinese-zh-cn"
wav_path = r"/home/asr_deploy_test/asr_test_file/Test_Ali/Test_Ali_far/audio_dir/"
test_path = "/home/asr_deploy_test/asr_test_file/Test_Ali/Test_Ali_far/audio_dir"
test_sentence_path = '/home/asr_deploy_test/asr_test_file/dataset/test_set.json'
mask_prob = 0.0
mask_length = 10

processor = Wav2Vec2Processor.from_pretrained(model_path)
model = Wav2Vec2ForCTC.from_pretrained(model_path)
model.to(device)


def speech_file_to_array_fn(batch):
    """Load and normalize one sample in place.

    Reads the audio file at ``batch["path"]`` resampled to 16 kHz into
    ``batch["speech"]``, and rewrites ``batch["sentence"]`` keeping only CJK
    unified ideographs and ASCII digits (librosa's resampling warnings are
    suppressed). Returns the same dict.
    """
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        audio, _sr = librosa.load(batch["path"], sr=16_000)
        batch["speech"] = audio
        cleaned = re.sub("([^\u4e00-\u9fa5\u0030-\u0039])", "", batch["sentence"])
        batch["sentence"] = cleaned.lower()
    return batch


def one_list_test():
    """Run a single smoke-test inference on ``wav_path`` and print the result.

    NOTE(review): ``wav_path`` appears to be a directory path — confirm it
    points at an actual audio file before relying on this helper.
    """
    sample = speech_file_to_array_fn(
        {"path": wav_path, "sentence": "我认为跑步最重要的就是给我带来了什么是健康"}
    )
    features = processor(
        sample["speech"], sampling_rate=16_000, return_tensors="pt", padding=True
    )
    with torch.no_grad():
        output = model(
            features.input_values.to(device),
            attention_mask=features.attention_mask.to(device),
        )
        best_ids = torch.argmax(output.logits, dim=-1)
        sample["pred_strings"] = processor.batch_decode(best_ids)

    print(sample['pred_strings'])
    print(sample['speech'])


def evaluation():
    """Evaluate the CTC model over every audio file under ``test_path``.

    Walks one level of sub-directories under ``test_path``, matches each file
    name against the reference transcripts in ``test_sentence_path``,
    transcribes each matched file, prints per-file CER and the mean CER, and
    writes all (file, reference, prediction) rows to a CSV report.
    """
    # Map file name -> full path for every file one level down.
    # (renamed loop vars: `dir`/`file` shadowed the builtins)
    file_dict = {}
    for sub_dir in os.listdir(test_path):
        files_path = os.path.join(test_path, sub_dir)
        for file_name in os.listdir(files_path):
            file_dict[file_name] = os.path.join(files_path, file_name)

    with open(test_sentence_path, 'r', encoding='utf-8') as fp:
        answer = json.load(fp)

    # Keep only the files that have a reference transcript.
    batch_list = [
        {"path": path, "sentence": answer[name]}
        for name, path in file_dict.items()
        if name in answer
    ]

    cer_list = []
    for batch in batch_list:
        t1 = time.time()
        batch = speech_file_to_array_fn(batch)

        inputs = processor(
            batch["speech"], sampling_rate=16000, return_tensors="pt", padding=True
        )
        with torch.no_grad():
            logits = model(
                inputs.input_values.to(device),
                attention_mask=inputs.attention_mask.to(device),
            ).logits

            pred_ids = torch.argmax(logits, dim=-1)
            batch["pred_strings"] = processor.batch_decode(pred_ids)[0]
        print(f"Inference time is {time.time() - t1} s")

        # Defensive second cleanup pass: speech_file_to_array_fn already
        # reduced the sentence to CJK chars and digits, so this is a no-op in
        # practice but kept for safety.
        batch["sentence"] = re.sub(chars_to_ignore_regex, "", batch["sentence"]).upper()
        if len(batch["sentence"]) != len(batch["pred_strings"]):
            # NOTE(review): presumably metric.compute_cer requires equal-length
            # strings — confirm; otherwise skipping here undercounts errors.
            print("File {} Error, True: {}, Pred: {}".format(batch["path"], batch["sentence"], batch["pred_strings"]))
            continue
        cer = metric.compute_cer(predictions=batch['pred_strings'], references=batch['sentence'])
        cer_list.append(cer)
        print(cer)

    # Build the report in one shot: appending via df.loc[len(df)] reallocates
    # every iteration (quadratic in the number of rows).
    df = pd.DataFrame(
        [
            {
                "file_name": b["path"],
                "sentence": b["sentence"],
                "pred_strings": b["pred_strings"],
            }
            for b in batch_list
        ],
        columns=['file_name', 'sentence', 'pred_strings'],
    )
    # Guard the empty case: np.mean([]) returns nan and emits a RuntimeWarning.
    mean_cer = np.mean(cer_list) if cer_list else float("nan")
    print(f"Mean of CER of test set is: {mean_cer}")
    # NOTE(review): "evaluaton" is a typo in the output filename; left as-is in
    # case downstream tooling already consumes this exact name.
    df.to_csv('wav2_vec_evaluaton.csv', index=False)


if __name__ == '__main__':
    # Run the full test-set evaluation; one_list_test() is a manual
    # single-file smoke test kept around for debugging.
    # one_list_test()
    evaluation()
