import json
import os
import librosa
import warnings
import re
import numpy as np
import time 
import pandas as pd
import zhconv
from metrics import metric
from transformers import WhisperForConditionalGeneration, WhisperProcessor
from transformers import AutoProcessor, AutoModelForSpeechSeq2Seq
import torch
# Punctuation/symbol characters (ASCII, full-width CJK, and assorted Unicode
# marks) stripped from reference sentences before CER scoring.
CHARS_TO_IGNORE = [",", "?", "¿", ".", "!", "¡", ";", "；", ":", '""', "%", '"', "�", "ʿ", "·", "჻", "~", "՞",
                  "؟", "،", "।", "॥", "«", "»", "„", "“", "”", "「", "」", "‘", "’", "《", "》", "(", ")", "[", "]",
                  "{", "}", "=", "`", "_", "+", "<", ">", "…", "–", "°", "´", "ʾ", "‹", "›", "©", "®", "—", "→", "。",
                  "、", "﹂", "﹁", "‧", "～", "﹏", "，", "｛", "｝", "（", "）", "［", "］", "【", "】", "‥", "〽",
                  "『", "』", "〝", "〟", "⟨", "⟩", "〜", "：", "！", "？", "♪", "؛", "/", "\\", "º", "−", "^", "'", "ʻ", "ˆ"]

# Single character class matching any of the characters above.
chars_to_ignore_regex = f"[{re.escape(''.join(CHARS_TO_IGNORE))}]"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Hard-coded dataset locations (machine-specific paths).
test_path = "/home/shilin.zhuang.o/asr_deploy_test/asr_test_file/dataset/test/wav"
test_sentence_path = '/home/shilin.zhuang.o/asr_deploy_test/asr_test_file/dataset/test_set.json'
wav_path = "/home/shilin.zhuang.o/asr_deploy_test/asr_test_file/Test_Ali/Test_Ali_far/audio_dir/R8009_M8028.wav"
# processor = WhisperProcessor.from_pretrained("/home/asr_deploy_test/asr_test_file/whisper-model/whisper-large-v2")
# model = WhisperForConditionalGeneration.from_pretrained("/home/asr_deploy_test/asr_test_file/whisper-model/whisper-large-v2").to("cuda")
# Load Whisper from the local "whisper-large-v2" directory and pin decoding
# to Chinese transcription via forced decoder prompt ids.
processor = WhisperProcessor.from_pretrained("whisper-large-v2")
model = WhisperForConditionalGeneration.from_pretrained("whisper-large-v2")
model.config.forced_decoder_ids = processor.get_decoder_prompt_ids(language="chinese", task="transcribe")
model.to(device)
# NOTE(review): appears unused in this file — presumably a placeholder path
# for partial-wav experiments; confirm before removing.
part_wav_path = r'part_wav_path'


def speech_file_to_array_fn(batch):
    """Load the audio at ``batch["path"]`` and clean the reference sentence.

    Mutates and returns *batch*: adds ``"speech"`` (waveform resampled to
    16 kHz) and rewrites ``"sentence"`` keeping only CJK characters and
    ASCII digits (lower-cased).
    """
    with warnings.catch_warnings():
        # librosa/audioread emit noisy decode warnings; suppress them here.
        warnings.simplefilter("ignore")
        waveform, _rate = librosa.load(batch["path"], sr=16_000)
        batch["speech"] = waveform
        cleaned = re.sub("([^\u4e00-\u9fa5\u0030-\u0039])", "", batch["sentence"])
        batch["sentence"] = cleaned.lower()
    return batch


def map_to_pred(batch):
    """Run Whisper inference on ``batch["speech"]``.

    Adds two keys to *batch* and returns it:
    - ``"reference"``: the tokenizer-normalized ground-truth sentence;
    - ``"pred_strings"``: the normalized transcription, converted to
      simplified Chinese with zhconv.
    """
    features = processor(
        batch["speech"], sampling_rate=16000, return_tensors="pt"
    ).input_features
    batch["reference"] = processor.tokenizer._normalize(batch['sentence'])

    # Inference only — no gradients needed.
    with torch.no_grad():
        predicted_ids = model.generate(features.to(device))[0]

    transcription = processor.decode(predicted_ids)
    normalized = processor.tokenizer._normalize(transcription)
    batch["pred_strings"] = zhconv.convert(normalized, 'zh-cn')
    return batch

def one_list_test():
    """Smoke-test inference on one hard-coded utterance.

    Prints the wall-clock inference time and the resulting transcription
    (converted to simplified Chinese).
    """
    sample = {
        "path": "/home/shilin.zhuang.o/asr_deploy_test/asr_test_file/dataset/test/wav/SSB0005/SSB00050353.wav",
        "sentence": "你好",
    }
    sample = speech_file_to_array_fn(sample)

    start = time.time()
    features = processor(
        sample["speech"], sampling_rate=16000, return_tensors="pt"
    ).input_features
    with torch.no_grad():
        predicted_ids = model.generate(features.to(device))[0]
    transcription = processor.decode(predicted_ids)
    sample["pred_strings"] = zhconv.convert(
        processor.tokenizer._normalize(transcription), 'zh-cn'
    )
    elapsed = time.time() - start

    print("耗时", elapsed)
    print("转述文本为：", sample['pred_strings'])


def evaluation():
    file_dict = {}
    for file in os.listdir(test_path):
        path = os.path.join(test_path, file)
        file_dict[file] = path
    with open(test_sentence_path, 'r', encoding='utf-8') as fp:
        answer = json.load(fp)
    batch_list = []
    for file in file_dict:
        if file in answer:
            # batch = {"path": file_dict[file], "sentence": answer[file]}
            batch = {"path": file_dict[file], "sentence": answer[file][0]}
            batch_list.append(batch)
    cer_list = []
    for batch in batch_list:
        t1 = time.time()
        batch = speech_file_to_array_fn(batch)

        inputs = processor(
            batch["speech"], sampling_rate=16000, return_tensors="pt", padding=True
        )
        batch = map_to_pred(batch)
        t2 = time.time()
        print(f"True is {batch['sentence']}")
        print(f"answer is : {batch['pred_strings']}")
        print(f"Inference time is {(t2 - t1)} s")
        batch["sentence"] = re.sub(chars_to_ignore_regex, "", batch["sentence"]).upper()
        if len(batch["sentence"]) != len(batch["pred_strings"]):
            print("File {} Error, True: {}, Pred: {}".format(batch["path"], batch["sentence"], batch["pred_strings"]))
            continue
        cer = compute_cer(predictions=batch['pred_strings'], references=batch['sentence'])
        cer_list.append(cer)
        print(cer)
    df = pd.DataFrame(columns=['file_name','sentence', 'pred_strings'])
    for batch in batch_list:
        df.loc[len(df)] = [batch['path'], batch['sentence'], batch['pred_strings']]
    print(f"Mean of CER of test set is: {np.mean(cer_list)}")
    df.to_csv('Whisper_speech_evaluation.csv', index='False')


if __name__ == "__main__":
    # one_list_test()
    evaluation()
