import json
import os
import librosa
import warnings
import re
import numpy as np
import time 
import pandas as pd
import zhconv
import jiwer
import gc
from transformers import WhisperForConditionalGeneration, WhisperProcessor
from transformers import AutoProcessor, AutoModelForSpeechSeq2Seq
import torch
# Punctuation/symbols stripped from reference sentences before scoring
# (covers ASCII, CJK full-width, and assorted typographic marks).
CHARS_TO_IGNORE = [",", "?", "¿", ".", "!", "¡", ";", "；", ":", '""', "%", '"', "�", "ʿ", "·", "჻", "~", "՞",
                  "؟", "،", "।", "॥", "«", "»", "„", "“", "”", "「", "」", "‘", "’", "《", "》", "(", ")", "[", "]",
                  "{", "}", "=", "`", "_", "+", "<", ">", "…", "–", "°", "´", "ʾ", "‹", "›", "©", "®", "—", "→", "。",
                  "、", "﹂", "﹁", "‧", "～", "﹏", "，", "｛", "｝", "（", "）", "［", "］", "【", "】", "‥", "〽",
                  "『", "』", "〝", "〟", "⟨", "⟩", "〜", "：", "！", "？", "♪", "؛", "/", "\\", "º", "−", "^", "'", "ʻ", "ˆ"]

# Single regex character class matching any of the ignorable characters above.
chars_to_ignore_regex = f"[{re.escape(''.join(CHARS_TO_IGNORE))}]"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# test_path = "/home/asr_deploy_test/asr_test_file/dataset/test/wav"
# test_sentence_path = '/home/asr_deploy_test/asr_test_file/dataset/test_set.json'
# Host-specific paths to the evaluation audio and the JSON reference transcripts.
wav_path = "/home/asr_deploy_test/asr_test_file/Test_Ali/Test_Ali_far/audio_dir/R8009_M8028.wav"
test_path = "/home/shilin.zhuang.o/asr/Test_Ali/Test_Ali_far/audio_dir/"
test_sentence_path = '/home/shilin.zhuang.o/asr/dataset/long_sentence_test_set.json'
# processor = WhisperProcessor.from_pretrained("/home/asr_deploy_test/asr_test_file/whisper-model/whisper-large-v2")
# model = WhisperForConditionalGeneration.from_pretrained("/home/asr_deploy_test/asr_test_file/whisper-model/whisper-large-v2").to("cuda")
# Load Whisper large-v2 at import time and pin decoding to Chinese transcription.
processor = WhisperProcessor.from_pretrained("whisper-large-v2")
model = WhisperForConditionalGeneration.from_pretrained("whisper-large-v2")
model.config.forced_decoder_ids = processor.get_decoder_prompt_ids(language="chinese", task="transcribe")
model.to(device)
# Directory of pre-split wav segments consumed by one_list_test().
# NOTE(review): looks like a placeholder path — confirm before running.
part_wav_path = r'part_wav_path'


def speech_file_to_array_fn(batch):
    """Load the audio at batch["path"] at 16 kHz and normalize the reference text.

    Adds batch["speech"] (the decoded waveform) and rewrites batch["sentence"]
    to keep only CJK characters and ASCII digits, lowercased. librosa's
    resampling warnings are suppressed.
    """
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        waveform, _ = librosa.load(batch["path"], sr=16_000)
        cleaned = re.sub("([^\u4e00-\u9fa5\u0030-\u0039])", "", batch["sentence"])
        batch["speech"] = waveform
        batch["sentence"] = cleaned.lower()
    return batch


def map_to_pred(batch):
    """Run the global Whisper model on batch["speech"] and attach results.

    Sets batch["reference"] (tokenizer-normalized ground truth) and
    batch["pred_strings"] (decoded transcription, normalized and converted
    to simplified Chinese via zhconv).
    """
    features = processor(batch["speech"], sampling_rate=16000, return_tensors="pt").input_features
    batch["reference"] = processor.tokenizer._normalize(batch['sentence'])
    with torch.no_grad():
        ids = model.generate(features.to(device))[0]
    decoded = processor.decode(ids)
    batch["pred_strings"] = zhconv.convert(processor.tokenizer._normalize(decoded), 'zh-cn')
    return batch


def one_list_test():
    """Transcribe every wav segment under part_wav_path and print the joined text.

    Each segment is paired with the first reference sentence for
    'R8009_M8028.wav', decoded individually (per-clip inference time is
    printed), and all predictions are concatenated and printed at the end.
    """
    with open(test_sentence_path, 'r', encoding='utf-8') as fp:
        answer = json.load(fp)
    pieces = []
    for name in os.listdir(part_wav_path):
        sample = {
            "path": os.path.join(part_wav_path, name),
            "sentence": answer['R8009_M8028.wav'][0],
        }
        sample = speech_file_to_array_fn(sample)
        start = time.time()
        features = processor(sample["speech"], sampling_rate=16000, return_tensors="pt").input_features
        with torch.no_grad():
            ids = model.generate(features.to(device))[0]
        text = processor.decode(ids)
        sample["pred_strings"] = zhconv.convert(processor.tokenizer._normalize(text), 'zh-cn')
        print("耗时", time.time() - start)
        # print(batch['sentence'])
        pieces.append(sample['pred_strings'])
    print(''.join(pieces))


def compute_cer(predictions, references, chunk_size=None):
    """Compute the character error rate (CER) between predictions and references.

    Every sequence is exploded into single characters so jiwer's word-level
    machinery operates per character. With chunk_size set, measures are
    accumulated chunk by chunk so only one chunk's alignment lives in memory
    at a time.

    Args:
        predictions: iterable of predicted strings (a single string also works;
            each character then counts as one sequence).
        references: iterable of reference strings, parallel to predictions.
        chunk_size: optional number of sequences per accumulation chunk.

    Returns:
        CER as a float. Returns 0.0 for empty references — the original
        chunked path divided by H + S + D == 0 and raised ZeroDivisionError.
    """
    if not references:
        # Nothing to score; avoid a zero denominator in the chunked branch.
        return 0.0
    if chunk_size is None:
        preds = [char for seq in predictions for char in list(seq)]
        refs = [char for seq in references for char in list(seq)]
        return jiwer.wer(refs, preds)
    H, S, D, I = 0, 0, 0, 0
    for start in range(0, len(references), chunk_size):
        end = start + chunk_size
        preds = [char for seq in predictions[start:end] for char in list(seq)]
        refs = [char for seq in references[start:end] for char in list(seq)]
        chunk_metrics = jiwer.compute_measures(refs, preds)
        H += chunk_metrics["hits"]
        S += chunk_metrics["substitutions"]
        D += chunk_metrics["deletions"]
        I += chunk_metrics["insertions"]
        # Drop per-chunk intermediates before the next allocation to cap RSS.
        del preds, refs, chunk_metrics
        gc.collect()
    # CER = (S + D + I) / reference length, where ref length = H + S + D.
    return float(S + D + I) / float(H + S + D)


def evaluation():
    """Evaluate the Whisper model on the test set and report mean CER.

    Pairs each wav under test_path with its first reference sentence from
    test_sentence_path, transcribes it, prints per-file reference/prediction/
    timing, accumulates per-file CER, and writes all (file, reference,
    prediction) rows to 'Whisper_speech_evaluation.csv'.
    """
    file_dict = {}
    for file in os.listdir(test_path):
        file_dict[file] = os.path.join(test_path, file)
    with open(test_sentence_path, 'r', encoding='utf-8') as fp:
        answer = json.load(fp)
    batch_list = []
    for file in file_dict:
        if file in answer:
            # Only the first reference sentence per file is used.
            batch_list.append({"path": file_dict[file], "sentence": answer[file][0]})
    cer_list = []
    for batch in batch_list:
        t1 = time.time()
        batch = speech_file_to_array_fn(batch)
        # map_to_pred extracts its own features; the original additionally
        # computed an unused `inputs = processor(...)` here — dead work removed.
        batch = map_to_pred(batch)
        t2 = time.time()
        print(f"True is {batch['sentence']}")
        print(f"answer is : {batch['pred_strings']}")
        print(f"Inference time is {(t2 - t1)} s")
        batch["sentence"] = re.sub(chars_to_ignore_regex, "", batch["sentence"]).upper()
        # NOTE(review): CER does not require equal lengths; this skip reads as
        # a filter for grossly mismatched outputs — confirm it is intentional.
        if len(batch["sentence"]) != len(batch["pred_strings"]):
            print("File {} Error, True: {}, Pred: {}".format(batch["path"], batch["sentence"], batch["pred_strings"]))
            continue
        cer = compute_cer(predictions=batch['pred_strings'], references=batch['sentence'])
        cer_list.append(cer)
        print(cer)
    df = pd.DataFrame(columns=['file_name', 'sentence', 'pred_strings'])
    for batch in batch_list:
        df.loc[len(df)] = [batch['path'], batch['sentence'], batch['pred_strings']]
    print(f"Mean of CER of test set is: {np.mean(cer_list)}")
    # Bug fix: index must be the boolean False. The original passed the
    # string 'False', which is truthy, so the row index was still written.
    df.to_csv('Whisper_speech_evaluation.csv', index=False)


def muti_model_evaluation():
    """Compare multiple ASR models on the test set.

    NOTE(review): appears unfinished — whipser_cer_list and parformer_cer_list
    are never filled, map_to_pred's result is discarded, and nothing is
    printed or returned. Left byte-identical pending completion.
    """
    file_dict = {}
    # Map every file name under test_path to its full path.
    for file in os.listdir(test_path):
        path = os.path.join(test_path, file)
        file_dict[file] = path
    with open(test_sentence_path, 'r', encoding='utf-8') as fp:
        answer = json.load(fp)
    batch_list = []
    for file in file_dict:
        if file in answer:
            # batch = {"path": file_dict[file], "sentence": answer[file]}
            # Only the first reference sentence per file is used.
            batch = {"path": file_dict[file], "sentence": answer[file][0]}
            batch_list.append(batch)
    whipser_cer_list = []
    parformer_cer_list = []
    for batch in batch_list:
        t1 = time.time()
        batch = speech_file_to_array_fn(batch)
        # `inputs` is computed but never used (same dead call as evaluation()).
        inputs = processor(
            batch["speech"], sampling_rate=16000, return_tensors="pt", padding=True
        )
        batch = map_to_pred(batch)




if __name__ == "__main__":
    # Entry point: run the full test-set evaluation.
    # one_list_test() is a manual per-segment smoke test, left disabled.
    #one_list_test()
    evaluation()
