import json
import os
import librosa
import warnings
import re
import numpy as np
import time 
import pandas as pd
import zhconv
from metrics import metric
from transformers import WhisperForConditionalGeneration, WhisperProcessor
from transformers import AutoProcessor, AutoModelForSpeechSeq2Seq
import torch

# Root directory holding the test wav files (walked recursively by test_all_file).
test_path = "/home/asr_deploy_test/asr_test_file/dataset/test/wav"
# Run inference on GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Punctuation/symbol characters (CJK and Latin) to be stripped before scoring.
CHARS_TO_IGNORE = [",", "?", "¿", ".", "!", "¡", ";", "；", ":", '""', "%", '"', "�", "ʿ", "·", "჻", "~", "՞",
				  "؟", "،", "।", "॥", "«", "»", "„", "“", "”", "「", "」", "‘", "’", "《", "》", "(", ")", "[", "]",
				  "{", "}", "=", "`", "_", "+", "<", ">", "…", "–", "°", "´", "ʾ", "‹", "›", "©", "®", "—", "→", "。",
				  "、", "﹂", "﹁", "‧", "～", "﹏", "，", "｛", "｝", "（", "）", "［", "］", "【", "】", "‥", "〽",
				  "『", "』", "〝", "〟", "⟨", "⟩", "〜", "：", "！", "？", "♪", "؛", "/", "\\", "º", "−", "^", "'", "ʻ", "ˆ"]

# Character class matching any single ignorable character above.
# NOTE(review): not referenced anywhere in this file — presumably consumed by
# `metrics.metric` or external dataset-map callers; confirm before removing.
chars_to_ignore_regex = f"[{re.escape(''.join(CHARS_TO_IGNORE))}]"

def load_model():
	"""Load the Whisper large-v2 model and processor for Chinese transcription.

	Returns:
		tuple: ``(model, processor)`` where the model has been moved to the
		module-level ``device`` and configured with forced decoder ids so it
		always transcribes in Chinese instead of auto-detecting the language.
	"""
	processor = WhisperProcessor.from_pretrained("whisper-large-v2")
	model = WhisperForConditionalGeneration.from_pretrained("whisper-large-v2")
	# Pin decoding to Chinese transcription; without this Whisper may
	# auto-detect another language or translate instead of transcribing.
	model.config.forced_decoder_ids = processor.get_decoder_prompt_ids(language="chinese", task="transcribe")
	model.to(device)
	return model, processor


def speech_file_to_array_fn(batch):
	"""Load the audio for a batch dict and clean its reference sentence.

	Args:
		batch: dict with a "path" key (wav file path) and a "sentence" key
			(reference transcription).

	Returns:
		The same dict with "speech" set to the 16 kHz waveform (Whisper's
		expected sample rate) and "sentence" stripped of every character
		that is not a CJK ideograph (\\u4e00-\\u9fa5) or an ASCII digit.
	"""
	with warnings.catch_warnings():
		# librosa emits resampling warnings we deliberately silence here.
		warnings.simplefilter("ignore")
		speech_array, sampling_rate = librosa.load(batch["path"], sr=16_000)
		batch["speech"] = speech_array
		# Raw string, and no capture group (the group was unused by re.sub).
		# The original trailing .lower() was a no-op — the surviving
		# characters (CJK ideographs and digits) have no case — so it is dropped.
		batch["sentence"] = re.sub(r"[^\u4e00-\u9fa5\u0030-\u0039]", "", batch["sentence"])
	return batch


def map_to_pred(batch, asr_model=None, asr_processor=None):
	"""Transcribe one prepared batch and attach normalized reference/prediction.

	Args:
		batch: dict with "speech" (16 kHz waveform) and "sentence" keys,
			as produced by ``speech_file_to_array_fn``.
		asr_model: optional Whisper model; defaults to the module-global
			``model`` so existing single-argument call sites keep working.
		asr_processor: optional Whisper processor; defaults to the
			module-global ``processor``.

	Returns:
		The same dict with "reference" (normalized ground truth) and
		"pred_strings" (normalized, Simplified-Chinese prediction) added.
	"""
	# Fall back to the globals created in the __main__ block; passing the
	# objects explicitly (as test_each does) avoids a NameError on import.
	asr_model = model if asr_model is None else asr_model
	asr_processor = processor if asr_processor is None else asr_processor

	input_features = asr_processor(batch["speech"], sampling_rate=16000, return_tensors="pt").input_features
	batch["reference"] = asr_processor.tokenizer._normalize(batch['sentence'])

	with torch.no_grad():
		predicted_ids = asr_model.generate(input_features.to(device))[0]
	transcription = asr_processor.decode(predicted_ids)
	# Convert any Traditional-Chinese output to Simplified so scoring is fair.
	batch["pred_strings"] = zhconv.convert(asr_processor.tokenizer._normalize(transcription), 'zh-cn')
	return batch

def test_each(wav_path, model, processor):
	"""Transcribe a single wav file, print timing and text, and return the text.

	Args:
		wav_path: path to the wav file to transcribe.
		model: Whisper model already on ``device``.
		processor: matching Whisper processor.

	Returns:
		str: the normalized, Simplified-Chinese transcription (new, backward
		compatible — previous callers ignored the ``None`` return).
	"""
	# "sentence" is a dummy value; speech_file_to_array_fn requires the key.
	batch = {"path": wav_path, "sentence": "你好"}
	batch = speech_file_to_array_fn(batch)
	t1 = time.time()
	input_features = processor(batch["speech"], sampling_rate=16000, return_tensors="pt").input_features
	with torch.no_grad():
		predicted_ids = model.generate(input_features.to(device))[0]
	transcription = processor.decode(predicted_ids)
	batch["pred_strings"] = zhconv.convert(processor.tokenizer._normalize(transcription), 'zh-cn')
	t2 = time.time()
	print("耗时", t2 - t1)
	print("转述文本为：", batch['pred_strings'])
	return batch["pred_strings"]


def test_all_file():
	# 用于存储所有找到的 WAV 文件的列表
	wav_files = []
	# 遍历指定目录下的子目录
	for root, dirs, files in os.walk(test_path):
		for file in files:
			if file.endswith(".wav"):
				# 找到一个 WAV 文件，将其完整路径加入列表
				wav_path = os.path.join(root, file)
				print(f"{wav_path=}")
				wav_files.append(wav_path)

	# 打印所有找到的 WAV 文件的完整路径
	for wav_file in wav_files:
		print(wav_file)
	return wav_files

if __name__ == "__main__":
	# Load the model once, report how long it took, then transcribe every
	# wav file discovered under test_path.
	print("loading model ...")
	load_start = time.time()
	model, processor = load_model()
	load_end = time.time()
	print(f"model loaded, {load_end - load_start} secs")
	wav_files = test_all_file()
	for wav in wav_files:
		print(f"Whispering {wav=} ...")
		test_each(wav, model, processor)
