import os
import librosa
import warnings
import re
import time
import zhconv
import pydub
import asyncio
import streamlit as st
import scipy.io.wavfile
import speech_recognition as sr
from pydub import AudioSegment
from transformers import WhisperForConditionalGeneration, WhisperProcessor
from st_audiorec import st_audiorec
import torch

# Route HuggingFace model downloads through the corporate proxy.
os.environ['HTTP_PROXY'] = "http://proxy.nioint.com:8080"
os.environ['HTTPS_PROXY'] = "http://proxy.nioint.com:8080"

# NOTE: st.set_page_config must be the first Streamlit call in the script,
# before any st.title/st.write below — do not reorder these statements.
st.set_page_config(
    page_title="B4 ASR演示",
    page_icon="🧊",
    menu_items={
        'About': 'Whisper model used for B4 AI team'})

st.title("B4 ASR Demo|Whisper模型演示")
st.write("如在体验过程中发现bug问题，欢迎反馈至shilin.zhuang，感谢您的使用！")

# Punctuation/symbol characters to strip from transcripts (CJK and Latin).
CHARS_TO_IGNORE = [",", "?", "¿", ".", "!", "¡", ";", "；", ":", '""', "%", '"', "�", "ʿ", "·", "჻", "~", "՞",
                  "؟", "،", "।", "॥", "«", "»", "„", "“", "”", "「", "」", "‘", "’", "《", "》", "(", ")", "[", "]",
                  "{", "}", "=", "`", "_", "+", "<", ">", "…", "–", "°", "´", "ʾ", "‹", "›", "©", "®", "—", "→", "。",
                  "、", "﹂", "﹁", "‧", "～", "﹏", "，", "｛", "｝", "（", "）", "［", "］", "【", "】", "‥", "〽",
                  "『", "』", "〝", "〟", "⟨", "⟩", "〜", "：", "！", "？", "♪", "؛", "/", "\\", "º", "−", "^", "'", "ʻ", "ˆ"]
# Run inference on GPU when available, otherwise CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Character class matching any char from CHARS_TO_IGNORE.
# NOTE(review): this regex is not referenced anywhere in the visible code —
# possibly left over from an earlier normalization step; verify before removing.
chars_to_ignore_regex = f"[{re.escape(''.join(CHARS_TO_IGNORE))}]"


@st.cache_resource
def init_model():
    """Load the Whisper large-v2 checkpoint and its processor.

    Cached by Streamlit so the (expensive) load happens once per server
    process, not on every script rerun.

    Returns:
        tuple: (model on ``device``, processor).
    """
    asr_processor = WhisperProcessor.from_pretrained("whisper-large-v2")
    asr_model = WhisperForConditionalGeneration.from_pretrained("whisper-large-v2")
    # Pin the decoder prompt to Chinese transcription so generate() does not
    # auto-detect the spoken language.
    asr_model.config.forced_decoder_ids = asr_processor.get_decoder_prompt_ids(
        language="chinese", task="transcribe")
    # Module.to() returns the module itself, so this preserves the
    # (model, processor) return order of the original.
    return asr_model.to(device), asr_processor


def speech_file_to_array_fn(batch):
    """Decode the audio at ``batch["path"]`` and attach the waveform.

    Loads the file resampled to 16 kHz (Whisper's expected rate) and stores
    the float waveform under ``batch["speech"]``.

    Args:
        batch: dict with a "path" key pointing at an audio file.

    Returns:
        The same dict, with "speech" added.
    """
    with warnings.catch_warnings():
        # librosa/audioread tend to emit deprecation warnings on load.
        warnings.simplefilter("ignore")
        waveform, _rate = librosa.load(batch["path"], sr=16000)
    batch["speech"] = waveform
    return batch


def map_to_pred(batch):
    """Transcribe ``batch["speech"]`` with the module-level Whisper model.

    Args:
        batch: dict with a "speech" key holding a 16 kHz waveform
            (as produced by speech_file_to_array_fn).

    Returns:
        The same dict with "pred_strings" set to the normalized,
        simplified-Chinese transcript.
    """
    input_features = processor(batch["speech"], sampling_rate=16000, return_tensors="pt").input_features
    with torch.no_grad():
        predicted_ids = model.generate(input_features.to(device))[0]
    transcription = processor.decode(predicted_ids)
    # BUG FIX: the original ended with a bare `batch["pred_strings"]` expression
    # (a no-op lookup that would raise KeyError) and returned None, discarding the
    # transcription entirely. Store the result and return the batch, matching the
    # normalization pipeline used by transcript_process.
    batch["pred_strings"] = zhconv.convert(processor.tokenizer._normalize(transcription), 'zh-cn')
    return batch


def transcript_process(batch):
    """Run ASR on ``batch["path"]`` and display the transcript in the UI.

    Args:
        batch: dict with a "path" key pointing at an audio file.
    """
    batch = speech_file_to_array_fn(batch)
    features = processor(batch["speech"], sampling_rate=16000, return_tensors="pt").input_features
    with torch.no_grad():
        token_ids = model.generate(features.to(device))[0]
    raw_text = processor.decode(token_ids)
    # Normalize with Whisper's tokenizer, then convert to simplified Chinese.
    normalized = processor.tokenizer._normalize(raw_text)
    batch["pred_strings"] = zhconv.convert(normalized, 'zh-cn')
    st.write("转述文本为：", batch['pred_strings'])


# Load (or fetch from Streamlit's resource cache) the model and processor once;
# the transcription helpers above reference them as module-level globals.
model, processor = init_model()


async def upload_file_transcript(wav_file_path):
    """Render the upload tab: save the uploaded audio and show its transcript.

    Args:
        wav_file_path: destination path where the uploaded bytes are written
            before decoding.
    """
    upload_file = st.file_uploader("请上传一段你想识别的语音")
    if upload_file is None:
        st.warning("警告：尚未上传语音文件！")
    if upload_file:
        with st.spinner("Whisper正在转述你的语音，请稍等..."):
            # Persist the upload so librosa can decode it from disk.
            with open(wav_file_path, 'wb') as f:
                f.write(upload_file.getbuffer())
            batch = speech_file_to_array_fn({"path": wav_file_path})
            features = processor(batch["speech"], sampling_rate=16000, return_tensors="pt").input_features
            with torch.no_grad():
                token_ids = model.generate(features.to(device))[0]
            decoded = processor.decode(token_ids)
            batch["pred_strings"] = zhconv.convert(processor.tokenizer._normalize(decoded), 'zh-cn')
            st.write("ASR转述的结果是：", batch["pred_strings"])


async def microphone_transcript(wav_file_path):
    """Render the microphone tab: record audio, save it as WAV, show transcript.

    Args:
        wav_file_path: destination path for the recorded audio.
    """
    recorded = st_audiorec()
    if recorded is None:
        # Nothing recorded yet on this rerun.
        return
    # NOTE(review): the recorder's bytes are treated here as raw 32-bit mono
    # PCM at 16 kHz — confirm this matches st_audiorec's actual output format.
    clip = AudioSegment(recorded, frame_rate=16000, sample_width=4, channels=1)
    clip.export(wav_file_path, format="wav")
    with st.spinner("Whisper正在转述你的语音，请稍等..."):
        batch = speech_file_to_array_fn({"path": wav_file_path})
        features = processor(batch["speech"], sampling_rate=16000, return_tensors="pt").input_features
        with torch.no_grad():
            token_ids = model.generate(features.to(device))[0]
        decoded = processor.decode(token_ids)
        batch["pred_strings"] = zhconv.convert(processor.tokenizer._normalize(decoded), 'zh-cn')
        st.write("ASR转述的结果是：", batch["pred_strings"])


async def main():
    """Top-level Streamlit page: one microphone tab and one file-upload tab."""
    # BUG FIX: both handlers write WAV files into ./asr_files/, but nothing
    # created that directory — on a fresh checkout open()/export() raised
    # FileNotFoundError. Create it up front (no-op if it already exists).
    audio_dir = './asr_files/'
    os.makedirs(audio_dir, exist_ok=True)
    st.markdown("请上传你想转述的语音或者是通过麦克风输入语音：")
    tab1, tab2 = st.tabs(["麦克风输入", "上传语音"])
    with tab1:
        await microphone_transcript(os.path.join(audio_dir, 'tmp_mic.wav'))
    with tab2:
        await upload_file_transcript(os.path.join(audio_dir, 'tmp.wav'))


if __name__ == "__main__":
    # Streamlit re-executes this script on every interaction; asyncio.run gives
    # the async tab handlers a fresh event loop each time.
    asyncio.run(main())
