#!/usr/bin/env python
# encoding: utf-8
import gradio as gr
from PIL import Image
import traceback
import re
import torch
import argparse

from model_utils import load_model
from MiniCPM.bridge_MiniCPM import MiniCPM_respond
from InternVL.bridge_InternVL import InternVL_respond
from GLM_edge.bridge_glm import GLM_respond
from QWenVL.bridge_QwenVL import QwenVL_respond
# README, How to run demo on different devices
# For Nvidia GPUs support BF16 (like A100, H100, RTX3090)
# python web_demo.py --device cuda --dtype bf16

# For Nvidia GPUs do NOT support BF16 (like V100, T4, RTX2080)
# python web_demo.py --device cuda --dtype fp16

# For Mac with MPS (Apple silicon or AMD GPUs).
# PYTORCH_ENABLE_MPS_FALLBACK=1 python web_demo.py --device mps --dtype fp16

# Argparser — command-line options for device/dtype selection (see the
# README comments above for per-device invocations).
parser = argparse.ArgumentParser(description='demo')
parser.add_argument('--device', type=str, default='cuda:0', help='cuda or cpu')
parser.add_argument('--dtype', type=str, default='bf16', help='bf16 or fp16')
args = parser.parse_args()
device = args.device
# NOTE(review): args.dtype is parsed but never read in this file — presumably
# consumed by load_model / the bridge modules; confirm before removing.


# Fallback message shown in the chat when a model backend fails.
ERROR_MSG = "Error, please retry"

# Multimodal chat models selectable in the UI: display name -> checkpoint path.
# NOTE(review): these are absolute local Windows paths — adjust (or make
# configurable) before running on another machine.
models_dir = {
    'MiniCPM-Llama3-V-2_6-int4': 'E:/LLM/openbmb/MiniCPM-V-2_6-int4',
    # 'deepseek-vl2-tiny': 'deepseek-ai/deepseek-vl2-tiny',
    'InternVL2_5-2B' : 'E:/LLM/InternLM/InternVL2.5/InternVL-2.5-2B',
    'Qwen2_VL-2B': 'E:/LLM/Qwen/Qwen2-VL-2B-Instruct',
    # 'GLM-Edge-2B': 'E:/LLM/GLM/GLM-Edge-v-2b',
}

# ASR model ids selectable in the UI; each is mapped to a local checkpoint
# directory inside transcribe_button_clicked.
asr_models = [
    "openai/whisper-tiny", 
    "openai/whisper-base", 
    "openai/whisper-medium",
    "openai/whisper-large",
    # 'paraformer_large',
    # "SenseVoice_small"
]


# ---------------------------------------------------------------------------
# Component configuration dicts, consumed by create_component() below.
# Each dict mirrors the keyword arguments of the corresponding Gradio widget.
# ---------------------------------------------------------------------------

# Dropdown for choosing the multimodal chat model.
form_MMLMs_dropdown = {
    'choices': list(models_dir.keys()),
    'value': 'MiniCPM-Llama3-V-2_6-int4',
    'interactive': True,
    'label': 'Model',
}

# Dropdown for choosing the Whisper ASR model.
form_ASR_dropdown = {
    'choices': asr_models,
    'value': 'openai/whisper-base',
    'interactive': True,
    'label': 'ASR Model',
}

# Radio toggling between beam-search and sampling decoding.
form_radio = {
    'choices': ['Beam Search', 'Sampling'],
    #'value': 'Beam Search',
    'value': 'Sampling',
    'interactive': True,
    'label': 'Decode Type'
}
# Beam Form
num_beams_slider = {
    'minimum': 0,
    'maximum': 5,
    'value': 3,
    'step': 1,
    'interactive': True,
    'label': 'Num Beams'
}
# Repetition penalty used in the Beam Search accordion.
repetition_penalty_slider = {
    'minimum': 0,
    'maximum': 3,
    'value': 1.2,
    'step': 0.01,
    'interactive': True,
    'label': 'Repetition Penalty'
}
# Repetition penalty used in the Sampling accordion (milder default).
repetition_penalty_slider2 = {
    'minimum': 0,
    'maximum': 3,
    'value': 1.05,
    'step': 0.01,
    'interactive': True,
    'label': 'Repetition Penalty'
}
# NOTE(review): only referenced by the (commented-out) GLM-Edge path — kept
# for when that model is re-enabled.
repetition_penalty_slider_glm = {
    'minimum': 0,
    'maximum': 3,
    'value': 1.5,
    'step': 0.01,
    'interactive': True,
    'label': 'Repetition Penalty'
}
max_new_tokens_slider = {
    'minimum': 1,
    'maximum': 8192,
    'value': 1024,
    'step': 1,
    'interactive': True,
    'label': 'Max New Tokens'    
}

# Sampling controls.
top_p_slider = {
    'minimum': 0,
    'maximum': 1,
    'value': 0.8,
    'step': 0.05,
    'interactive': True,
    'label': 'Top P'    
}
top_k_slider = {
    'minimum': 0,
    'maximum': 200,
    'value': 100,
    'step': 1,
    'interactive': True,
    'label': 'Top K'    
}
temperature_slider = {
    'minimum': 0,
    'maximum': 2,
    'value': 0.7,
    'step': 0.05,
    'interactive': True,
    'label': 'Temperature'    
}


def create_component(params, comp='Slider'):
    """Build a Gradio input component from a configuration dict.

    Args:
        params: dict of constructor arguments; required keys depend on *comp*
            (sliders need 'minimum'/'maximum'/'value'/'step'/'interactive'/
            'label', radios and dropdowns need 'choices'/'value'/...).
        comp: one of 'Slider', 'Radio', 'Button', 'DropDown'.

    Returns:
        The constructed Gradio component.

    Raises:
        ValueError: for an unknown *comp*.  (The original silently returned
        None, which would only fail later during UI wiring.)
    """
    if comp == 'Slider':
        return gr.Slider(
            minimum=params['minimum'],
            maximum=params['maximum'],
            value=params['value'],
            step=params['step'],
            interactive=params['interactive'],
            label=params['label']
        )
    elif comp == 'Radio':
        return gr.Radio(
            choices=params['choices'],
            value=params['value'],
            interactive=params['interactive'],
            label=params['label']
        )
    elif comp == 'Button':
        # Buttons in this demo are clickable by default; honor an explicit
        # 'interactive' key if the caller supplies one.
        return gr.Button(
            value=params['value'],
            interactive=params.get('interactive', True)
        )
    elif comp == 'DropDown':
        return gr.Dropdown(
            choices=params['choices'],
            value=params['value'],
            interactive=params['interactive'],
            label=params['label'],
        )
    raise ValueError(f"Unknown component kind: {comp!r}")

def upload_img(image, _chatbot, _app_session):
    """Register a freshly uploaded image and reset the session state.

    Converts the raw array from the Image widget into a PIL image, clears
    any previous status/context, and posts a confirmation message into the
    chatbot.  Returns the updated (chatbot, session) pair for Gradio.
    """
    pil_image = Image.fromarray(image)

    _app_session['sts'] = None
    _app_session['ctx'] = []
    _app_session['img'] = pil_image
    _chatbot.append(('', 'Image uploaded successfully, you can talk to me now'))
    return _chatbot, _app_session

def respond(model_name, _question, _chat_bot, _app_cfg, params_form, num_beams, 
            repetition_penalty, repetition_penalty_2, top_p, top_k, temperature):
    """Route one chat turn to the bridge matching the selected model family.

    Dispatches on a substring of the model name; an unrecognized name falls
    through and leaves the chat history and session untouched.  Always
    returns ('', chatbot, app_cfg) so the textbox is cleared in the UI.
    """
    if 'MiniCPM' in model_name:
        bridge = MiniCPM_respond
    elif 'Qwen' in model_name:
        bridge = QwenVL_respond
    elif 'InternVL' in model_name:
        bridge = InternVL_respond
    else:
        bridge = None

    if bridge is not None:
        _, _chat_bot, _app_cfg = bridge(
            models_dir, model_name, _question, _chat_bot, _app_cfg,
            params_form, num_beams, repetition_penalty, repetition_penalty_2,
            top_p, top_k, temperature)

    return '', _chat_bot, _app_cfg

def regenerate_button_clicked(model_name, _question, _chat_bot, _app_cfg,
                              params_form, num_beams, repetition_penalty,
                              repetition_penalty_2, top_p, top_k, temperature):
    """Drop the last exchange and re-ask the last user question.

    Returns the (textbox_value, chatbot, app_cfg) triple expected by the
    Gradio click handler.
    """
    if len(_chat_bot) <= 1:
        # Nothing to regenerate yet (at most the upload confirmation).
        _chat_bot.append(('Regenerate', 'No question for regeneration.'))
        return '', _chat_bot, _app_cfg
    if _chat_bot[-1][0] == 'Regenerate':
        # The "nothing to regenerate" notice is already showing; do nothing.
        return '', _chat_bot, _app_cfg

    # Pop the last Q/A pair from both the visible chat and the model context.
    _question = _chat_bot[-1][0]
    _chat_bot = _chat_bot[:-1]
    _app_cfg['ctx'] = _app_cfg['ctx'][:-2]

    if 'GLM-Edge' in model_name:
        return GLM_respond(model_name, _question, _chat_bot, _app_cfg,
                           params_form, num_beams, repetition_penalty,
                           repetition_penalty_2, top_p, top_k, temperature)
    # BUG FIX: the original called respond() without returning its result,
    # so this handler returned None and Gradio never updated its outputs.
    return respond(model_name, _question, _chat_bot, _app_cfg, params_form,
                   num_beams, repetition_penalty, repetition_penalty_2,
                   top_p, top_k, temperature)

# Styling for the Audio widget's waveform display (colors, skip step,
# and hiding the extra playback controls).
waveform_option = gr.WaveformOptions(
    waveform_color="#01C6FF",
    waveform_progress_color="#0066B4",
    skip_length=2,
    show_controls=False,
)

# Speech-to-text
def transcribe_button_clicked(model_name, inputs, task):
    """Transcribe (or translate) a recorded audio file with Whisper.

    Args:
        model_name: one of the `asr_models` hub ids; mapped to a local
            checkpoint directory below.
        inputs: filepath of the recorded clip (Gradio Audio with
            type="filepath"), or None when nothing was recorded.
        task: "transcribe" or "translate", forwarded to the pipeline.

    Returns:
        (text, text) — duplicated so one click fills both the ASR result
        box and the chat input textbox.

    Raises:
        gr.Error: when no audio was submitted or the model is unknown.
    """
    # Imported lazily so the heavy transformers dependency is only loaded
    # when ASR is actually used.
    from transformers import pipeline

    # Fail fast, before building the expensive pipeline.
    if inputs is None:
        raise gr.Error("No audio file submitted! Please upload or record an audio file before submitting your request.")

    # Hub id -> local checkpoint directory.  BUG FIX: the original if/elif
    # chain left model_dir unbound (NameError) for any unlisted model name.
    local_dirs = {
        "openai/whisper-tiny": "./ASR_Models/pretrained_models/whisper-tiny",
        "openai/whisper-base": "./ASR_Models/pretrained_models/whisper-base",
        "openai/whisper-medium": "./ASR_Models/pretrained_models/whisper-medium",
        "openai/whisper-large": "./ASR_Models/pretrained_models/whisper-large-v3-turbo",
    }
    try:
        model_dir = local_dirs[model_name]
    except KeyError:
        raise gr.Error(f"Unknown ASR model: {model_name}")

    # NOTE(review): the pipeline is rebuilt on every click; consider caching
    # per model_name if transcription latency matters.
    pipe = pipeline(
        "automatic-speech-recognition",
        model=model_dir,
        chunk_length_s=30,
        device="cpu",
    )

    text = pipe(inputs, batch_size=8, generate_kwargs={"task": task}, return_timestamps=True)["text"]

    return text, text

with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column(scale=1, min_width=300):
            params_form = create_component(form_radio, comp='Radio')
            # Dropdown for picking the multimodal chat model.
            with gr.Accordion("Model Selection") as model_select_accordion:
                MM_model_name = create_component(form_MMLMs_dropdown, comp='DropDown')
            with gr.Accordion("ASR Model Selection") as asr_model_select_accordion:
                asr_model_name = create_component(form_ASR_dropdown, comp='DropDown')
                asr_task = gr.Radio(["transcribe", "translate"], label="Task", value="transcribe")
            with gr.Accordion("Beam Search") as beams_accordion:
                num_beams = create_component(num_beams_slider)
                repetition_penalty = create_component(repetition_penalty_slider)
            with gr.Accordion("Sampling") as sampling_accordion:
                top_p = create_component(top_p_slider)
                top_k = create_component(top_k_slider)
                temperature = create_component(temperature_slider)
                repetition_penalty_2 = create_component(repetition_penalty_slider2)
            regenerate = create_component({'value': 'Regenerate'}, comp='Button')
        with gr.Column(scale=3, min_width=500):
            # Per-session state shared by the handlers above:
            # 'sts' (status), 'ctx' (chat context), 'img' (uploaded PIL image);
            # all None until an image is uploaded.
            app_session = gr.State({'sts': None, 'ctx': None, 'img': None})
            bt_pic = gr.Image(label="Upload an image to start")
            audio_speech = gr.Audio(sources="microphone", type="filepath", waveform_options=waveform_option, label="Record an audio to start")
            with gr.Accordion("ASR result") as asr_accordion:
                asr_text_result = gr.Textbox(label="ASR text results.")
                transcribe = create_component({'value': 'Transcribe'}, comp='Button')
            chat_bot = gr.Chatbot(label="Chat with MutiModalLM")
            txt_message = gr.Textbox(label="Input text")

            # BUG FIX: the original inputs list omitted MM_model_name, so
            # txt_message was passed as model_name and every following
            # argument was shifted — the handler could never run correctly.
            regenerate.click(
                regenerate_button_clicked,
                [MM_model_name, txt_message, chat_bot, app_session, params_form, num_beams, repetition_penalty, repetition_penalty_2, top_p, top_k, temperature],
                [txt_message, chat_bot, app_session]
            )

            transcribe.click(
                transcribe_button_clicked,
                [asr_model_name, audio_speech, asr_task],
                [asr_text_result, txt_message]
            )

            txt_message.submit(
                respond,
                [MM_model_name, txt_message, chat_bot, app_session, params_form, num_beams, repetition_penalty, repetition_penalty_2, top_p, top_k, temperature],
                [txt_message, chat_bot, app_session]
            )
            # Clear the chatbot first, then register the new image in session.
            bt_pic.upload(lambda: None, None, chat_bot, queue=False).then(upload_img, inputs=[bt_pic, chat_bot, app_session], outputs=[chat_bot, app_session])

# launch
# Alternative: bind to all interfaces on a fixed port for LAN access.
#demo.launch(share=False, debug=True, show_api=False, server_port=8080, server_name="0.0.0.0")
demo.launch()

