import gradio as gr
import modelscope_studio.components.antd as antd
import modelscope_studio.components.base as ms
from subway_qa.adapters import format_history, predict
from subway_qa.agents.asr import run as run_asr
from subway_qa.agents.rag import build_prompt


def media_predict(audio, text_input, history, state_value):
    """Handle one user turn (typed text or recorded audio) and stream the reply.

    Generator wired to ``submit_btn.click``; every yield is a 5-tuple matching
    the handler's outputs: (microphone, media_chatbot, submit_btn, stop_btn,
    state).

    Args:
        audio: filepath of the recorded clip (``gr.Audio`` with
            ``type="filepath"``), or ``None``.
        text_input: the typed question; may be empty/whitespace.
        history: chatbot message list in Gradio "messages" format
            (mutated in place).
        state_value: session dict holding ``"oss_cache"`` and
            ``"extraction_cache"``.
    """
    files = [audio]
    # Text input takes priority over audio.
    if text_input and text_input.strip():
        history.append({"role": "user", "content": (text_input,)})
        prompt = build_prompt(
            text_input,
            use_query=True,
            history=history,
            extraction_cache=state_value["extraction_cache"],
        )
        formatted_history = format_history(
            history=history, prompt=prompt, oss_cache=state_value["oss_cache"]
        )
        # Swap the tuple back to a plain string so the chatbot renders it
        # as text rather than as a file attachment.
        history[-1]["content"] = text_input
    # Audio input: transcribe first, then build the prompt from the ASR text.
    elif audio:
        for f in files:
            if f:
                history.append({"role": "user", "content": (f,)})
        asr_text = run_asr(audio)
        prompt = build_prompt(
            asr_text, history=history, extraction_cache=state_value["extraction_cache"]
        )
        formatted_history = format_history(
            history=history, prompt=prompt, oss_cache=state_value["oss_cache"]
        )
    else:
        # No input at all: emit one no-op update and stop.  The original code
        # used ``return (6-tuple)`` here, but a value returned from a
        # generator is discarded by Gradio (and the tuple had one element too
        # many for the 5 declared outputs), so the UI never saw the update.
        yield (
            None,  # microphone
            history,  # media_chatbot
            gr.update(visible=True),  # submit_btn
            gr.update(visible=False),  # stop_btn
            state_value,  # state
        )
        return

    # First yield: swap the submit button for the stop button while streaming.
    yield (
        None,  # microphone
        history,  # media_chatbot
        gr.update(visible=False),  # submit_btn
        gr.update(visible=True),  # stop_btn
        state_value,  # state
    )

    history.append({"role": "assistant", "content": ""})

    for chunk in predict(formatted_history):
        if chunk["type"] == "text":
            # Each text chunk carries the full text so far; overwrite, not append.
            history[-1]["content"] = chunk["data"]
            yield (
                None,  # microphone
                history,  # media_chatbot
                gr.update(visible=False),  # submit_btn
                gr.update(visible=True),  # stop_btn
                state_value,  # state
            )
        if chunk["type"] == "audio":
            # Audio replies become a separate assistant message with a player.
            history.append({"role": "assistant", "content": gr.Audio(chunk["data"])})

    # Final yield: restore the submit button.
    yield (
        None,  # microphone
        history,  # media_chatbot
        gr.update(visible=True),  # submit_btn
        gr.update(visible=False),  # stop_btn
        state_value,  # state
    )


def clear_history():
    """Return updates that empty the chat log and clear both input widgets."""
    cleared_inputs = [gr.update(value=None) for _ in range(2)]
    return ([], *cleared_inputs)


with gr.Blocks() as demo, ms.Application(), antd.ConfigProvider():
    # Per-session caches shared across handler calls.
    state = gr.State({"oss_cache": {}, "extraction_cache": {}})

    with gr.Row():
        # Left column: title and usage instructions.
        with gr.Column(scale=1):
            with antd.Flex(gap="small", justify="center", align="center"):
                with antd.Flex(vertical=True, gap="small", align="center"):
                    antd.Typography.Title(
                        "地铁智能客服问答",
                        level=1,
                        elem_style=dict(margin=0, fontSize=28),
                    )
                    with antd.Flex(vertical=True, gap="small"):
                        antd.Typography.Text(
                            "🎯 使用说明：",
                            strong=True,
                        )
                        antd.Typography.Text(
                            "1️⃣ 点击音频录制按钮",
                        )
                        antd.Typography.Text("2️⃣ 输入音频")
                        antd.Typography.Text(
                            "3️⃣ 点击提交并等待模型的回答",
                        )
        # Right column: inputs and action buttons.
        with gr.Column(scale=2):
            microphone = gr.Audio(
                sources=["microphone"],
                format="wav",
                type="filepath",
            )
            text_input = gr.Textbox(label="文本输入", placeholder="请输入您的问题...")
            submit_btn = gr.Button("提交", variant="primary")
            stop_btn = gr.Button("停止", visible=False)
            clear_btn = gr.Button("清除历史")
    with gr.Row():
        media_chatbot = gr.Chatbot(height=650, type="messages")

    submit_event = submit_btn.click(
        fn=media_predict,
        inputs=[
            microphone,
            text_input,
            media_chatbot,
            state,
        ],
        outputs=[
            microphone,
            media_chatbot,
            submit_btn,
            stop_btn,
            state,
        ],
    )
    # Stop cancels the streaming generator and restores the submit button.
    stop_btn.click(
        fn=lambda: (gr.update(visible=True), gr.update(visible=False)),
        inputs=None,
        outputs=[submit_btn, stop_btn],
        cancels=[submit_event],
        queue=False,
    )
    # clear_history returns three values (history, mic, text); the original
    # wiring listed only two outputs, which raises at runtime and never
    # cleared the text box — text_input added to match.
    clear_btn.click(
        fn=clear_history,
        inputs=None,
        outputs=[media_chatbot, microphone, text_input],
    )


def main():
    """Start the demo with request queueing enabled (no public share link)."""
    queued_app = demo.queue(default_concurrency_limit=100, max_size=100)
    queued_app.launch(max_threads=100, ssr_mode=False, share=False)