import gradio as gr
import logging
import asyncio
import os
from modules.audio_processing import AudioProcessor
from modules.ai_models import AIModels
from modules.speech_to_text import speech_to_text

# Configure application-wide logging: timestamp - level - message.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

def create_interface(config):
    """Build and return the Gradio Blocks UI for the multi-model chat app.

    Wires together text Q&A, microphone recording and uploaded-audio
    recognition against three selectable chat models (文心一言, Spark AI, 千问).

    Args:
        config: application configuration forwarded to ``AIModels`` and to
            the ``speech_to_text`` backend.

    Returns:
        The assembled ``gr.Blocks`` demo (not yet launched).
    """
    ai_models = AIModels(config)
    audio_processor = AudioProcessor()
    # Shared mutable conversation history; entries are
    # {"role": "user" | "assistant", "content": str}.
    message_history = []

    def reset_conversation():
        """Clear the conversation history and return a confirmation message."""
        nonlocal message_history
        message_history = []
        return "对话历史已重置"

    # Dispatch table: UI model name -> bound AIModels method.
    model_calls = {
        "文心一言": ai_models.call_wenxin_ai,
        "Spark AI": ai_models.call_spark_ai,
        "千问": ai_models.call_qwen_ai,
    }

    def process_text_question(question, model, temperature, max_tokens):
        """Ask the selected model *question* and return the full transcript.

        Appends both the user question and the model answer to the shared
        history, then renders the whole conversation as display text.
        """
        nonlocal message_history
        message_history.append({"role": "user", "content": question})

        call = model_calls.get(model)
        if call is not None:
            response = call(message_history, temperature=temperature, max_tokens=max_tokens)
        else:
            # Unknown model choice: surface a user-facing error instead of failing.
            response = "请选择一个有效的模型。"

        message_history.append({"role": "assistant", "content": response})

        return "\n\n".join(
            f"{entry['role'].capitalize()}: {entry['content']}" for entry in message_history
        )

    def process_uploaded_file(audio_file):
        """Run speech recognition on an uploaded audio file and return the text.

        Returns a user-facing error message when no file was supplied or
        recognition produced nothing.
        """
        if audio_file is None:
            return "请上传一个音频文件。"

        audio_file_path = audio_file.name

        # asyncio.run creates, runs and ALWAYS closes a fresh event loop.
        # The previous manual new_event_loop()/close() pair leaked the loop
        # whenever speech_to_text raised, since close() was not in a finally.
        recognized_text = asyncio.run(speech_to_text(audio_file_path, config))

        if not recognized_text:
            return "语音识别失败或未识别到有效的语音内容。"
        return recognized_text

    with gr.Blocks() as demo:
        gr.Markdown("# 大模型聚合对话系统")

        with gr.Row():
            with gr.Column():
                gr.Markdown("## 大模型选择")
                model_choice = gr.Radio(label="选择模型", choices=["文心一言", "Spark AI", "千问"], value="文心一言")

        with gr.Row():
            with gr.Column():
                gr.Markdown("## 语音输入")
                start_button = gr.Button("开始录音")
                stop_button = gr.Button("结束并识别")
                gr.Markdown(
                    "点击“开始录音”开始录音，请对我说你想说的话，我可以识别你说的内容哦~\n\n请允许浏览器获取麦克风权限")

                output_audio = gr.Textbox(label="语音识别结果", placeholder="语音识别内容将显示在这里",
                                          interactive=False)

            with gr.Column():
                gr.Markdown("## 上传音频文件")
                audio_file_input = gr.File(label="上传音频文件", type="file")
                process_button = gr.Button("识别并提问")
                # Hidden textbox reserved for receiving speech-recognition output.
                hidden_audio_text = gr.Textbox(visible=False)

        with gr.Row():
            with gr.Column():
                gr.Markdown("## 文本提问")
                text_input = gr.Textbox(label="输入你的问题", placeholder="在这里输入你的问题")
                temperature_slider = gr.Slider(label="Temperature", minimum=0.0, maximum=1.0, value=0.7, step=0.1)
                max_tokens_slider = gr.Slider(label="Max Tokens", minimum=10, maximum=500, value=100, step=10)
                ask_button = gr.Button("提问")

            with gr.Column():
                gr.Markdown("## 模型的回答展示")
                output_text = gr.Textbox(label="模型回答", placeholder="模型的回答将显示在这里", interactive=False,
                                         lines=10)

        # Event wiring must come after the components are defined.
        start_button.click(audio_processor.start_recognition, outputs=output_audio)
        # Stop recording, put the recognized text into the question box, then
        # immediately forward it to the selected model.
        stop_button.click(audio_processor.stop_recognition, inputs=[], outputs=text_input).then(
            fn=process_text_question,
            inputs=[text_input, model_choice, temperature_slider, max_tokens_slider],
            outputs=output_text
        )

        # Recognize the uploaded file, then ask the model with the result.
        process_button.click(process_uploaded_file, inputs=audio_file_input, outputs=text_input).then(
            fn=process_text_question,
            inputs=[text_input, model_choice, temperature_slider, max_tokens_slider],
            outputs=output_text
        )

        ask_button.click(process_text_question,
                         inputs=[text_input, model_choice, temperature_slider, max_tokens_slider],
                         outputs=output_text)

        reset_button = gr.Button("重置对话")
        reset_button.click(reset_conversation, outputs=output_text)

    return demo