import gradio as gr
import openai
import os
import pandas as pd

from QDrantHelper import QDrantHelper

# Module-level vector-store helper shared by file ingestion and retrieval below.
qdrantHelper = QDrantHelper()
def load_file(file_paths):
    """Ingest each uploaded PDF into the vector store.

    Non-PDF paths are skipped silently. Returns a one-column DataFrame
    ("已上传的文件") listing the base names of the files that were ingested,
    which Gradio renders in the upload-result table.
    """
    ingested_names = []
    for path in file_paths:
        # Only PDF files are supported; ignore anything else.
        if not path.lower().endswith('.pdf'):
            continue
        name = os.path.basename(path)
        # Vectorize the document, keyed by its file name for later retrieval.
        qdrantHelper.file_to_vector(path, name)
        ingested_names.append(name)

    return pd.DataFrame({"已上传的文件": ingested_names})

def sendChat(conversationMode,chatbot,inputText, modelDropdown, uploadResult,temperatureSlider, maxTokens, frequencyPenalty, presencePenaltyPenalty,stream=False):
    """Generator handler for one chat turn.

    Appends the user message to ``chatbot`` and yields ``[chatbot, ""]``
    updates (the empty string clears the input textbox). In '普通对话' mode the
    full visible history is replayed as user/assistant messages; in any other
    mode a retrieval-augmented prompt is built from the uploaded files. The
    reply is filled in incrementally when ``stream`` is true, or in one shot
    otherwise.
    """
    try:
        # Show the user's message immediately; the assistant slot (None) is
        # filled in once the model responds.
        chatbot.append([inputText, None])
        yield [chatbot, ""]

        messages = []

        if conversationMode == '普通对话':
            # Replay the whole history as alternating user/assistant turns.
            for user_msg, assistant_msg in chatbot:
                if user_msg is not None:
                    messages.append({"role": "user", "content": user_msg})
                if assistant_msg is not None:
                    messages.append({"role": "assistant", "content": assistant_msg})
        else:
            # File-chat mode: build a retrieval-augmented prompt from the
            # documents listed in the upload-result table.
            current_file_list = uploadResult['已上传的文件'].values.tolist()
            prompt = qdrantHelper.query_retrival(inputText, current_file_list, 3, chatbot)
            if prompt:
                messages.append({"role": "user", "content": prompt})
                print(f"取回的查询结果,{prompt}")

        # BUG FIX: the original called the API even when retrieval produced no
        # prompt, sending an empty message list (a guaranteed API error).
        # Surface a hint in the chat instead.
        if not messages:
            chatbot[-1][1] = "未检索到相关内容，无法构建请求，请先上传文件或换个问题。"
            yield [chatbot, ""]
            return

        response = getLLMResponse(messages, modelDropdown, temperatureSlider, maxTokens, frequencyPenalty, presencePenaltyPenalty, stream)

        if stream:
            chatbot[-1][1] = ""
            for chunk in response:
                # Some providers emit keep-alive chunks with empty choices or a
                # None delta; guard against both before concatenating.
                if chunk.choices and chunk.choices[0].delta.content is not None:
                    chatbot[-1][1] += chunk.choices[0].delta.content
                    yield [chatbot, ""]
        else:
            chatbot[-1][1] = response.choices[0].message.content
            yield [chatbot, ""]
    except Exception as e:
        # UI boundary: log the failure and show it in the chat rather than
        # silently leaving the assistant reply empty (original only printed).
        print(e)
        if chatbot and chatbot[-1][1] is None:
            chatbot[-1][1] = f"发生错误：{e}"
        yield [chatbot, ""]

def clearInput():
    """Return an empty string, used by Gradio to reset a component's value."""
    return ""

def getLLMResponse(messages, modelDropdown, temperatureSlider, maxTokens, frequencyPenalty, presencePenaltyPenalty,stream):
    """Call the chat-completions endpoint and return the raw response.

    Credentials and the base URL are read from the OPENAI_API_KEY_ZHIHU and
    OPENAI_API_BASE_ZHIHU environment variables. When ``stream`` is true the
    return value is an iterator of chunks, otherwise a single completion
    object.
    """
    api_client = openai.Client(
        api_key=os.environ["OPENAI_API_KEY_ZHIHU"],
        base_url=os.environ["OPENAI_API_BASE_ZHIHU"],
    )
    request_args = {
        "model": modelDropdown,
        "messages": messages,
        "max_tokens": maxTokens,
        "temperature": temperatureSlider,
        "frequency_penalty": frequencyPenalty,
        "presence_penalty": presencePenaltyPenalty,
        "stream": stream,
    }
    return api_client.chat.completions.create(**request_args)

def load_models():
    """Return the model ids offered in the model-selection dropdown."""
    return [
        "gpt-4o",
        "gpt-4-turbo",
        "gpt-4",
        "gpt-3.5-turbo",
        "gpt-3.5",
    ]

def load_conversationModes():
    """Return the available conversation modes (plain chat / file chat)."""
    return ["普通对话", "文件对话"]

def main():
    """Build and launch the Gradio chat UI.

    Left column: chat history, input box, submit/clear buttons. Right column:
    conversation mode, model choice, streaming toggle, PDF upload with its
    result table, and sampling parameters.
    """
    with gr.Blocks() as app:
        with gr.Row():
            with gr.Column():
                chatbot = gr.Chatbot(label="聊天机器人")
                inputText = gr.Textbox(label="输入文本", placeholder="您好，请在这里输入您的问题。")
                with gr.Row():
                    submitButton = gr.Button("提交", size="sm")
                    clearButton = gr.Button("清空", size="sm")
            with gr.Column():
                conversationMode = gr.Radio(choices=load_conversationModes(), label="对话模式", value="普通对话")
                modelDropdown = gr.Dropdown(choices=load_models(), label="模型选择", value="gpt-3.5-turbo")
                stream = gr.Checkbox(label="流式输出", value=False)
                fileUpload = gr.File(label="文件上传", file_count="multiple", file_types=[".pdf"])
                uploadResult = gr.Dataframe(value=pd.DataFrame({"已上传的文件": []}), visible=True, height=100)
                temperatureSlider = gr.Slider(minimum=0, maximum=1, step=0.1, label="温度", value=0.5)
                # BUG FIX: minimum was 0, but max_tokens=0 is an invalid API value.
                maxTokens = gr.Slider(minimum=1, maximum=1000, step=1, label="最大token", value=1000)
                frequencyPenalty = gr.Slider(minimum=0, maximum=1, step=0.1, label="频率惩罚", value=0.5)
                presencePenaltyPenalty = gr.Slider(minimum=0, maximum=1, step=0.1, label="存在惩罚", value=0.5)

        # One shared input list so the button click and the Enter key in the
        # textbox cannot drift apart (they were duplicated before).
        chat_inputs = [conversationMode, chatbot, inputText, modelDropdown, uploadResult,
                       temperatureSlider, maxTokens, frequencyPenalty, presencePenaltyPenalty, stream]
        submitButton.click(fn=sendChat, inputs=chat_inputs, outputs=[chatbot, inputText])
        inputText.submit(fn=sendChat, inputs=chat_inputs, outputs=[chatbot, inputText])
        # BUG FIX: the clear button previously fed the "" returned by
        # clearInput() into the Chatbot component, whose value must be a list.
        # Clear both the history and the input box with correctly-typed values.
        clearButton.click(fn=lambda: ([], ""), inputs=[], outputs=[chatbot, inputText])
        fileUpload.upload(fn=load_file, inputs=[fileUpload], outputs=[uploadResult])

    app.launch()


# Script entry point: only launch the UI when run directly, not on import.
if __name__ == "__main__":
    main()

    # Sample questions for manual testing:
    #   如何学习人工智能？ (How do I learn AI?)
    #   感冒了吃什么药？ (What medicine for a cold?)