import os
import time

import gradio as gr
import openai

import core
import models

# The OpenAI key and endpoint are taken from the environment; never hard-code credentials here.
api_key = os.environ["OPENAI_API_KEY"]
api_base = os.environ["OPENAI_API_BASE"]

# def embed(texts: list):
#     return openai.Embedding.create(input=texts, model="text-embedding-ada-002")["data"][0]["embedding"]


def chatbot_initialize():
    """Build a retrieval-augmented chatbot backed by a Chroma vector store."""
    retriever = core.retriever.ChromaRetriever(
        pdf_dir="",
        collection_name="langchain",
        split_args={"size": 2048, "overlap": 10},
        # embedding_model="text-embedding-ada-002"
        embed_model=models.BiomedModel(),
    )
    Chatbot = core.chatbot.RetrievalChatbot(retriever=retriever)
    return Chatbot


async def respond(query, chat_history, img_path_list, chat_history_string):
    """Gradio handler: answer the query (optionally with uploaded images) and update the chat state."""
    time1 = time.time()
    global Chatbot
    response, logs = await Chatbot.response(query, image_paths=img_path_list, return_logs=True)
    chat_history.append((query, response))
    if img_path_list is None:
        chat_history_string += f"Query: {query}\nImage: None\nResponse: {response}\n\n\n"
    else:
        # gr.File(type="filepath") yields plain path strings, so take the basename directly.
        image_names = "\n".join(os.path.basename(path) for path in img_path_list)
        chat_history_string += f"Query: {query}\nImages: {image_names}\nResponse: {response}\n\n\n"
    time2 = time.time()
    print(f"Total: {time2 - time1}")
    return "", chat_history, logs, chat_history_string


if __name__ == "__main__":
    Chatbot = chatbot_initialize()

    with gr.Blocks() as demo:
        # Alternative single-image UI kept for reference:
        # chat = gr.ChatInterface(
        #     fn=respond,
        #     chatbot=gr.Chatbot(show_label=True, show_copy_button=True),
        #     additional_inputs=[
        #         gr.Image(type="filepath"),
        #     ]
        # )
        with gr.Row():
            with gr.Column(scale=2):
                chatbot = gr.Chatbot()
                msg = gr.Textbox(label="Query", show_label=True)
                imgs = gr.File(file_count="multiple", file_types=["image"], type="filepath", label="Upload Images")
                clear = gr.ClearButton([msg, chatbot])
            with gr.Column(scale=1):
                sidebar = gr.Textbox(label="Subquestions", show_label=True, show_copy_button=True, interactive=False, max_lines=30)
                history = gr.Textbox(label="Copy Chat History", show_label=True, show_copy_button=True, interactive=False, max_lines=5)
        msg.submit(respond, inputs=[msg, chatbot, imgs, history], outputs=[msg, chatbot, sidebar, history])

    demo.queue().launch()