import os
import time

import gradio as gr
import openai

import core
import models

api_key = os.environ["OPENAI_API_KEY"]
api_base = os.environ["OPENAI_API_BASE"]

# def embed(texts: list):
#     return openai.Embedding.create(input=texts, model="text-embedding-ada-002")["data"]["embedding"]


def chatbot_initialize():
    # Build a Chroma-backed retriever and wrap it in a retrieval chatbot.
    retriever = core.retriever.ChromaRetriever(
        pdf_dir="",
        collection_name="langchain",
        split_args={"size": 2048, "overlap": 10},
        # embedding_model="text-embedding-ada-002"
        embed_model=models.BiomedModel(),
    )
    chatbot = core.chatbot.RetrievalChatbot(retriever=retriever)
    return chatbot


def respond(query, history, image):
    # gr.ChatInterface calls fn(message, history, *additional_inputs);
    # the image comes from the gr.Image additional input configured below.
    global Chatbot
    response = Chatbot.response(query, image)
    # Stream the answer back one character at a time.
    for i in range(len(response)):
        time.sleep(0.01)
        yield response[: i + 1]


if __name__ == "__main__":
    Chatbot = chatbot_initialize()
    demo = gr.ChatInterface(
        fn=respond,
        additional_inputs=[
            gr.Image(type="filepath"),
        ],
    )
    demo.queue().launch()