"""AI chatbot over a persisted llama_index vector store, served via Gradio.

Code adapted from the Beebom article:
"How to Train an AI Chatbot With Custom Knowledge Base Using ChatGPT API"
by Arjun Sha
https://beebom.com/how-train-ai-chatbot-custom-knowledge-base-chatgpt-api/
"""
from llama_index import (
    GPTVectorStoreIndex,
    SimpleDirectoryReader,
    ServiceContext,
    set_global_service_context,
    load_index_from_storage,
    StorageContext,
    PromptHelper,
)
from llama_index.llms import OpenAI
import gradio as gr
import sys  # NOTE(review): unused here; kept in case another entry point relies on it
import os

# Best-effort key loading: prefer a local Config.py; if it does not exist,
# fall back to whatever OPENAI_API_KEY is already set in the environment.
try:
    from Config import openai_key
    os.environ["OPENAI_API_KEY"] = openai_key
except ImportError:  # was a bare `except:`; only a missing Config should be ignored
    pass

# Prompt / LLM sizing parameters.
max_input_size = 4096    # model context window (tokens)
num_outputs = 512        # max tokens generated per response
chunk_size_limit = 600   # max tokens per document chunk

prompt_helper = PromptHelper(
    context_window=max_input_size,
    num_output=num_outputs,
    chunk_overlap_ratio=0.1,
    chunk_size_limit=chunk_size_limit,
)
llm = OpenAI(model="gpt-3.5-turbo", temperature=0.5, max_tokens=num_outputs)
service_context = ServiceContext.from_defaults(llm=llm, prompt_helper=prompt_helper)
set_global_service_context(service_context)


def retrieve_index(index_path):
    """Load a previously persisted vector index from the directory *index_path*.

    Returns the index object reconstructed by ``load_index_from_storage``.
    """
    storage_context = StorageContext.from_defaults(persist_dir=index_path)
    return load_index_from_storage(storage_context)


def chatbot(input_text):
    """Stream the query engine's answer to *input_text*.

    Generator used as the Gradio ``fn``: yields the accumulated response
    after each new token so the UI updates incrementally. Depends on the
    module-level streaming query engine ``QE`` created in the __main__
    block below.
    """
    response = QE.query(input_text)
    response_stream = ""
    for token in response.response_gen:
        response_stream += token
        # Yield inside the loop so Gradio re-renders as tokens arrive;
        # a single yield after the loop would defeat streaming=True.
        yield response_stream


if __name__ == "__main__":
    iface = gr.Interface(
        fn=chatbot,
        inputs=gr.components.Textbox(lines=7, label="Enter your text"),
        outputs="text",
        title="AI Chatbot for the Doing What Works Library",
    )
    index = retrieve_index("dww_vectors")
    # QE is read by chatbot() once Gradio starts dispatching requests.
    QE = index.as_query_engine(streaming=True)
    iface.launch(share=False)