import os
import gradio as gr
from llama_index import SimpleDirectoryReader, GPTSimpleVectorIndex, LLMPredictor, ServiceContext, PromptHelper
from langchain.chat_models import ChatOpenAI
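# Note: this script targets the legacy llama_index API (GPTSimpleVectorIndex,
# LLMPredictor, ServiceContext); newer llama_index releases have since replaced
# these classes, so pin a compatible version when running this app.
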
def init_index(directory_path):
    # Model parameters
    max_input_size = 4096
    num_outputs = 512
    max_chunk_overlap = 20
    chunk_size_limit = 600

    # Initialize the prompt helper and the LLM predictor backed by LangChain's ChatOpenAI model
    prompt_helper = PromptHelper(max_input_size, num_outputs, max_chunk_overlap, chunk_size_limit=chunk_size_limit)
    llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0.7, model_name="gpt-3.5-turbo", max_tokens=num_outputs))

    # Read documents from the specified directory
    documents = SimpleDirectoryReader(directory_path).load_data()

    # Build the vector index over the documents
    service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, prompt_helper=prompt_helper)
    index = GPTSimpleVectorIndex.from_documents(documents, service_context=service_context)

    # Persist the index to disk so the chatbot can reload it per query
    index.save_to_disk('index.json')
    return index

def chatbot(input_text):
    # Load the persisted index
    index = GPTSimpleVectorIndex.load_from_disk('index.json')

    # Query the index with the user's question
    response = index.query(input_text, response_mode="compact")
    return response.response

# Read the OpenAI API key from the environment (e.g. a Space secret),
# falling back to an interactive prompt when running locally
def get_api_key():
    if not os.environ.get("OPENAI_API_KEY"):
        os.environ["OPENAI_API_KEY"] = input("Please enter your OpenAI API key: ")

# Create the Gradio UI for interacting with the chatbot
iface = gr.Interface(fn=chatbot,
                     inputs=gr.components.Textbox(lines=7, placeholder="Enter your question here"),
                     outputs="text",
                     title="Frost AI ChatBot: Your Knowledge Companion Powered by ChatGPT",
                     description="Ask any question about research papers")
# Make sure the OpenAI API key is set before building the index
get_api_key()

# Initialize the index over the documents in the "docs" directory
init_index("docs")
# Launch the interface
iface.launch(share=True)