import os

import gradio as gr
from langchain import OpenAI
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from llama_index import (
    Document,
    GPTSimpleVectorIndex,
    LangchainEmbedding,
    LLMPredictor,
    PromptHelper,
)
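
# Embed documents locally with a HuggingFace sentence-transformers model,
# wrapped so llama_index can use the LangChain embedding class.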
hfemb = HuggingFaceEmbeddings()
embed_model = LangchainEmbedding(hfemb)
#OpenAI.api_key = "YOUR_API_KEY"
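
# Build an in-memory llama_index vector index over the pasted context text.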
def build_the_bot(input_text):
    # set maximum input size
    max_input_size = 4096
    # set number of output tokens
    num_outputs = 256
    # set maximum chunk overlap
    max_chunk_overlap = 20
    # set chunk size limit
    chunk_size_limit = 600
    prompt_helper = PromptHelper(
        max_input_size,
        num_outputs,
        max_chunk_overlap,
        chunk_size_limit=chunk_size_limit,
    )
    # define the LLM used to answer queries against the index
    llm_predictor = LLMPredictor(
        llm=OpenAI(temperature=1, model_name="gpt-3.5-turbo", max_tokens=num_outputs)
    )
    # wrap the pasted text as a single Document and index it
    text_list = [input_text]
    documents = [Document(t) for t in text_list]
    global index
    index = GPTSimpleVectorIndex(
        documents,
        embed_model=embed_model,
        llm_predictor=llm_predictor,
        prompt_helper=prompt_helper,
    )
    return "Index built successfully!"

def set_api_key(api_key):
    # expose the key via the environment variable read by the OpenAI client
    os.environ["OPENAI_API_KEY"] = api_key
    return "API key has been set"

def chat(chat_history, user_input):
    # guard against querying before the index has been built
    if "index" not in globals():
        yield chat_history + [(user_input, "Please build the bot first on the 'Input Context Information' tab.")]
        return
    bot_response = index.query(user_input)
    # stream the answer back character by character for a typewriter effect
    response = ""
    for letter in bot_response.response:
        response += letter
        yield chat_history + [(user_input, response)]
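
# --- Gradio UI: one tab to set the API key and build the index, one tab to chat ---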
with gr.Blocks() as demo:
    gr.Markdown("This application allows you to give GPT-3.5 (ChatGPT) as much context as you want. Just paste the text you want GPT to use into the context box and a llama_index vector index will be created, which allows the model to query your contextual information.")
    with gr.Tab("Input Context Information"):
        openai_api_key = gr.Textbox(label="OpenAI API Key", type="password")
        api_out = gr.Textbox(label="API key status")
        api_button = gr.Button("Set API Key")
        api_button.click(set_api_key, openai_api_key, api_out)
        text_input = gr.Textbox(label="Input Context Documents")
        text_output = gr.Textbox(label="Build status (depending on the length of your context information it may take a few minutes to build the bot)")
        text_button = gr.Button("Build the Bot!")
        text_button.click(build_the_bot, text_input, text_output)
    with gr.Tab("Bot With Context"):
        chatbot = gr.Chatbot()
        message = gr.Textbox("What is the context document about?")
        message.submit(chat, [chatbot, message], chatbot)

demo.queue().launch(debug=True)