# Hugging Face Spaces status banner ("Spaces: Sleeping") — scraping residue,
# converted to a comment so the file parses as Python.
# Bot adapted from https://beebom.com/how-train-ai-chatbot-custom-knowledge-base-chatgpt-api/
# !pip freeze > requirements2.txt
from gpt_index import SimpleDirectoryReader, GPTListIndex, GPTSimpleVectorIndex, LLMPredictor, PromptHelper
from langchain.chat_models import ChatOpenAI
import gradio as gr
import openai
import sys
import os
import time

# NOTE: newer langchain releases broke this bot; pin the known-good version:
# pip uninstall -y langchain
# pip install langchain==0.0.153
# os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY

# Read the API key from the environment (never hard-code secrets).
openai.api_key = os.getenv("OPENAI_API_KEY")
def construct_index(directory_path):
    """Build a GPT vector index from every document under *directory_path*.

    The index is persisted to ``index.json`` in the working directory (so
    ``question_answer`` can reload it later) and also returned.
    """
    # Prompt-sizing parameters for the gpt-3.5-turbo context window.
    max_input_size = 4096
    num_outputs = 512       # max tokens the LLM may generate per answer
    max_chunk_overlap = 20  # token overlap between adjacent document chunks
    chunk_size_limit = 600  # max tokens per document chunk

    prompt_helper = PromptHelper(max_input_size, num_outputs, max_chunk_overlap, chunk_size_limit=chunk_size_limit)
    llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0.1, model_name="gpt-3.5-turbo", max_tokens=num_outputs))

    documents = SimpleDirectoryReader(directory_path).load_data()
    index = GPTSimpleVectorIndex(documents, llm_predictor=llm_predictor, prompt_helper=prompt_helper)
    index.save_to_disk('index.json')
    return index
# Conversation log seeded with the system prompt. NOTE(review): entries later
# appended by question_answer() use ad-hoc keys and this list is never sent to
# the API — it only accumulates history in memory. The stray "# " prefixes
# that were embedded inside the prompt string (paste artifact) are removed.
messages = [{"role": "system", "content": """You are a helpful customer service assistant. \
You respond to questions in the language the user asked the question in. \
You respond to questions about Dekalin products, services, website and company. \
You use only the material available in your knowledge base \
You respond in a friendly and helpful tone, giving clear and very concise answers \
In your first response you politely include a question that helps you clarify the question the user is asking, \
and you answer again based on this clarification \
If you don't know the answer to a question you invite the user to send an email to support@dekalin.de"""}]
def question_answer(question):
    """Answer *question* from the saved vector index, streaming the reply.

    Yields the answer as a growing prefix, one character at a time with a
    0.02 s pause, so the Gradio UI shows a typewriter effect.
    """
    index = GPTSimpleVectorIndex.load_from_disk('index.json')
    response = index.query(question, response_mode="compact")
    answer = response.response.strip()
    # NOTE(review): these keys are not the OpenAI chat format ("role"/"content");
    # the log is only kept in memory and never sent back to the model.
    messages.append({"User asks": question, "System response": answer})
    # Stream progressively longer prefixes of the answer (replaces the old
    # while True / break and single-iteration for-loop wrappers, and the
    # quadratic per-character ''.join accumulation).
    for i in range(1, len(answer) + 1):
        time.sleep(0.02)
        yield answer[:i]
# os.environ["OPENAI_API_KEY"] = 'OPENAI_API_KEY'
# Re-read the key (redundant with the top-of-file assignment; harmless).
openai.api_key = os.getenv("OPENAI_API_KEY")

# Build (or rebuild) the vector index from the prepared documents on startup.
construct_index("context_data/data/done")

demo = gr.Interface(
    fn=question_answer,
    inputs="text",
    outputs="text",
    cache_examples=False,
    examples=[
        ['What do I use to install a sky hatch?'],
        ['What Dekalin product is best to install an antenna?'],
        ['When do I use 8936? '],
        ['How do I fix loose screws in the wall? '],
        ['What do I use to attach a shelf or a hook to the wall?'],
        ['What do I use to grout the shower?'],
        ['How do I repair damage to the outside wall of my vehicle?'],
        ['How do I repair a tear in my bumper?'],
    ],  # NOTE: removed an accidental duplicate of the "outside wall" example
    title="Dekalin helpful AI robot assistant",
    css="footer {visibility: hidden}",
)
demo.queue()  # queuing is required for the generator (streaming) output
# NOTE(review): hard-coded credentials — consider loading auth from env vars.
demo.launch(inline=False, auth=('dekalin', 'dekalin'))