import gradio as gr
import os
import logging
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain.text_splitter import CharacterTextSplitter
from langchain.llms import OpenAI
from langchain.chains import ConversationalRetrievalChain
from langchain.document_loaders import DirectoryLoader
from langchain.memory import ConversationBufferMemory

# Make sure the log directory exists before basicConfig opens the file, and use
# INFO so the query/response logging below is actually written out.
os.makedirs('./Logs', exist_ok=True)
logging.basicConfig(filename='./Logs/bot.log', level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
# Load the knowledge base: every .txt, .pdf, and .docx file under ./LVE/
txt_loader = DirectoryLoader('./LVE/', glob="**/*.txt")
pdf_loader = DirectoryLoader('./LVE/', glob="**/*.pdf")
doc_loader = DirectoryLoader('./LVE/', glob="**/*.docx")
loaders = [pdf_loader, txt_loader, doc_loader]
documents = []
for loader in loaders:
    documents.extend(loader.load())
print(f"Total # of documents: {len(documents)}")
# Split documents into 500-character chunks and index them in an in-memory Chroma store
text_splitter = CharacterTextSplitter(chunk_size=500, chunk_overlap=0)
documents = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
vectorstore = Chroma.from_documents(documents, embeddings)
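# Optional sketch (not part of the original flow): persist the index to disk so the
# documents are not re-embedded on every startup. `persist_directory` is a supported
# Chroma argument; the "./chroma_db" path is an assumption, adjust as needed.
# vectorstore = Chroma.from_documents(documents, embeddings, persist_directory="./chroma_db")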
#memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
qa = ConversationalRetrievalChain.from_llm(OpenAI(temperature=0), vectorstore.as_retriever())
#chat_history = []
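# Optional sketch: to make the chain remember prior turns, pass the buffer memory
# into the chain instead of sending an empty chat_history on every call. This mirrors
# LangChain's documented memory hookup; enabling it is an assumption, not the
# original behavior, and the callback would then pass only {"question": prompt}.
# qa = ConversationalRetrievalChain.from_llm(
#     OpenAI(temperature=0),
#     vectorstore.as_retriever(),
#     memory=ConversationBufferMemory(memory_key="chat_history", return_messages=True),
# )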
def submit_callback(user_message):
    default_prompt = "For answers, refer to the provided content. If no answer is found, contact lveswim@gmail.com."
    # Join the instruction and the user's question with a space so they don't run together
    prompt = default_prompt + " " + user_message
    # Log the user's query at INFO level
    logging.info(f"User Query: {user_message}")
    input_data = {"question": prompt, "chat_history": []}  # The chain expects a "chat_history" key
    response = qa(input_data)
    # Log the chatbot's response at INFO level
    logging.info(f"Chatbot Response: {response['answer']}")
    return response["answer"]
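# Quick sanity check (hypothetical usage, not part of the original script):
# print(submit_callback("What are the practice times for each age group?"))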
iface = gr.Interface(
    fn=submit_callback,
    inputs=gr.inputs.Textbox(lines=2, label="Enter your query"),
    outputs=gr.outputs.Textbox(label="Chatbot Response"),
    #outputs=gr.outputs.HTML(label="Chatbot Response"),
    title="LVE Torpedoes Chatbot",
    layout="vertical",
    description="Enter your query to chat with the LVET chatbot",
    examples=[
        ["What are the practice times for each age group?"],
        ["What are the required fields to set up a meet in Touchpad?"],
        ["Dryland workout for swimmers?"],
        ["What are the eligibility criteria for the Mini Torpedoes program?"],
        ["What is the eligibility to participate in the LVET Swim Team?"],
        ["How many volunteer hours are required per family during the swim season?"],
        ["How can I receive credit hours for the official training?"],
        ["How are swimmers grouped for practice?"],
        ["When do evaluations take place for new swimmers?"],
        ["Who are LVET's Board Members?"],
        ["What are the regular season meet start times?"],
        ["How can I contact LVET's Board Members?"],
        ["What is the penalty for not meeting the required volunteer hours?"],
        ["Volunteer Hours?"],
        ["What types of events can a swimmer enter and how many?"],
        ["How do I sign up for volunteer jobs to fulfill my volunteer hours?"],
        ["Volunteer jobs that do not require certification or prior experience"],
        ["What are the responsibilities of an Age Group Coordinator?"],
        ["How do I commit my swimmer for meets/events?"],
        ["How are timers distributed between the host and visiting teams in dual meets?"],
        ["What happens if a watch malfunctions during an event?"],
        ["What is the difference between the Divisional Meets and the All Star Meet?"],
        ["What is the ODSL Scholarship Program and what's the award?"]
    ],
    theme="default"
)
iface.launch()
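# Launch options (shown for reference, not in the original call): share=True creates
# a temporary public URL, and server_name="0.0.0.0" exposes the app on the local network.
# iface.launch(share=True, server_name="0.0.0.0")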