import os
import string
from typing import Any, Dict, List, Tuple, Union

import chromadb
import numpy as np
import openai
import pandas as pd
import requests
import streamlit as st
from datasets import load_dataset
from langchain.document_loaders import TextLoader
from langchain.embeddings.sentence_transformer import SentenceTransformerEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import Chroma
from scipy.spatial.distance import cosine
from utils.helper_functions import *
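
# Helpers assumed to come from utils.helper_functions (not shown in this file);
# the signatures below are inferred from how they are called later:
#   call_chatgpt(prompt) -> str                 # wraps an OpenAI chat completion
#   calculate_sts_openai_score(q, a) -> float   # semantic similarity of two texts
#   llama2_7b_ysa(question) -> str              # calls a fine-tuned Llama-2 endpoint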

openai.api_key = os.environ["OPENAI_API_KEY"]

# Load the YSA Q&A dataset from the Hugging Face Hub.
dataset = load_dataset(
    "eagle0504/youthless-homeless-shelter-web-scrape-dataset-qa-formatted"
)

# Initialize a new ChromaDB client.
client = chromadb.Client()
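# Note: the default chromadb.Client() is in-memory, so the index built below
# is rebuilt from scratch on every script run.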

# Generate a random number between 1 billion and 10 billion.
random_number: int = np.random.randint(low=int(1e9), high=int(1e10))

# Generate a random string of 10 uppercase letters and digits.
random_string: str = "".join(
    np.random.choice(list(string.ascii_uppercase + string.digits), size=10)
)

# Combine the random number and random string into one identifier.
combined_string: str = f"{random_number}{random_string}"

# Create a new collection in ChromaDB with the combined string as its name.
collection = client.create_collection(combined_string)
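# Since Streamlit reruns this script on every interaction, the random name
# should keep each rerun from colliding with a previously created collection.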

# Embed and store every question from the training split for retrieval.
L = len(dataset["train"]["questions"])
collection.add(
    ids=[str(i) for i in range(L)],  # Chroma IDs must be strings
    documents=dataset["train"]["questions"],  # the questions are the searchable documents
    metadatas=[{"type": "support"} for _ in range(L)],
)
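
# Illustrative shape of a query against this collection (values hypothetical):
#   collection.query(query_texts=["How do I get a bed?"], n_results=2)
#   -> {"ids": [["12", "40"]], "documents": [["...", "..."]],
#       "distances": [[0.18, 0.35]], "metadatas": [[{...}, {...}]], ...}
# Each top-level list holds one inner list per query text.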

st.title("Youth Homelessness Chatbot")

# Initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = []

# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

st.sidebar.markdown(
    """
    ### Instructions:
    This app guides you through YSA's website, utilizing a RAG-ready Q&A dataset [here](https://huggingface.co/datasets/eagle0504/youthless-homeless-shelter-web-scrape-dataset-qa-formatted) for chatbot assistance. 🤖 Enter a question, and it finds similar ones in the database, offering answers with a distance score to gauge relevance: the lower the score, the closer the match. 🎯 For better accuracy and to reduce errors, user feedback helps refine the database. ✨
    """
)
st.sidebar.success(
    "Please enter a distance threshold (we advise setting it around 0.2)."
)
special_threshold = st.sidebar.number_input(
    "Insert a distance threshold", value=0.2, placeholder="Type a number..."
)
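# A smaller threshold admits only very close matches; a larger one admits
# more (but noisier) context into the prompt built below.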

clear_button = st.sidebar.button("Clear Conversation", key="clear")
if clear_button:
    st.session_state.messages = []

# React to user input
if prompt := st.chat_input("Tell me about YSA"):
    # Display user message in chat message container
    st.chat_message("user").markdown(prompt)
    # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})
    question = prompt
    with st.spinner("Wait for it..."):
        # Retrieve the 5 nearest stored questions to the user's query.
        results = collection.query(query_texts=[question], n_results=5)
        idx = [int(i) for i in results["ids"][0]]  # IDs come back as strings
        ref = pd.DataFrame(
            {
                "idx": idx,
                "questions": [dataset["train"]["questions"][i] for i in idx],
                "answers": [dataset["train"]["answers"][i] for i in idx],
                "distances": results["distances"][0],
            }
        )
        # Keep only matches below the user-chosen distance threshold.
        filtered_ref = ref[ref["distances"] < special_threshold]
        if filtered_ref.shape[0] > 0:
            st.success("There is highly relevant information in our database.")
            ref_from_db_search = filtered_ref["answers"].str.cat(sep=" ")
            final_ref = filtered_ref.copy()
        else:
            st.warning(
                "The database may not have information relevant to your question, so please be aware of possible hallucinations."
            )
            ref_from_db_search = ref["answers"].str.cat(sep=" ")
            final_ref = ref.copy()
        # Ask the fine-tuned model for its own answer as an extra candidate row.
        try:
            llm_response = llama2_7b_ysa(question)
        except Exception:
            llm_response = "Sorry, the inference endpoint is temporarily down. 😔"
        finetuned_llm_guess = ["from_llm", question, llm_response, 0]
        # Prepend the model's guess: loc[-1] adds the row, the shift-and-sort
        # moves it to the top, and reset_index keeps positions 0..n-1 valid.
        final_ref.loc[-1] = finetuned_llm_guess
        final_ref.index = final_ref.index + 1
        final_ref = final_ref.sort_index().reset_index(drop=True)
        # Add an independent AI-judge rating for each candidate answer.
        independent_ai_judge_score = []
        for i in range(final_ref.shape[0]):
            this_content = final_ref["answers"].iloc[i]
            this_score = calculate_sts_openai_score(question, this_content)
            independent_ai_judge_score.append(this_score)
        final_ref["ai_judge"] = independent_ai_judge_score
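        # Note: ai_judge is assumed to be a similarity score (presumably
        # embedding-based), so higher means more relevant; this is the
        # opposite of "distances", where lower means closer.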
        engineered_prompt = f"""
        Based on the context: {ref_from_db_search},
        answer the user question: {question}.
        Answer the question directly (don't say "based on the context, ...")
        """
        response = call_chatgpt(engineered_prompt)
    # Display assistant response in chat message container
    with st.chat_message("assistant"):
        with st.spinner("Wait for it..."):
            st.markdown(response)
            with st.expander("See reference:"):
                st.table(final_ref)
    # Add assistant response to chat history
    st.session_state.messages.append({"role": "assistant", "content": response})