File size: 5,806 Bytes
bac90e2
f967233
 
4a6ffa9
f967233
4a6ffa9
 
 
f967233
4a6ffa9
f967233
4a6ffa9
 
 
 
 
 
f967233
4a6ffa9
f967233
4a6ffa9
 
f967233
 
 
 
4a6ffa9
f967233
 
4a6ffa9
f967233
 
4a6ffa9
f967233
 
 
 
4a6ffa9
f967233
 
07e3ec5
f967233
07e3ec5
38a30d6
f967233
38a30d6
f967233
38a30d6
 
f967233
38a30d6
 
4a6ffa9
 
 
 
 
 
 
 
 
 
 
 
ef4f099
e937327
 
cc4707b
1166206
cc4707b
e937327
f967233
 
 
 
 
 
 
 
4a6ffa9
 
 
 
 
 
 
 
 
 
 
 
 
dff518b
f967233
38a30d6
 
 
dff518b
38a30d6
f967233
 
 
dff518b
 
4457702
9b13d7e
 
 
f967233
9b13d7e
 
f967233
 
 
 
9b13d7e
dff518b
dc597fc
c423df3
dc597fc
 
 
7cc4123
 
ee4a8f7
dc597fc
 
a68dd63
 
 
9fa3305
0cc585c
a68dd63
 
 
dff518b
 
 
 
 
 
 
 
4a6ffa9
 
 
 
 
 
a77f178
4a6ffa9
 
e2c7632
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
import os
import string
from typing import Any, Dict, List, Tuple, Union

import chromadb
import numpy as np
import openai
import pandas as pd
import requests
import streamlit as st
from datasets import load_dataset
from langchain.document_loaders import TextLoader
from langchain.embeddings.sentence_transformer import SentenceTransformerEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import Chroma
from scipy.spatial.distance import cosine

from utils.helper_functions import *

openai.api_key = os.environ["OPENAI_API_KEY"]


# Load the RAG-ready Q&A dataset (questions/answers scraped from the YSA site).
dataset = load_dataset(
    "eagle0504/youthless-homeless-shelter-web-scrape-dataset-qa-formatted"
)

# Initialize an in-memory ChromaDB client.
client = chromadb.Client()

# Build a (practically) unique collection name: random number + random suffix.
# NOTE: dtype=np.int64 is required — np.random.randint defaults to the C-long
# dtype, which is 32-bit on some platforms (e.g. Windows), and high=10**10
# would overflow it and raise ValueError. Integer bounds also avoid passing
# floats (1e9/1e10), which newer NumPy versions reject.
random_number: int = np.random.randint(low=10**9, high=10**10, dtype=np.int64)

# Ten random characters drawn from uppercase letters and digits.
random_string: str = "".join(
    np.random.choice(list(string.ascii_uppercase + string.digits), size=10)
)

# Combine the random number and random string into one identifier.
combined_string: str = f"{random_number}{random_string}"

# Create a fresh ChromaDB collection per app run, named by the identifier,
# so concurrent sessions do not collide.
collection = client.create_collection(combined_string)


# Embed and index every question; answers are looked up later by row index,
# so document IDs are simply the stringified row numbers.
L = len(dataset["train"]["questions"])
collection.add(
    ids=[str(i) for i in range(L)],  # IDs must be strings
    documents=dataset["train"]["questions"],  # similarity search runs on questions
    metadatas=[{"type": "support"} for _ in range(L)],
)

# ---- Page header and persistent chat transcript ----
st.title("Youth Homelessness Chatbot")

# Chat history lives in session state so it survives Streamlit reruns.
if "messages" not in st.session_state:
    st.session_state.messages = []

# Replay the stored conversation on every rerun.
for msg in st.session_state.messages:
    with st.chat_message(msg["role"]):
        st.markdown(msg["content"])

# ---- Sidebar: instructions, threshold input, and reset button ----
sidebar = st.sidebar
sidebar.markdown(
    """
    
    ### Instructions:
    
    This app guides you through YSA's website, utilizing a RAG-ready Q&A dataset [here](https://huggingface.co/datasets/eagle0504/youthless-homeless-shelter-web-scrape-dataset-qa-formatted) for chatbot assistance. 🤖 Enter a question, and it finds similar ones in the database, offering answers with a distance score to gauge relevance—the lower the score, the closer the match. 🎯 For better accuracy and to reduce errors, user feedback helps refine the database. ✨
    
    """
)
sidebar.success(
    "Please enter a distance threshold (we advise to set it around 0.2)."
)
# Distance cutoff used below to decide whether a retrieved match is relevant.
special_threshold = sidebar.number_input(
    "Insert a number", value=0.2, placeholder="Type a number..."
)
# Reset the conversation when the user asks for it.
if sidebar.button("Clear Conversation", key="clear"):
    st.session_state.messages = []

# React to user input
if prompt := st.chat_input("Tell me about YSA"):
    # Echo the user message and record it in the session history.
    st.chat_message("user").markdown(prompt)
    st.session_state.messages.append({"role": "user", "content": prompt})

    question = prompt
    with st.spinner("Wait for it..."):
        # Retrieve the 5 most similar stored questions from the vector store.
        results = collection.query(query_texts=question, n_results=5)
        idx = [int(i) for i in results["ids"][0]]
        ref = pd.DataFrame(
            {
                "idx": idx,
                "questions": [dataset["train"]["questions"][i] for i in idx],
                "answers": [dataset["train"]["answers"][i] for i in idx],
                "distances": results["distances"][0],
            }
        )
        # Keep only matches below the user-chosen distance threshold
        # (lower distance == closer match). `.copy()` detaches the result
        # from `ref` so the row insertion below cannot trigger a
        # SettingWithCopyWarning / silently write to a temporary.
        filtered_ref = ref[ref["distances"] < special_threshold]
        if filtered_ref.shape[0] > 0:
            st.success("There are highly relevant information in our database.")
            ref_from_db_search = filtered_ref["answers"].str.cat(sep=" ")
            final_ref = filtered_ref.copy()
        else:
            st.warning(
                "The database may not have relevant information to help your question so please be aware of hallucinations."
            )
            ref_from_db_search = ref["answers"].str.cat(sep=" ")
            final_ref = ref.copy()

        # Ask the fine-tuned model as an additional candidate answer; fall
        # back to a friendly message if the inference endpoint is down.
        # Catch Exception (not a bare except) so KeyboardInterrupt/SystemExit
        # still propagate.
        try:
            llm_response = llama2_7b_ysa(question)
        except Exception:
            llm_response = "Sorry, the inference endpoint is temporarily down. 😔"

        # Append the fine-tuned model's answer as an extra reference row
        # (distance 0), then shift the index so labels stay non-negative.
        final_ref.loc[-1] = ["from_llm", question, llm_response, 0]
        final_ref.index = final_ref.index + 1

        # Score every reference answer with an independent AI judge.
        # Iterating the column values directly (instead of indexing by label)
        # is robust to whatever index labels the filtering left behind.
        final_ref["ai_judge"] = [
            calculate_sts_openai_score(question, answer)
            for answer in final_ref["answers"]
        ]

        # Ground the final answer in the retrieved context.
        engineered_prompt = f"""
            Based on the context: {ref_from_db_search},
            answer the user question: {question}.
            Answer the question directly (don't say "based on the context, ...")
        """

        response = call_chatgpt(engineered_prompt)

    # Display assistant response in chat message container, with the
    # reference table tucked into an expander.
    with st.chat_message("assistant"):
        with st.spinner("Wait for it..."):
            st.markdown(response)
            with st.expander("See reference:"):
                st.table(final_ref)
    # Add assistant response to chat history.
    st.session_state.messages.append({"role": "assistant", "content": response})