"""Streamlit app: chat with app-store reviews via a per-application vector index.

Top-level script by design — Streamlit re-executes the whole file on every
user interaction, so all state that must survive reruns lives in
``st.session_state``.
"""

import streamlit as st
import os
from streamlit_chat import message
from llama_index.readers.schema.base import Document
from llama_index import LLMPredictor, GPTSimpleVectorIndex, PromptHelper, GPTTreeIndex
from langchain import OpenAI
import functions as f
import pandas as pd

#f.set_api_key("openai_key.txt") # Not needed as it's set in the huggingface environment.

llm_predictor = f.config_llm_predictor()

# Load one pre-built vector index per application from disk.
# Keyed by the file's stem (name without extension).
base_index = {}
for application in os.listdir("indices_vector"):
    if application == ".DS_Store":  # macOS metadata file, not an index
        continue
    name = application.split(".")[0]
    base_index[name] = f.load_index(f"indices_vector/{application}")

# Sort applications alphabetically so the selectbox order is stable.
base_index = dict(sorted(base_index.items()))

df = f.get_data()

# Creating the chatbot interface
st.title("Chat with your reviews")

application = st.selectbox("Choose application", options=list(base_index.keys()))
index = base_index[application]
# Review rows for the chosen application only (data stores names lowercased).
data = df[df["application"] == application.lower()]

# Storing the chat — initialise session state on first run.
if "generated" not in st.session_state:
    st.session_state["generated"] = []  # bot responses, oldest first
if "past" not in st.session_state:
    st.session_state["past"] = []  # user prompts, oldest first
if 'chat_sent' not in st.session_state:
    st.session_state.chat_sent = ''

tab1, tab2 = st.tabs(["Chat", "Reviews"])

with tab1:
    chat_input = f.get_chat_input()
    if chat_input:
        output = f.generate_response(chat_input, index, llm_predictor)
        output = str(output).strip()
        # store the output
        st.session_state.past.append(chat_input)
        st.session_state.generated.append(output)
        # Empty state so that chat input is not accidentally resent on rerun
        st.session_state.chat_sent = ''

    # Render the conversation newest-first; keys must be unique per widget.
    if st.session_state["generated"]:
        for i in reversed(range(len(st.session_state["generated"]))):
            message(st.session_state["generated"][i], key=str(i))
            message(st.session_state["past"][i], is_user=True, key=str(i) + "_user")

with tab2:
    output = f.get_search(data)
    if len(output) > 0:
        # Cap rendering at 100 reviews to keep the page responsive.
        for _, info in output[:100].iterrows():
            # date column looks like "YYYY-MM-DD HH:MM:SS"; show the date part only
            st.write(info["review"], info["rating"], info["date"].split(" ")[0])
            st.write("______")