# llama_index_demo / functions.py
import streamlit as st
import os, time
from llama_index.readers.schema.base import Document
from llama_index import LLMPredictor, GPTSimpleVectorIndex, PromptHelper, GPTTreeIndex
from langchain import OpenAI
import pandas as pd

def generate_response(prompt, index, llm_predictor, test=False):
    # In test mode, echo the prompt back without calling the LLM.
    if test:
        return prompt
    else:
        response = index.query(prompt, llm_predictor=llm_predictor)
        return response

def update_chat_state():
    # Move the submitted query into chat_sent and clear the input box.
    st.session_state.chat_sent = st.session_state.chat_input
    st.session_state.chat_input = ''

def get_chat_input():
    # Initialise chat_sent so the first rerun does not raise an AttributeError.
    if "chat_sent" not in st.session_state:
        st.session_state.chat_sent = ''
    st.text_input(label="Write your query here", key="chat_input", on_change=update_chat_state)
    return st.session_state.chat_sent

def config_llm_predictor():
    llm_predictor = LLMPredictor(llm=OpenAI(temperature=0.3, model_name="gpt-3.5-turbo"))
    max_input_size = 8192
    num_output = 256  # must be an int; 256 is an assumed default (was None)
    max_chunk_overlap = 20
    # Note: prompt_helper is built but not returned or passed on anywhere yet.
    prompt_helper = PromptHelper(max_input_size=max_input_size, num_output=num_output, max_chunk_overlap=max_chunk_overlap)
    return llm_predictor

@st.cache(allow_output_mutation=True)
def load_index(json_index):
    #index = GPTTreeIndex.load_from_disk(json_index)
    index = GPTSimpleVectorIndex.load_from_disk(json_index)
    return index
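
# For reference, a minimal sketch of how the JSON index loaded above could have been
# built with this same legacy llama_index API. The function name, the output path and
# the Document(...) construction are assumptions, not part of the original app.
def build_index(reviews, out_path="index.json"):
    # Wrap each review string in a Document and build a simple vector index from them.
    documents = [Document(text) for text in reviews]
    index = GPTSimpleVectorIndex(documents)
    index.save_to_disk(out_path)
    return index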

@st.cache(allow_output_mutation=True)
def get_data():
    data = pd.read_csv("data/appstore_reviews.csv")
    data = data[["application", "review", "rating", "date"]]
    data["application"] = data["application"].str.lower()
    data = data[~data["review"].isna()]
    return data

def set_api_key(api_key_file):
    with open(api_key_file, "r") as file:
        openai_key = file.read().replace("\n", "")
    # Set environment
    os.environ["OPENAI_API_KEY"] = openai_key

def get_search(data):
    input_text = st.text_input("Search in reviews:", key="search")
    # Case-insensitive substring match; regex=False so characters like '(' don't break the search.
    output = data[data["review"].str.lower().str.contains(input_text.lower(), regex=False)]
    return output
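
# A minimal usage sketch of how these helpers could be wired together in the Streamlit
# entry-point script (the file names "openai_key.txt" and "index.json" are assumptions):
#
#     set_api_key("openai_key.txt")
#     index = load_index("index.json")
#     llm_predictor = config_llm_predictor()
#     data = get_data()
#     prompt = get_chat_input()
#     if prompt:
#         st.write(generate_response(prompt, index, llm_predictor))
#     st.dataframe(get_search(data))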