# pulseDemo / app_tree.py
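"""
Gradio demo that answers natural-language questions over CSV "pulse" data.

Each CSV under ./csv/ is read with the PandasCSVReader loader, indexed into a
GPTTreeIndex, persisted under treeIndex/, and cached in memory so that
questions from the web UI are answered against the pre-built index.
"""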
from llama_index import GPTTreeIndex, download_loader
from pathlib import Path
import gradio as gr
import sys
import os
import logging
logging.basicConfig(format='%(asctime)s %(levelname)s:%(message)s', level=os.environ.get("LOGLEVEL", "DEBUG"))
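# Base names of the CSV files under ./csv/ that are offered in the dropdown.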
#dataFiles = ["RetroApril","RetroMarch", "Snowflake", "Datadog", "Databricks", "SplunkProducts", "SplunkEnterprise"]
dataFiles = ["Lastpass", "RetroApril", "RetroMarch"]
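# In-memory cache mapping dataset name -> loaded GPTTreeIndex.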
cache = {}
def indexFile(filePath):
    """
    Build a GPTTreeIndex from a CSV file and persist it under treeIndex/.
    """
    PandasCSVReader = download_loader("PandasCSVReader")
    loader = PandasCSVReader()
    documents = loader.load_data(file=Path('./csv/' + filePath + '.csv'))
    index = GPTTreeIndex.from_documents(documents)
    index.save_to_disk("treeIndex/" + filePath + '.json')
def loadData():
    """
    Load indices from disk for improved performance
    """
    for file in dataFiles:
        print("Loading file " + file)
        indexFilePath = "treeIndex/" + file + '.json'
        if not os.path.exists(indexFilePath):
            indexFile(file)
        cache[file] = GPTTreeIndex.load_from_disk(indexFilePath)
def chatbot(indexName, input_text):
    """
    Chatbot function that takes in a prompt and returns a response
    """
    index = cache[indexName]
    response = index.query(input_text, response_mode="compact")
    return response.response
log = logging.getLogger(__name__)
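
# Build or load every index at startup so chatbot queries hit the in-memory cache.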
loadData()
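
# Gradio UI: a dropdown to pick the indexed dataset and a textbox for the question.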
iface = gr.Interface(fn=chatbot,
                     inputs=[
                         gr.Dropdown(dataFiles,
                                     type="value", value="Lastpass", label="Select Pulse Data"),
                         gr.Textbox(lines=7, label="Ask any question", placeholder='What is the summary?')],
                     outputs="text",
                     title="NLP Demo for Chat Interface")
iface.launch(share=False)
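
# Example (hypothetical) usage without the Gradio UI, e.g. from a Python shell:
#   from app_tree import loadData, chatbot
#   loadData()
#   print(chatbot("Lastpass", "What is the summary?"))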