Add ServiceContext to the llama_index imports and instantiate it for use in index.query(), enabling more efficient and customizable responses
Browse files
- app.py: +7 −4
- requirements.txt: +1 −0
app.py
CHANGED
@@ -1,8 +1,7 @@
|
|
1 |
-
from llama_index import Document, SimpleDirectoryReader, GPTListIndex, GPTSimpleVectorIndex, LLMPredictor, PromptHelper
|
2 |
from llama_index import download_loader
|
3 |
-
from langchain import
|
4 |
from pathlib import Path
|
5 |
-
import openai
|
6 |
import gradio as gr
|
7 |
import sys
|
8 |
import os
|
@@ -11,6 +10,10 @@ dataFiles = ["RetroApril","RetroMarch", "Snowflake", "Datadog", "Databricks", "S
|
|
11 |
|
12 |
cache = {}
|
13 |
|
|
|
|
|
|
|
|
|
14 |
def indexFile(filePath):
|
15 |
PandasCSVReader = download_loader("PandasCSVReader")
|
16 |
loader = PandasCSVReader()
|
@@ -34,7 +37,7 @@ def chatbot(indexName, input_text):
|
|
34 |
Chatbot function that takes in a prompt and returns a response
|
35 |
"""
|
36 |
index = cache[indexName]
|
37 |
-
response = index.query(input_text, response_mode="compact")
|
38 |
return response.response
|
39 |
|
40 |
|
|
|
1 |
+
from llama_index import Document, SimpleDirectoryReader, GPTListIndex, GPTSimpleVectorIndex, LLMPredictor, PromptHelper, ServiceContext
|
2 |
from llama_index import download_loader
|
3 |
+
from langchain.chat_models import ChatOpenAI
|
4 |
from pathlib import Path
|
|
|
5 |
import gradio as gr
|
6 |
import sys
|
7 |
import os
|
|
|
10 |
|
11 |
cache = {}
|
12 |
|
13 |
+
prompt_helper = PromptHelper(4096, 256, 20)
|
14 |
+
llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo"))
|
15 |
+
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, prompt_helper=prompt_helper)
|
16 |
+
|
17 |
def indexFile(filePath):
|
18 |
PandasCSVReader = download_loader("PandasCSVReader")
|
19 |
loader = PandasCSVReader()
|
|
|
37 |
Chatbot function that takes in a prompt and returns a response
|
38 |
"""
|
39 |
index = cache[indexName]
|
40 |
+
response = index.query(input_text, response_mode="compact", service_context=service_context)
|
41 |
return response.response
|
42 |
|
43 |
|
requirements.txt
CHANGED
@@ -1,2 +1,3 @@
|
|
1 |
llama-index==0.5.25
|
|
|
2 |
transformers
|
|
|
1 |
llama-index==0.5.25
|
2 |
+
openai==0.27.4
|
3 |
transformers
|