Update app.py
app.py CHANGED
@@ -24,7 +24,7 @@ from openai import OpenAI
 from llama_index.core import StorageContext
 from llama_index.core import load_index_from_storage
 from llama_index.llms.openai import OpenAI
-
+from llama_index.core import Settings
 
 # add datetime and ip to the log file
 from datetime import datetime;
@@ -41,7 +41,7 @@ session_id = uuid4()
 # deprecated
 storage_context = StorageContext.from_defaults(persist_dir='./')
 # gpt-3.5_turbo is the current default model
-
+Settings.llm = OpenAI(temperature=0.5, model="gpt-4")
 #service_context = ServiceContext.from_defaults(llm=llm_predictor)
 #index = load_index_from_storage(storage_context, service_context=service_context)
 index = load_index_from_storage(storage_context)
@@ -67,7 +67,7 @@ class Chatbot:
         )
 
     def generate_response(self, user_input):
-        query_engine = index.as_query_engine(llm=
+        query_engine = index.as_query_engine(llm=llm)
         response = query_engine.query(user_input)
 
         # generate response