Update app.py
app.py
CHANGED
@@ -11,6 +11,7 @@
 import openai
 import json
 import gradio as gr
+import os
 from openai import OpenAI
 #from langchain_community.chat_models import ChatOpenAI
 #from langchain_community.chat_models.openai import ChatOpenAI
@@ -18,7 +19,7 @@ from openai import OpenAI
 # rebuild storage context and load knowledge index
 # from llama_index import StorageContext, load_index_from_storage, LLMPredictor, ServiceContext
 from llama_index.core import StorageContext
-from llama_index.core import load_index_from_storage
+from llama_index.core import load_index_from_storage
 from llama_index.llms.openai import OpenAI
 #from llama_index.core import Settings
 
@@ -28,7 +29,7 @@ import socket;
 # deprecated
 storage_context = StorageContext.from_defaults(persist_dir='./')
 # gpt-3.5_turbo is the current default model
-llm_predictor = OpenAI(temperature=0.5, model_name="gpt-4")
+#llm_predictor = OpenAI(temperature=0.5, model_name="gpt-4")
 #service_context = ServiceContext.from_defaults(llm=llm_predictor)
 #index = load_index_from_storage(storage_context, service_context=service_context)
 index = load_index_from_storage(storage_context)
@@ -41,7 +42,7 @@ class Chatbot:
         #self.history_file = f"./chat_log.json"
 
     def generate_response(self, user_input):
-        query_engine = index.as_query_engine(
+        query_engine = index.as_query_engine()
         response = query_engine.query(user_input)
 
         # generate response
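For context, a minimal sketch of the corrected code path after this commit. It assumes the knowledge index was previously persisted to ./ (hence StorageContext.from_defaults(persist_dir='./')) and that OPENAI_API_KEY is available in the Space's environment (a likely reason for the new import os); the Gradio wiring at the end is illustrative, not necessarily this Space's actual interface:

import os
import gradio as gr
from llama_index.core import StorageContext, load_index_from_storage

# Assumes OPENAI_API_KEY is set (e.g. as a Space secret); llama_index's
# default OpenAI LLM and embedding model read it from the environment.
assert "OPENAI_API_KEY" in os.environ

# Rebuild the storage context and load the persisted knowledge index from ./
storage_context = StorageContext.from_defaults(persist_dir="./")
index = load_index_from_storage(storage_context)

class Chatbot:
    def generate_response(self, user_input):
        # The commit's fix: close the previously unterminated
        # index.as_query_engine( call.
        query_engine = index.as_query_engine()
        response = query_engine.query(user_input)
        return str(response)

bot = Chatbot()
# Hypothetical wiring: expose generate_response through a simple Gradio app.
demo = gr.Interface(fn=bot.generate_response, inputs="text", outputs="text")
demo.launch()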