Commit 8922c23 · arabellastrange committed
Parent(s): de33d74

fixing key calls
Files changed:
- app.py +3 -4
- generate_response.py +0 -1
- llmsearch/utilityV2.py +0 -1
- utils.py +0 -5
- web_search.py +0 -1
app.py
CHANGED
@@ -1,13 +1,12 @@
 import logging
+import os
 from time import asctime
 
 import gradio as gr
 from llama_index.core import Document, VectorStoreIndex
-from llama_index.core.evaluation import SemanticSimilarityEvaluator, FaithfulnessEvaluator
 
 from generate_response import generate_chat_response_with_history, set_llm, is_search_query, condense_question, \
-    generate_chat_response_with_history_rag_return_response
-from utils import read_file
+    generate_chat_response_with_history_rag_return_response
 from web_search import search
 
 API_KEY_PATH = "../keys/gpt_api_key.txt"
@@ -98,7 +97,7 @@ if __name__ == '__main__':
     logger.addHandler(filehandler)  # set the new handler
     logger.setLevel(logging.INFO)
 
-    api_key =
+    api_key = os.getenv('gpt_api_key')
 
     # GPT-4 Turbo. The latest GPT-4 model intended to reduce cases of “laziness” where the model doesn’t complete
     # a task. Returns a maximum of 4,096 output tokens. Link:
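Taken together with the utils.py deletion below, this change swaps a file-based key lookup (read_file on API_KEY_PATH) for an environment-variable lookup. A minimal sketch of the new pattern, assuming the gpt_api_key variable is exported before the app starts; the missing-key guard is an illustrative addition, not part of this commit:

import os

# Expects the key in the environment, e.g.:
#   export gpt_api_key=<your OpenAI key>
api_key = os.getenv('gpt_api_key')
if api_key is None:
    # Illustrative guard only: fail fast instead of passing None downstream.
    raise RuntimeError("gpt_api_key is not set in the environment")

os.getenv returns None rather than raising when the variable is absent, which is why a guard like this can be worth adding.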
generate_response.py
CHANGED
@@ -118,7 +118,6 @@ def is_closing(message):
 
 
 def is_search_query(message):
-
     response = llm.complete(
         f'Is the user message a request for factual information? Answer True or False only. For example: \n User '
         f'message: "Where do watermelons grow?" \n Assistant response: True \n User message "Do you like watermelons?" '
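is_search_query asks the model to reply with a bare True or False, and llm.complete returns a response object whose text still has to be coerced to a Python bool. A minimal sketch of that coercion, assuming a llama_index-style llm object with a complete method; parse_bool is a hypothetical helper, not code from this repo:

def parse_bool(text: str) -> bool:
    # Treat anything that doesn't clearly start with "true" as False.
    return text.strip().lower().startswith("true")

def is_search_query(message: str, llm) -> bool:
    response = llm.complete(
        'Is the user message a request for factual information? '
        f'Answer True or False only.\nUser message: "{message}"'
    )
    # str() flattens the completion object to its text before parsing.
    return parse_bool(str(response))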
llmsearch/utilityV2.py
CHANGED
@@ -12,7 +12,6 @@ import openai
 from tenacity import *
 
 # from agents.utils import read_file
-from utils import read_file
 
 logger = logging.getLogger("agent_logger")
 openai.api_key = os.getenv('gpt_api_key')
utils.py
DELETED
@@ -1,5 +0,0 @@
-def read_file(path):
-    txt = open(path, "r")
-    file = txt.read()
-    txt.close()
-    return file
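With the key now read from the environment, this helper has no remaining callers, so the whole module goes. For reference, if a file-reading helper were still needed, the idiomatic form uses a context manager; a sketch, not part of this repo:

def read_file(path: str) -> str:
    # "with" closes the file even if read() raises, unlike the
    # deleted version's manual open/close pair.
    with open(path, "r") as f:
        return f.read()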
web_search.py
CHANGED
@@ -18,7 +18,6 @@ from llmsearch import site_stats
 # this import style works in pycharm
 from llmsearch import utilityV2 as ut
 
-from urllib.request import urlopen
 # this import style works on sever + vs code
 # import utils
 # from llmsearch import google_search_concurrent as gs