add foolproof AI API validation
app.py
CHANGED
@@ -4,8 +4,9 @@ from config import STRONG_API_BASE, STRONG_API_KEY, STRONG_MODEL
 from util import is_valid_url
 from util import mylogger
 from util import stream_together
+from util import checkAPI
 from taskNonAI import extract_url, file_to_html, compile_pdf
-
+
 ## load data
 from _data_test import mock_jd, mock_cv
 ## ui
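This hunk only adds the import; where app.py actually calls checkAPI is outside the lines shown here. As a purely hypothetical illustration of the fail-fast pattern the commit title suggests, the check could run once at startup against the configured endpoint (STRONG_API_BASE and STRONG_API_KEY come from the hunk's context line; the call site itself is an assumption):

from config import STRONG_API_BASE, STRONG_API_KEY
from util import checkAPI

# Hypothetical call site, not part of this diff: abort early if the configured
# AI API is unreachable or the key is rejected.
checkAPI(STRONG_API_BASE, STRONG_API_KEY)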
taskAI.py
CHANGED
@@ -5,6 +5,7 @@ from llama_index.core.llms import ChatMessage # , MessageRole
 from llama_index.core import ChatPromptTemplate
 
 from util import mylogger
+from util import checkAPI
 
 logger = mylogger(__name__, "%(asctime)s:%(filename)s:%(levelname)s:%(message)s")
 ## define templates

@@ -81,6 +82,8 @@ class TaskAI(OpenAILike):
             log(f"use context window size: {window_size} for {model}")
             return window_size
 
+        checkAPI(api_base, api_key)
+
         super().__init__(
             api_base=api["base"],
             api_key=api["key"],
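Net effect of the taskAI.py change: the endpoint and key are validated before OpenAILike is initialized, so a bad configuration fails with a clear ValueError instead of a late request error. A minimal sketch of that behavior, assuming TaskAI takes the dict produced by util.zip_api; the constructor signature is inferred from the context lines, and the base URL, key, and model below are placeholders:

from taskAI import TaskAI
from util import zip_api

api = zip_api("https://api.openai.com/v1", "sk-obviously-wrong", "gpt-3.5-turbo")
try:
    TaskAI(api)  # checkAPI() now runs before super().__init__()
except ValueError as err:
    print(f"caught early: {err}")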
util.py
CHANGED
@@ -22,7 +22,7 @@ def mylogger(name, format, level=logging.INFO):
     return logger
 
 
-def count_token(text, encoding="cl100k_base"):
+def count_token(text, encoding="cl100k_base") -> int:
     return len(tiktoken.get_encoding(encoding).encode(text))
 
 

@@ -41,6 +41,9 @@ def is_valid_openai_api_key(api_base: str, api_key: str) -> bool:
 
     return response.status_code == 200
 
+def checkAPI(api_base: str, api_key: str):
+    if not is_valid_openai_api_key(api_base, api_key):
+        raise ValueError("Invalid API key, or (less likely) a fault on the AI provider's side. Did you set up your AI API properly? If you don't have an API key, get one from https://beta.openai.com/account/api-keys")
 
 def zip_api(api_base: str, api_key: str, model: str) -> dict[str, str]:
     return {"base": api_base, "key": api_key, "model": model}
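is_valid_openai_api_key itself is only partially visible above (only its final return line appears as context). A plausible implementation consistent with that line, probing the provider's /models endpoint with requests; the endpoint choice, headers, and timeout are assumptions, not taken from this diff:

import requests

def is_valid_openai_api_key(api_base: str, api_key: str) -> bool:
    # A cheap, read-only call that any OpenAI-compatible endpoint should accept.
    headers = {"Authorization": f"Bearer {api_key}"}
    try:
        response = requests.get(f"{api_base.rstrip('/')}/models", headers=headers, timeout=10)
    except requests.RequestException:
        return False
    return response.status_code == 200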