Update app.py
app.py CHANGED
@@ -1,5 +1,5 @@
 import gradio as gr
-import logging, os, sys, time
+import logging, os, sys, threading, time
 
 from agent_langchain import agent_langchain
 from agent_llamaindex import agent_llamaindex
@@ -9,6 +9,8 @@ from trace import trace_wandb
 from dotenv import load_dotenv, find_dotenv
 _ = load_dotenv(find_dotenv())
 
+lock = threading.Lock()
+
 AGENT_OFF = "Off"
 AGENT_LANGCHAIN = "LangChain"
 AGENT_LLAMAINDEX = "LlamaIndex"
@@ -29,59 +31,62 @@ def invoke(openai_api_key, prompt, agent_option):
     if (agent_option is None):
         raise gr.Error("Use Agent is required.")
 
-    completion = ""
-    result = ""
-    callback = ""
-    err_msg = ""
-
-    try:
-        start_time_ms = round(time.time() * 1000)
-
-        if (agent_option == AGENT_LANGCHAIN):
-            completion, callback = agent_langchain(
-                config,
-                prompt
-            )
-
-            result = completion["output"]
-        elif (agent_option == AGENT_LLAMAINDEX):
-            result = agent_llamaindex(
-                config,
-                prompt
-            )
-        else:
-            client = OpenAI()
-
-            completion = client.chat.completions.create(
-                messages = [{"role": "user", "content": prompt}],
-                model = config["model"],
-                temperature = config["temperature"]
-            )
-
-            callback = completion.usage
-            result = completion.choices[0].message.content
-    except Exception as e:
-        err_msg = e
-
-        raise gr.Error(e)
-    finally:
-        end_time_ms = round(time.time() * 1000)
-
-        trace_wandb(
-            config,
-            agent_option,
-            prompt,
-            completion,
-            result,
-            callback,
-            err_msg,
-            start_time_ms,
-            end_time_ms
-        )
-
-    return result
+    with lock:
+        os.environ["OPENAI_API_KEY"] = openai_api_key
+
+        completion = ""
+        result = ""
+        callback = ""
+        err_msg = ""
+
+        try:
+            start_time_ms = round(time.time() * 1000)
+
+            if (agent_option == AGENT_LANGCHAIN):
+                completion, callback = agent_langchain(
+                    config,
+                    prompt
+                )
+
+                result = completion["output"]
+            elif (agent_option == AGENT_LLAMAINDEX):
+                result = agent_llamaindex(
+                    config,
+                    prompt
+                )
+            else:
+                client = OpenAI()
+
+                completion = client.chat.completions.create(
+                    messages = [{"role": "user", "content": prompt}],
+                    model = config["model"],
+                    temperature = config["temperature"]
+                )
+
+                callback = completion.usage
+                result = completion.choices[0].message.content
+        except Exception as e:
+            err_msg = e
+
+            raise gr.Error(e)
+        finally:
+            end_time_ms = round(time.time() * 1000)
+
+            trace_wandb(
+                config,
+                agent_option,
+                prompt,
+                completion,
+                result,
+                callback,
+                err_msg,
+                start_time_ms,
+                end_time_ms
+            )
+
+        del os.environ["OPENAI_API_KEY"]
+
+        return result
 
 gr.close_all()
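The substance of the commit is the lock-guarded handling of the process-wide OPENAI_API_KEY environment variable: os.environ is shared by every request the Gradio app serves, presumably on different worker threads, so setting the per-request key, running the agent or completion call, and removing the key have to happen as one serialized unit. Below is a minimal, self-contained sketch of that pattern only; call_model, its sleep, and the demo threads are hypothetical stand-ins for the app's agent calls, not part of the actual Space.

import os
import threading
import time

lock = threading.Lock()

def call_model(prompt):
    # Placeholder for the real agent / chat-completion call; it only
    # reports which key is visible in the environment while it runs.
    time.sleep(0.01)
    return f"answered '{prompt}' with key {os.environ['OPENAI_API_KEY']}"

def invoke(openai_api_key, prompt):
    # Serialize the whole request: publish the shared env var, run the
    # model, then delete the key so it cannot leak into another request.
    with lock:
        os.environ["OPENAI_API_KEY"] = openai_api_key
        try:
            return call_model(prompt)
        finally:
            del os.environ["OPENAI_API_KEY"]

if __name__ == "__main__":
    threads = [
        threading.Thread(target=lambda k=k: print(invoke(f"key-{k}", f"prompt-{k}")))
        for k in range(3)
    ]
    for t in threads:
        t.start()
    for t in threads:
        t.join()

Because the lock is held for the full duration of the model call, concurrent requests are processed one at a time; that serialization is the cost of routing per-user credentials through a single process-global environment variable.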