Implemented sequential execution
Browse files
- .env.example +2 -0
- .gitignore +2 -1
- ai/__init__.py +1 -0
- ai/image.py +19 -0
- ai/llm.py +40 -0
- app.py +46 -14
- requirements.txt +3 -1
- utils/__init__.py +0 -0
- utils/io.py +12 -0
.env.example
ADDED
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
1 |
+
OPENAI_KEY_PERSONAL=<openai_key>
|
2 |
+
GOOGLE_APPLICATION_KEY=<gcp_key_json>
|
.gitignore
CHANGED
@@ -1,2 +1,3 @@
|
|
1 |
venv
|
2 |
-
.vscode
|
|
|
|
1 |
venv
|
2 |
+
.vscode
|
3 |
+
.env
|
ai/__init__.py
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
from . import image, llm
|
ai/image.py
ADDED
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
from typing import Any, Dict, List
|
3 |
+
|
4 |
+
import openai
|
5 |
+
from dotenv import load_dotenv
|
6 |
+
|
7 |
+
load_dotenv()
|
8 |
+
|
9 |
+
|
10 |
+
openai.api_key = os.environ["OPENAI_KEY_PERSONAL"]
|
11 |
+
|
12 |
+
|
13 |
+
def gen(prompt: str, n: int, size: str) -> Dict[str, Any]:
    """Generate *n* images for *prompt* at *size* via the OpenAI Image API.

    Returns the raw API response dict (images live under "data").
    """
    request = {"prompt": prompt, "n": n, "size": size}
    return openai.Image.create(**request)  # type: ignore
15 |
+
|
16 |
+
|
17 |
+
def urls(prompt: str, n: int = 1, size: str = "512x512") -> List[str]:
    """Generate images for *prompt* and return just their URLs."""
    response = gen(prompt, n, size)
    return [item["url"] for item in response["data"]]  # type: ignore
|
ai/llm.py
ADDED
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
from typing import Any, Dict, List, Optional
|
3 |
+
|
4 |
+
import openai
|
5 |
+
from dotenv import load_dotenv
|
6 |
+
|
7 |
+
load_dotenv()
|
8 |
+
|
9 |
+
|
10 |
+
openai.api_key = os.environ["OPENAI_KEY_PERSONAL"]
|
11 |
+
MODEL = "gpt-3.5-turbo"
|
12 |
+
TEMPERATURE = 0.7
|
13 |
+
|
14 |
+
|
15 |
+
def call(
    messages: List[Dict[str, str]],
    model: Optional[str] = None,
    temperature: Optional[float] = None,
    stop: Optional[str] = None,
) -> Dict[str, Any]:
    """Send *messages* to the OpenAI chat-completion endpoint.

    :param messages: chat history as [{"role": ..., "content": ...}, ...].
    :param model: model name; defaults to module-level MODEL.
    :param temperature: sampling temperature; defaults to TEMPERATURE.
    :param stop: optional stop sequence forwarded to the API.
    :return: the raw ChatCompletion response dict.
    """
    if model is None:
        model = MODEL
    # Compare against None, not truthiness: a caller requesting
    # temperature=0.0 (deterministic sampling) must not be silently
    # bumped to the 0.7 default, which `if not temperature:` did.
    if temperature is None:
        temperature = TEMPERATURE

    return openai.ChatCompletion.create(  # type: ignore
        model=model,
        messages=messages,
        temperature=temperature,
        stop=stop,
    )
|
32 |
+
|
33 |
+
|
34 |
+
def next(
    messages: List[Dict[str, str]],
    model: Optional[str] = None,
    temperature: Optional[float] = None,
    stop: Optional[str] = None,
) -> str:
    """Return only the assistant's reply text for *messages*.

    Thin convenience wrapper over call(); parameters are forwarded as-is.
    NOTE(review): the name shadows the builtin `next` within this module,
    but callers depend on the public ai.llm.next, so it is kept.
    """
    response = call(messages, model, temperature, stop)
    first_choice = response["choices"][0]
    return first_choice["message"]["content"]
|
app.py
CHANGED
@@ -1,8 +1,10 @@
|
|
1 |
-
from typing import NamedTuple, Type, Union
|
2 |
-
|
3 |
|
4 |
import gradio as gr
|
5 |
|
|
|
|
|
|
|
6 |
|
7 |
MAX_INPUTS = 10
|
8 |
MAX_TASKS = 50
|
@@ -23,6 +25,9 @@ class Input:
|
|
23 |
)
|
24 |
return gr_component
|
25 |
|
|
|
|
|
|
|
26 |
|
27 |
class AITask:
|
28 |
@property
|
@@ -53,16 +58,23 @@ class AITask:
|
|
53 |
)
|
54 |
return gr_component
|
55 |
|
|
|
|
|
|
|
|
|
56 |
|
57 |
class Component:
|
58 |
-
def __init__(
|
|
|
|
|
59 |
# Internal state
|
60 |
self._id = id_
|
61 |
self.internal = internal
|
|
|
62 |
self._initial_visibility = visible
|
63 |
|
64 |
# Gradio state
|
65 |
-
self.component_id: gr.
|
66 |
self.source: gr.Textbox
|
67 |
self.visible: gr.Number
|
68 |
self.gr_component = gr.Box
|
@@ -70,13 +82,17 @@ class Component:
|
|
70 |
self.output: gr.Textbox
|
71 |
|
72 |
def render(self) -> None:
|
73 |
-
self.component_id = gr.
|
74 |
-
self.source = gr.Textbox(value=self.
|
75 |
self.visible = gr.Number(int(self._initial_visibility), visible=False)
|
76 |
self.gr_component = self.internal.render(self._initial_visibility)
|
77 |
self.output_name = self.internal.output_name
|
78 |
self.output = self.internal.output
|
79 |
|
|
|
|
|
|
|
|
|
80 |
|
81 |
class Variable(NamedTuple):
|
82 |
source: Type[Union[Input, AITask]]
|
@@ -85,8 +101,8 @@ class Variable(NamedTuple):
|
|
85 |
value: str
|
86 |
|
87 |
|
88 |
-
all_inputs = {i: Component(i, Input()) for i in range(MAX_INPUTS)}
|
89 |
-
all_tasks = {i: Component(i, AITask()) for i in range(MAX_TASKS)}
|
90 |
|
91 |
all_inputs[0]._initial_visibility = True
|
92 |
all_tasks[0]._initial_visibility = True
|
@@ -105,7 +121,6 @@ def add_input(*visibility):
|
|
105 |
|
106 |
def remove_input(*visibility):
|
107 |
for i, visible in reversed(list(enumerate(visibility, 1))):
|
108 |
-
print(i, visible)
|
109 |
if bool(visible):
|
110 |
return (
|
111 |
[gr.Row.update(visible=True)] * (i - 1)
|
@@ -128,7 +143,6 @@ def add_task(*visibility):
|
|
128 |
|
129 |
def remove_task(*visibility):
|
130 |
for i, visible in reversed(list(enumerate(visibility, 1))):
|
131 |
-
print(i, visible)
|
132 |
if bool(visible):
|
133 |
return (
|
134 |
[gr.Box.update(visible=True)] * (i - 1)
|
@@ -138,6 +152,11 @@ def remove_task(*visibility):
|
|
138 |
)
|
139 |
|
140 |
|
|
|
|
|
|
|
|
|
|
|
141 |
with gr.Blocks() as demo:
|
142 |
# Initial layout
|
143 |
for i in all_inputs.values():
|
@@ -166,26 +185,39 @@ with gr.Blocks() as demo:
|
|
166 |
add_input_btn.click(
|
167 |
add_input,
|
168 |
inputs=[i.visible for i in all_inputs.values()],
|
169 |
-
outputs=[i.gr_component for i in all_inputs.values()]
|
170 |
+ [i.visible for i in all_inputs.values()],
|
171 |
)
|
172 |
remove_input_btn.click(
|
173 |
remove_input,
|
174 |
inputs=[i.visible for i in all_inputs.values()],
|
175 |
-
outputs=[i.gr_component for i in all_inputs.values()]
|
176 |
+ [i.visible for i in all_inputs.values()],
|
177 |
)
|
178 |
add_task_btn.click(
|
179 |
add_task,
|
180 |
inputs=[i.visible for i in all_tasks.values()],
|
181 |
-
outputs=[i.gr_component for i in all_tasks.values()]
|
182 |
+ [i.visible for i in all_tasks.values()],
|
183 |
)
|
184 |
remove_task_btn.click(
|
185 |
remove_task,
|
186 |
inputs=[i.visible for i in all_tasks.values()],
|
187 |
-
outputs=[i.gr_component for i in all_tasks.values()]
|
188 |
+ [i.visible for i in all_tasks.values()],
|
189 |
)
|
190 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
191 |
demo.launch()
|
|
|
1 |
+
from typing import NamedTuple, Optional, Type, Union
|
|
|
2 |
|
3 |
import gradio as gr
|
4 |
|
5 |
+
import ai
|
6 |
+
from utils.io import print_system
|
7 |
+
|
8 |
|
9 |
MAX_INPUTS = 10
|
10 |
MAX_TASKS = 50
|
|
|
25 |
)
|
26 |
return gr_component
|
27 |
|
28 |
+
def execute(self) -> None:
    """No-op: a user Input holds text supplied by the user and computes nothing."""
    return None
|
30 |
+
|
31 |
|
32 |
class AITask:
|
33 |
@property
|
|
|
58 |
)
|
59 |
return gr_component
|
60 |
|
61 |
+
def execute(self, prompt: str) -> Optional[str]:
    """Run the LLM on *prompt*; return its reply, or None for an empty prompt."""
    if not prompt:
        return None
    return ai.llm.next([{"role": "user", "content": prompt}])
|
64 |
+
|
65 |
|
66 |
class Component:
|
67 |
+
def __init__(
|
68 |
+
self, id_: float, internal: Union[Input, AITask], visible: bool = False
|
69 |
+
):
|
70 |
# Internal state
|
71 |
self._id = id_
|
72 |
self.internal = internal
|
73 |
+
self._source = self.internal.__class__.__name__
|
74 |
self._initial_visibility = visible
|
75 |
|
76 |
# Gradio state
|
77 |
+
self.component_id: gr.Number
|
78 |
self.source: gr.Textbox
|
79 |
self.visible: gr.Number
|
80 |
self.gr_component = gr.Box
|
|
|
82 |
self.output: gr.Textbox
|
83 |
|
84 |
def render(self) -> None:
    """Instantiate the Gradio widgets backing this component.

    Fills in the hidden state widgets (component_id, source, visible)
    and delegates the visible UI to the wrapped Input/AITask.
    NOTE(review): presumably must be called inside a gr.Blocks context
    for the widgets to attach to the layout — confirm against app setup.
    """
    self.component_id = gr.Number(value=self._id, visible=False)
    self.source = gr.Textbox(value=self._source, visible=False)
    # Visibility is stored as a hidden Number (0/1) so event handlers
    # can read it back as an input.
    self.visible = gr.Number(int(self._initial_visibility), visible=False)
    self.gr_component = self.internal.render(self._initial_visibility)
    self.output_name = self.internal.output_name
    self.output = self.internal.output
|
91 |
|
92 |
+
def execute(self, *args):
    """Log which component is running, then delegate to the wrapped internal."""
    label = f"Executing component :: {self._source}.{self._id}"
    print_system(label)
    return self.internal.execute(*args)
|
95 |
+
|
96 |
|
97 |
class Variable(NamedTuple):
|
98 |
source: Type[Union[Input, AITask]]
|
|
|
101 |
value: str
|
102 |
|
103 |
|
104 |
+
all_inputs = {float(i): Component(i, Input()) for i in range(MAX_INPUTS)}
|
105 |
+
all_tasks = {float(i): Component(i, AITask()) for i in range(MAX_TASKS)}
|
106 |
|
107 |
all_inputs[0]._initial_visibility = True
|
108 |
all_tasks[0]._initial_visibility = True
|
|
|
121 |
|
122 |
def remove_input(*visibility):
|
123 |
for i, visible in reversed(list(enumerate(visibility, 1))):
|
|
|
124 |
if bool(visible):
|
125 |
return (
|
126 |
[gr.Row.update(visible=True)] * (i - 1)
|
|
|
143 |
|
144 |
def remove_task(*visibility):
|
145 |
for i, visible in reversed(list(enumerate(visibility, 1))):
|
|
|
146 |
if bool(visible):
|
147 |
return (
|
148 |
[gr.Box.update(visible=True)] * (i - 1)
|
|
|
152 |
)
|
153 |
|
154 |
|
155 |
+
def execute_task(id_: float, prompt: str):
    """Execute the task registered under *id_* with *prompt*.

    An empty prompt is a no-op: returns None, leaving the output unchanged.
    """
    if not prompt:
        return None
    return all_tasks[id_].execute(prompt)
|
158 |
+
|
159 |
+
|
160 |
with gr.Blocks() as demo:
|
161 |
# Initial layout
|
162 |
for i in all_inputs.values():
|
|
|
185 |
add_input_btn.click(
|
186 |
add_input,
|
187 |
inputs=[i.visible for i in all_inputs.values()],
|
188 |
+
outputs=[i.gr_component for i in all_inputs.values()] # type: ignore
|
189 |
+ [i.visible for i in all_inputs.values()],
|
190 |
)
|
191 |
remove_input_btn.click(
|
192 |
remove_input,
|
193 |
inputs=[i.visible for i in all_inputs.values()],
|
194 |
+
outputs=[i.gr_component for i in all_inputs.values()] # type: ignore
|
195 |
+ [i.visible for i in all_inputs.values()],
|
196 |
)
|
197 |
add_task_btn.click(
|
198 |
add_task,
|
199 |
inputs=[i.visible for i in all_tasks.values()],
|
200 |
+
outputs=[i.gr_component for i in all_tasks.values()] # type: ignore
|
201 |
+ [i.visible for i in all_tasks.values()],
|
202 |
)
|
203 |
remove_task_btn.click(
|
204 |
remove_task,
|
205 |
inputs=[i.visible for i in all_tasks.values()],
|
206 |
+
outputs=[i.gr_component for i in all_tasks.values()] # type: ignore
|
207 |
+ [i.visible for i in all_tasks.values()],
|
208 |
)
|
209 |
|
210 |
+
# Sequential execution
|
211 |
+
execution_event = execute_btn.click(
|
212 |
+
execute_task,
|
213 |
+
inputs=[all_tasks[0].component_id, all_tasks[0].internal.prompt], # type: ignore
|
214 |
+
outputs=[all_tasks[0].output],
|
215 |
+
)
|
216 |
+
for task in list(all_tasks.values())[1:]:
|
217 |
+
execution_event = execution_event.then(
|
218 |
+
execute_task,
|
219 |
+
inputs=[task.component_id, task.internal.prompt], # type: ignore
|
220 |
+
outputs=[task.output],
|
221 |
+
)
|
222 |
+
|
223 |
demo.launch()
|
requirements.txt
CHANGED
@@ -1 +1,3 @@
|
|
1 |
-
gradio
|
|
|
|
|
|
1 |
+
gradio
|
2 |
+
openai
|
3 |
+
python-dotenv
|
utils/__init__.py
ADDED
File without changes
|
utils/io.py
ADDED
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
def print_system(message) -> str:
    """Echo *message* to stdout with the terminal color reset; return it unchanged."""
    print("\033[0;0m" + str(message))
    return message
|
4 |
+
|
5 |
+
|
6 |
+
def print_assistant(message) -> str:
    """Echo *message* to stdout in green; return it unchanged for chaining."""
    print("\033[92m" + str(message))
    return message
|
9 |
+
|
10 |
+
|
11 |
+
def user_input(message: str = "") -> str:
    """Prompt the user in bold blue and return the line they type."""
    colored_prompt = f"\033[1;34m{message}"
    return input(colored_prompt)
|