lgaleana committed
Commit
86c3750
1 Parent(s): c44e323

Code task fixes

Files changed (2)
  1. actions.py +23 -43
  2. components.py +51 -38
actions.py CHANGED
@@ -34,26 +34,31 @@ def execute_task(task_id: int, active_index: int, error_value, *args):
     Params:
     - task_id: This will tell us which task to execute.
     - active_index: The index of the actual task that is visible.
-    - prev_error_value: I carry around whether there is an error in the execution, to be displayed at the end.
+    - error_value: I carry around whether there is an error in the execution, to be displayed at the end.
     - args: Other variables that will be decomposed.
     """
     n_avail_tasks = len(Task.available_tasks)
-    # We need to return outputs for all tasks in the row.
-    outputs = [""] * n_avail_tasks
+    outputs = [
+        ""
+    ] * n_avail_tasks  # We need to return outputs for all tasks in the row.
+    error_update = gr.HighlightedText.update(
+        value=error_value, visible=error_value is not None
+    )
 
-    if (
-        active_index is None or error_value
-    ):  # Active index could be 0 == not active_index
-        return outputs + [
-            gr.HighlightedText.update(
-                value=error_value, visible=error_value is not None
-            )
-        ]
+    # If no task has been picked or if there has been an error, skip.
+    if active_index is None or error_value:  # Active index could be 0
+        return outputs + [error_update]
 
     task_id = int(task_id)
     active_index = int(active_index)
     inner_n_inputs = all_tasks[task_id].inner_n_inputs
 
+    # Decompose args
+    # - start_inputs: Where the active task inputs start within args.
+    # - end_inputs: End of the active task inputs.
+    # - task_inputs: The active task inputs.
+    # - prev_active_indexes: Indexes of the active tasks in the previous tasks.
+    # - prev_task_outputs: Outputs of the previous tasks.
     start_inputs = 0
     end_inputs = 0
     end_all_inputs = sum(inner_n_inputs)
@@ -65,50 +70,25 @@ def execute_task(task_id: int, active_index: int, error_value, *args):
     task_inputs = args[start_inputs:end_inputs]
     prev_active_indexes = args[end_all_inputs : end_all_inputs + task_id]
     prev_task_outputs = args[end_all_inputs + task_id :]
-    non_empty_inputs = [i for i in task_inputs if i]
 
-    if len(non_empty_inputs) < len(task_inputs):
-        return outputs + [
-            gr.HighlightedText.update(
-                value=[(f"Missing inputs for Task: {task_id}", "ERROR")],
-                visible=True,
-            )
-        ]
+    # If no inputs, skip.
+    non_empty_inputs = [i for i in task_inputs if i]
+    if not non_empty_inputs:
+        return outputs + [error_update]
 
+    # Put task outputs in a dictionary with names.
     vars_in_scope = {}
     for i, prev_active_index in enumerate(prev_active_indexes):
         vars_in_scope[f"{Task.vname}{i}"] = prev_task_outputs[
             i * n_avail_tasks + int(prev_active_index)
         ]
-    # Get all variables referenced within the task input
-    prompt_vars = [v for ti in non_empty_inputs for v in re.findall("{(.*?)}", ti)]
-
-    # If there is an undefined variable referenced, HighlightedText will signal the error.
-    undefined_vars = prompt_vars - vars_in_scope.keys()
-    if len(undefined_vars) > 0:
-        outputs[active_index] = "ERROR"
-        return outputs + [
-            gr.HighlightedText.update(
-                value=[
-                    (
-                        f"The variables in Task :: {task_id} are being used before being defined :: {undefined_vars}. Please check your tasks.",
-                        "ERROR",
-                    )
-                ],
-                visible=True,
-            )
-        ]
 
     try:
         # Task logic gets inserted into the right index
         outputs[active_index] = all_tasks[task_id].execute(
-            active_index, *non_empty_inputs, vars_in_scope=vars_in_scope
+            active_index, *task_inputs, vars_in_scope=vars_in_scope
        )
-        return outputs + [
-            gr.HighlightedText.update(
-                value=error_value, visible=error_value is not None
-            )
-        ]
+        return outputs + [error_update]
     except Exception as e:
         import traceback

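For reference, the layout that the new "Decompose args" comments describe can be checked in isolation. The sketch below is not part of the commit: the helper and all values are made up for illustration, and the way start_inputs/end_inputs advance to the active inner component is an assumption, since the loop that positions them sits outside the changed hunk.

# Hypothetical, standalone illustration of the args layout documented above (not from the repo).
def decompose_args(task_id, active_index, inner_n_inputs, args):
    # Assumption: start/end select the slice belonging to the active inner component.
    end_all_inputs = sum(inner_n_inputs)
    start_inputs = sum(inner_n_inputs[:active_index])
    end_inputs = start_inputs + inner_n_inputs[active_index]
    task_inputs = args[start_inputs:end_inputs]                            # inputs of the visible component
    prev_active_indexes = args[end_all_inputs : end_all_inputs + task_id]  # one index per previous task
    prev_task_outputs = args[end_all_inputs + task_id :]                   # n_avail_tasks outputs per previous task
    return task_inputs, prev_active_indexes, prev_task_outputs

# Two available components (AITask with 1 input, CodeTask with 3), CodeTask active,
# and one previous task whose outputs occupy n_avail_tasks = 2 slots.
args = ("ai prompt", "pip install requests", "def f(): ...", "{t0}", 1, "prev ai out", "prev code out")
task_inputs, prev_idx, prev_out = decompose_args(
    task_id=1, active_index=1, inner_n_inputs=[1, 3], args=args
)
print(task_inputs)  # ('pip install requests', 'def f(): ...', '{t0}')
print(prev_idx)     # (1,)
print(prev_out)     # ('prev ai out', 'prev code out')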
components.py CHANGED
@@ -53,20 +53,29 @@ class TaskComponent(ABC):
         self.output: gr.Textbox
         self._source = self.__class__.__name__
 
+    def format_input(self, input: str, vars_in_scope: Dict[str, Any]) -> str:
+        prompt_vars = [v for v in re.findall("{(.*?)}", input)]
+        undefined_vars = prompt_vars - vars_in_scope.keys()
+        if len(undefined_vars) > 0:
+            raise KeyError(
+                f"The variables :: {undefined_vars} are being used before being defined."
+            )
+        return input.format(**vars_in_scope)
+
     def render(self, id_: int) -> None:
         self.gr_component = self._render(id_)
 
-    @property
     @abstractmethod
-    def inputs(self) -> List[gr.Textbox]:
+    def _render(self, id_) -> gr.Box:
         ...
 
     @property
     def n_inputs(self) -> int:
         return len(self.inputs)
 
+    @property
     @abstractmethod
-    def _render(self, id_) -> gr.Box:
+    def inputs(self) -> List[gr.Textbox]:
         ...
 
     @abstractmethod
@@ -98,7 +107,7 @@ class AITask(TaskComponent):
         return [self.input]
 
     def execute(self, prompt: str, vars_in_scope: Dict[str, Any]) -> str:
-        formatted_prompt = prompt.format(**vars_in_scope)
+        formatted_prompt = self.format_input(prompt, vars_in_scope)
         return ai.llm.next([{"role": "user", "content": formatted_prompt}])
 
 
@@ -115,10 +124,10 @@ class CodeTask(TaskComponent):
         with gr.Row():
             with gr.Column():
                 with gr.Accordion(label="Generated code", open=False) as accordion:
-                    raw_prompt_output = gr.Textbox(
+                    self.raw_output = gr.Textbox(
                         label="Raw output",
                         lines=5,
-                        interactive=True,
+                        interactive=False,
                     )
                     self.packages = gr.Textbox(
                         label="The following packages will be installed",
@@ -146,7 +155,7 @@ class CodeTask(TaskComponent):
             self.generate_code,
            inputs=[code_prompt],
            outputs=[
-                raw_prompt_output,
+                self.raw_output,
                self.packages,
                self.function,
                error_message,
@@ -160,13 +169,14 @@ class CodeTask(TaskComponent):
     def generate_code(code_prompt: str):
         import json
 
-        raw_prompt_output = ""
+        raw_output = ""
+        parsed_output = {"pip": "", "script": ""}
         error_message = gr.HighlightedText.update(None, visible=False)
         accordion = gr.Accordion.update()
 
         if not code_prompt:
             return (
-                raw_prompt_output,
+                "",
                 "",
                 "",
                 error_message,
@@ -174,9 +184,8 @@ class CodeTask(TaskComponent):
             )
 
         print(f"Generating code.")
-        parsed_output = {"packages": "", "script": ""}
         try:
-            raw_prompt_output = ai.llm.next(
+            raw_output = ai.llm.next(
                 [
                     {
                         "role": "user",
@@ -185,30 +194,29 @@ class CodeTask(TaskComponent):
                         {code_prompt}
 
                         Don't save anything to disk. Instead, the function should return the necessary data.
-                        Include all the necessary imports. Make sure that the package names are correct.
+                        Include necessary imports.
                         """,
                     }
                 ],
                 temperature=0,
             )
-
-            raw_parsed_output = ai.llm.next(
+            extractions = ai.llm.next(
                 [
                     {
                         "role": "user",
                         "content": f"""
-                        The following text has a python function with some imports that might need to be installed:
-                        {raw_prompt_output}
+                        The following text has a python function with some packages that might need to be installed:
+                        {raw_output}
 
-                        Extract all the python packages that need to be installed with pip, nothing else.
-                        Extract the function and the imports as a single python script, nothing else.
+                        What is the pip install command to install the needed packages?
+                        Package names in the imports and in pip might be different. Use the correct pip names.
+                        Extract the imports and the function definition.
 
                         Write a JSON:
-                        ```
-                        {{
-                            "packages": Python list of packages to be parsed with eval(). If no packages, the list should be empty.
-                            "script": Python script to be executed with exec(). Include only the function and the imports.
-                        }}
+                        {{
+                            "pip": Pip command. If no packages, empty string.
+                            "script": A python script to be executed with exec(). Include only the imports and the function definition.
+                        }}
                         ```
                         """,
                     }
@@ -216,7 +224,7 @@ class CodeTask(TaskComponent):
                 temperature=0,
             )
             parsed_output = json.loads(
-                re.search("({.*})", raw_parsed_output, re.DOTALL).group(1)
+                re.search("({.*})", extractions, re.DOTALL).group(0)
            )
         except Exception as e:
             import traceback
@@ -227,9 +235,9 @@ class CodeTask(TaskComponent):
             )
             accordion = gr.Accordion.update(open=True)
         return (
-            raw_prompt_output,
-            parsed_output["packages"],
-            parsed_output["script"].replace("```python", "").replace("```", ""),
+            raw_output,
+            parsed_output["pip"],
+            parsed_output["script"],
             error_message,
             accordion,
         )
@@ -239,23 +247,28 @@ class CodeTask(TaskComponent):
         return [self.packages, self.function, self.input]
 
     def execute(
-        self, packages: str, function: str, input: str, vars_in_scope: Dict[str, Any]
+        self, pip_command: str, function: str, input: str, vars_in_scope: Dict[str, Any]
     ):
-        import subprocess
-        import sys
+        import inspect
+
+        if pip_command:
+            import subprocess
+            import sys
+
+            subprocess.check_call([sys.executable, "-m"] + pip_command.split(" "))
 
-        for p in eval(packages):
-            subprocess.check_call([sys.executable, "-m", "pip", "install", p])
         exec(function, locals())
         # Should be last function in scope
         self._toolkit_func = list(locals().items())[-1][1]
 
-        formatted_input = input.format(**vars_in_scope)
-        try:
-            formatted_input = eval(formatted_input)
-        except:
-            pass
-        return self._toolkit_func(formatted_input)
+        if len(inspect.getfullargspec(self._toolkit_func)[0]) > 0:
+            formatted_input = self.format_input(input, vars_in_scope)
+            try:
+                formatted_input = eval(formatted_input)
+            except:
+                pass
+            return self._toolkit_func(formatted_input)
+        return self._toolkit_func()
 
 
 class Task(Component):
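The new TaskComponent.format_input centralizes the undefined-variable check that execute_task previously did inline. A minimal standalone check of the added behavior, mirrored here as a free function (not part of the commit; the example variable names are arbitrary):

import re
from typing import Any, Dict

def format_input(input: str, vars_in_scope: Dict[str, Any]) -> str:
    # Mirrors the method added above: collect the {vars} referenced in the input...
    prompt_vars = [v for v in re.findall("{(.*?)}", input)]
    # ...and diff them against the known names; dict key views support set
    # difference even with a plain list on the left-hand side.
    undefined_vars = prompt_vars - vars_in_scope.keys()
    if len(undefined_vars) > 0:
        raise KeyError(
            f"The variables :: {undefined_vars} are being used before being defined."
        )
    return input.format(**vars_in_scope)

print(format_input("Summarize: {t0}", {"t0": "previous output"}))  # Summarize: previous output
try:
    format_input("Summarize: {t1}", {"t0": "previous output"})
except KeyError as e:
    print(e)  # reports that t1 is undefined

Since both AITask.execute and CodeTask.execute now call this helper, an undefined variable presumably surfaces through execute_task's generic except branch rather than through the hand-built HighlightedText updates that this commit removes from actions.py.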