dai committed on
Commit
074c4ae
1 Parent(s): 2a6f170

add chatbox. add examples.

Browse files
Files changed (5) hide show
  1. .gitignore +2 -1
  2. app.py +70 -27
  3. lmp.py +11 -11
  4. md_logger.py +11 -2
  5. requirements.txt +2 -1
.gitignore CHANGED
@@ -127,6 +127,7 @@ venv/
127
  ENV/
128
  env.bak/
129
  venv.bak/
 
130
 
131
  # Spyder project settings
132
  .spyderproject
@@ -159,4 +160,4 @@ cython_debug/
159
  # option (not recommended) you can uncomment the following to ignore the entire idea folder.
160
  #.idea/
161
 
162
- .DS_Store
 
127
  ENV/
128
  env.bak/
129
  venv.bak/
130
+ .python-version
131
 
132
  # Spyder project settings
133
  .spyderproject
 
160
  # option (not recommended) you can uncomment the following to ignore the entire idea folder.
161
  #.idea/
162
 
163
+ .DS_Store
app.py CHANGED
@@ -1,3 +1,4 @@
 
1
  import openai
2
  import numpy as np
3
  from tempfile import NamedTemporaryFile
@@ -8,20 +9,23 @@ from shapely.affinity import *
8
  from omegaconf import OmegaConf
9
  from moviepy.editor import ImageSequenceClip
10
  import gradio as gr
 
11
 
12
  from lmp import LMP, LMPFGen
13
  from sim import PickPlaceEnv, LMP_wrapper
14
  from consts import ALL_BLOCKS, ALL_BOWLS
15
  from md_logger import MarkdownLogger
16
 
 
17
 
18
  class DemoRunner:
19
-
20
  def __init__(self):
21
  self._cfg = OmegaConf.to_container(OmegaConf.load('cfg.yaml'), resolve=True)
22
  self._env = None
23
  self._model_name = ''
24
  self._md_logger = MarkdownLogger()
 
25
 
26
  def make_LMP(self, env):
27
  # LMP env wrapper
@@ -50,7 +54,9 @@ class DemoRunner:
50
  'get_corner_name', 'get_side_name',
51
  ]
52
  }
53
- variable_vars['say'] = lambda msg: self._md_logger.log_text(f'Robot says: "{msg}"')
 
 
54
 
55
  # creating the function-generating LMP
56
  lmp_fgen = LMPFGen(cfg['lmps']['fgen'], fixed_vars, variable_vars, self._md_logger)
@@ -86,9 +92,9 @@ class DemoRunner:
86
 
87
  return info, img
88
 
89
- def run(self, instruction):
90
  if self._env is None:
91
- return 'Please run setup first!', None, None
92
 
93
  self._env.cache_video = []
94
  self._md_logger.clear()
@@ -96,7 +102,7 @@ class DemoRunner:
96
  try:
97
  self._lmp_tabletop_ui(instruction, f'objects = {self._env.object_list}')
98
  except Exception as e:
99
- return f'Error: {e}', None, None
100
 
101
  video_file_name = None
102
  if self._env.cache_video:
@@ -104,27 +110,39 @@ class DemoRunner:
104
  video_file_name = NamedTemporaryFile(suffix='.mp4').name
105
  rendered_clip.write_videofile(video_file_name, fps=25)
106
 
107
- return self._md_logger.get_log(), self._env.get_camera_image(), video_file_name
 
 
 
 
 
 
 
108
 
109
 
110
  def setup(api_key, model_name, n_blocks, n_bowls):
111
  if not api_key:
112
  return 'Please enter your OpenAI API key!', None, None
113
-
114
  if n_blocks + n_bowls == 0:
115
  return 'Please select at least one object!', None, None
116
 
117
  demo_runner = DemoRunner()
118
-
119
  info, img = demo_runner.setup(api_key, model_name, n_blocks, n_bowls)
120
- return info, img, demo_runner
 
121
 
122
 
123
- def run(instruction, demo_runner):
124
  if demo_runner is None:
125
- return 'Please run setup first!', None, None
126
- return demo_runner.run(instruction)
 
127
 
 
 
 
128
 
129
  if __name__ == '__main__':
130
  with open('README.md', 'r') as f:
@@ -140,35 +158,60 @@ if __name__ == '__main__':
140
  with gr.Row():
141
  with gr.Column():
142
  with gr.Row():
143
- inp_api_key = gr.Textbox(label='OpenAI API Key (this is not stored anywhere)', lines=1)
 
144
  inp_model_name = gr.Dropdown(label='Model Name', choices=['code-davinci-002', 'text-davinci-002'], value='code-davinci-002')
145
  with gr.Row():
146
- inp_n_blocks = gr.Slider(label='Number of Blocks', minimum=0, maximum=4, value=3, step=1)
147
- inp_n_bowls = gr.Slider(label='Number of Bowls', minimum=0, maximum=4, value=3, step=1)
148
-
149
  btn_setup = gr.Button("Setup/Reset Simulation")
150
  info_setup = gr.Markdown(label='Setup Info')
151
  with gr.Column():
152
- img_setup = gr.Image(label='Current Simulation')
153
 
154
  with gr.Row():
155
  with gr.Column():
156
-
157
- inp_instruction = gr.Textbox(label='Instruction', lines=1)
 
 
 
 
 
 
 
 
 
158
  btn_run = gr.Button("Run (this may take 30+ seconds)")
159
  info_run = gr.Markdown(label='Generated Code')
160
  with gr.Column():
161
  video_run = gr.Video(label='Video of Last Instruction')
162
-
163
  btn_setup.click(
164
- setup,
165
- inputs=[inp_api_key, inp_model_name, inp_n_blocks, inp_n_bowls],
166
- outputs=[info_setup, img_setup, state]
167
  )
168
  btn_run.click(
169
- run,
170
- inputs=[inp_instruction, state],
171
- outputs=[info_run, img_setup, video_run]
 
 
 
 
172
  )
173
-
 
 
 
 
 
 
 
 
 
 
 
174
  demo.launch()
 
1
+ import os
2
  import openai
3
  import numpy as np
4
  from tempfile import NamedTemporaryFile
 
9
  from omegaconf import OmegaConf
10
  from moviepy.editor import ImageSequenceClip
11
  import gradio as gr
12
+ from gradio import processing_utils
13
 
14
  from lmp import LMP, LMPFGen
15
  from sim import PickPlaceEnv, LMP_wrapper
16
  from consts import ALL_BLOCKS, ALL_BOWLS
17
  from md_logger import MarkdownLogger
18
 
19
+ default_open_ai_key = os.getenv('OPEN_AI_SECRET')
20
 
21
  class DemoRunner:
22
+
23
  def __init__(self):
24
  self._cfg = OmegaConf.to_container(OmegaConf.load('cfg.yaml'), resolve=True)
25
  self._env = None
26
  self._model_name = ''
27
  self._md_logger = MarkdownLogger()
28
+ self._temp_file_manager = processing_utils.TempFileManager()
29
 
30
  def make_LMP(self, env):
31
  # LMP env wrapper
 
54
  'get_corner_name', 'get_side_name',
55
  ]
56
  }
57
+ # variable_vars['say'] = lambda msg: self._md_logger.log_text(f'Robot says: "{msg}"')
58
+ variable_vars['say'] = lambda msg: self._md_logger.log_message(
59
+ f'{msg}')
60
 
61
  # creating the function-generating LMP
62
  lmp_fgen = LMPFGen(cfg['lmps']['fgen'], fixed_vars, variable_vars, self._md_logger)
 
92
 
93
  return info, img
94
 
95
+ def run(self, instruction, history):
96
  if self._env is None:
97
+ return 'Please run setup first!', None, None, history
98
 
99
  self._env.cache_video = []
100
  self._md_logger.clear()
 
102
  try:
103
  self._lmp_tabletop_ui(instruction, f'objects = {self._env.object_list}')
104
  except Exception as e:
105
+ return f'Error: {e}', None, None, history
106
 
107
  video_file_name = None
108
  if self._env.cache_video:
 
110
  video_file_name = NamedTemporaryFile(suffix='.mp4').name
111
  rendered_clip.write_videofile(video_file_name, fps=25)
112
 
113
+ # Update chat messages
114
+ for message in self._md_logger.get_messages():
115
+ history.append((None, message))
116
+ if self._env.cache_video:
117
+ temp_name = self._temp_file_manager.make_temp_copy_if_needed(video_file_name)
118
+ history.append((None, (temp_name, )))
119
+
120
+ return self._md_logger.get_log(), self._env.get_camera_image(), video_file_name, history
121
 
122
 
123
  def setup(api_key, model_name, n_blocks, n_bowls):
124
  if not api_key:
125
  return 'Please enter your OpenAI API key!', None, None
126
+
127
  if n_blocks + n_bowls == 0:
128
  return 'Please select at least one object!', None, None
129
 
130
  demo_runner = DemoRunner()
131
+
132
  info, img = demo_runner.setup(api_key, model_name, n_blocks, n_bowls)
133
+ welcome_message = 'How can I help you?'
134
+ return info, img, demo_runner, [(None, welcome_message)]
135
 
136
 
137
+ def run(demo_runner, chat_history):
138
  if demo_runner is None:
139
+ return 'Please run setup first!', None, None, chat_history, None
140
+ instruction = chat_history[-1][0]
141
+ return *demo_runner.run(instruction, chat_history), ''
142
 
143
+ def submit_chat(chat_message, history):
144
+ history += [[chat_message, None]]
145
+ return '', history
146
 
147
  if __name__ == '__main__':
148
  with open('README.md', 'r') as f:
 
158
  with gr.Row():
159
  with gr.Column():
160
  with gr.Row():
161
+ inp_api_key = gr.Textbox(value=default_open_ai_key,
162
+ label='OpenAI API Key (this is not stored anywhere)', lines=1)
163
  inp_model_name = gr.Dropdown(label='Model Name', choices=['code-davinci-002', 'text-davinci-002'], value='code-davinci-002')
164
  with gr.Row():
165
+ inp_n_blocks = gr.Slider(label='Number of Blocks', minimum=0, maximum=5, value=3, step=1)
166
+ inp_n_bowls = gr.Slider(label='Number of Bowls', minimum=0, maximum=5, value=3, step=1)
167
+
168
  btn_setup = gr.Button("Setup/Reset Simulation")
169
  info_setup = gr.Markdown(label='Setup Info')
170
  with gr.Column():
171
+ img_setup = gr.Image(label='Current Simulation State')
172
 
173
  with gr.Row():
174
  with gr.Column():
175
+ chat_box = gr.Chatbot()
176
+ inp_instruction = gr.Textbox(label='Instruction', lines=1)
177
+ examples = gr.Examples(
178
+ [
179
+ 'stack two of the blocks',
180
+ 'what color is the rightmost block?',
181
+ 'arrange the blocks into figure 3',
182
+ 'put blocks into non-matching bowls'
183
+ ],
184
+ inp_instruction,
185
+ )
186
  btn_run = gr.Button("Run (this may take 30+ seconds)")
187
  info_run = gr.Markdown(label='Generated Code')
188
  with gr.Column():
189
  video_run = gr.Video(label='Video of Last Instruction')
190
+
191
  btn_setup.click(
192
+ setup,
193
+ inputs=[inp_api_key, inp_model_name, inp_n_blocks, inp_n_bowls],
194
+ outputs=[info_setup, img_setup, state, chat_box],
195
  )
196
  btn_run.click(
197
+ submit_chat,
198
+ [inp_instruction, chat_box],
199
+ [inp_instruction, chat_box],
200
+ ).then(
201
+ run,
202
+ inputs=[state, chat_box],
203
+ outputs=[info_run, img_setup, video_run, chat_box, inp_instruction],
204
  )
205
+ inp_instruction.submit(
206
+ submit_chat,
207
+ [inp_instruction, chat_box],
208
+ [inp_instruction, chat_box],
209
+ ).then(
210
+ run,
211
+ inputs=[state, chat_box],
212
+ outputs=[info_run, img_setup, video_run,
213
+ chat_box, inp_instruction],
214
+ )
215
+
216
+ demo.queue()
217
  demo.launch()
lmp.py CHANGED
@@ -62,8 +62,8 @@ class LMP:
62
  break
63
  except (RateLimitError, APIConnectionError) as e:
64
  print(f'OpenAI API got err {e}')
65
- print('Retrying after 10s.')
66
- sleep(10)
67
 
68
  if self._cfg['include_context'] and context != '':
69
  to_exec = f'{context}\n{code_str}'
@@ -117,7 +117,7 @@ class LMPFGen:
117
  while True:
118
  try:
119
  f_src = openai.Completion.create(
120
- prompt=prompt,
121
  stop=self._stop_tokens,
122
  temperature=self._cfg['temperature'],
123
  engine=self._cfg['engine'],
@@ -126,8 +126,8 @@ class LMPFGen:
126
  break
127
  except (RateLimitError, APIConnectionError) as e:
128
  print(f'OpenAI API got err {e}')
129
- print('Retrying after 10s.')
130
- sleep(10)
131
 
132
  if fix_bugs:
133
  f_src = openai.Edit.create(
@@ -141,7 +141,7 @@ class LMPFGen:
141
  other_vars = {}
142
  gvars = merge_dicts([self._fixed_vars, self._variable_vars, other_vars])
143
  lvars = {}
144
-
145
  exec_safe(f_src, gvars, lvars)
146
 
147
  f = lvars[f_name]
@@ -187,9 +187,9 @@ class LMPFGen:
187
  # redefine parent f so newly created child_fs are in scope
188
  gvars = merge_dicts([self._fixed_vars, self._variable_vars, new_fs, other_vars])
189
  lvars = {}
190
-
191
  exec_safe(f_src, gvars, lvars)
192
-
193
  f = lvars[f_name]
194
 
195
  new_fs[f_name], srcs[f_name] = f, f_src
@@ -235,17 +235,17 @@ def var_exists(name, all_vars):
235
 
236
  def merge_dicts(dicts):
237
  return {
238
- k : v
239
  for d in dicts
240
  for k, v in d.items()
241
  }
242
-
243
 
244
  def exec_safe(code_str, gvars=None, lvars=None):
245
  banned_phrases = ['import', '__']
246
  for phrase in banned_phrases:
247
  assert phrase not in code_str
248
-
249
  if gvars is None:
250
  gvars = {}
251
  if lvars is None:
 
62
  break
63
  except (RateLimitError, APIConnectionError) as e:
64
  print(f'OpenAI API got err {e}')
65
+ print('Retrying after 2s.')
66
+ sleep(2)
67
 
68
  if self._cfg['include_context'] and context != '':
69
  to_exec = f'{context}\n{code_str}'
 
117
  while True:
118
  try:
119
  f_src = openai.Completion.create(
120
+ prompt=prompt,
121
  stop=self._stop_tokens,
122
  temperature=self._cfg['temperature'],
123
  engine=self._cfg['engine'],
 
126
  break
127
  except (RateLimitError, APIConnectionError) as e:
128
  print(f'OpenAI API got err {e}')
129
+ print('Retrying after 2s.')
130
+ sleep(2)
131
 
132
  if fix_bugs:
133
  f_src = openai.Edit.create(
 
141
  other_vars = {}
142
  gvars = merge_dicts([self._fixed_vars, self._variable_vars, other_vars])
143
  lvars = {}
144
+
145
  exec_safe(f_src, gvars, lvars)
146
 
147
  f = lvars[f_name]
 
187
  # redefine parent f so newly created child_fs are in scope
188
  gvars = merge_dicts([self._fixed_vars, self._variable_vars, new_fs, other_vars])
189
  lvars = {}
190
+
191
  exec_safe(f_src, gvars, lvars)
192
+
193
  f = lvars[f_name]
194
 
195
  new_fs[f_name], srcs[f_name] = f, f_src
 
235
 
236
  def merge_dicts(dicts):
237
  return {
238
+ k : v
239
  for d in dicts
240
  for k, v in d.items()
241
  }
242
+
243
 
244
  def exec_safe(code_str, gvars=None, lvars=None):
245
  banned_phrases = ['import', '__']
246
  for phrase in banned_phrases:
247
  assert phrase not in code_str
248
+
249
  if gvars is None:
250
  gvars = {}
251
  if lvars is None:
md_logger.py CHANGED
@@ -2,15 +2,24 @@ class MarkdownLogger:
2
 
3
  def __init__(self):
4
  self._log = ''
 
5
 
6
  def log_text(self, text):
7
  self._log += '\n' + text + '\n'
8
-
9
  def log_code(self, code):
10
  self._log += f'\n```python\n{code}\n```\n'
11
 
 
 
 
12
  def clear(self):
13
  self._log = ''
14
 
15
  def get_log(self):
16
- return self._log
 
 
 
 
 
 
2
 
3
  def __init__(self):
4
  self._log = ''
5
+ self._messages = []
6
 
7
  def log_text(self, text):
8
  self._log += '\n' + text + '\n'
9
+
10
  def log_code(self, code):
11
  self._log += f'\n```python\n{code}\n```\n'
12
 
13
+ def log_message(self, text):
14
+ self._messages.append(text)
15
+
16
  def clear(self):
17
  self._log = ''
18
 
19
  def get_log(self):
20
+ return self._log
21
+
22
+ def get_messages(self):
23
+ m = self._messages
24
+ self._messages = []
25
+ return m
requirements.txt CHANGED
@@ -8,4 +8,5 @@ pybullet
8
  imageio==2.4.1
9
  imageio-ffmpeg
10
  moviepy
11
- omegaconf
 
 
8
  imageio==2.4.1
9
  imageio-ffmpeg
10
  moviepy
11
+ omegaconf
12
+ gradio