import os
import openai
import numpy as np
from tempfile import NamedTemporaryFile
import copy
import shapely
from shapely.geometry import *
from shapely.affinity import *
from omegaconf import OmegaConf
from moviepy.editor import ImageSequenceClip
import gradio as gr
from lmp import LMP, LMPFGen
from sim import PickPlaceEnv, LMP_wrapper
from consts import ALL_BLOCKS, ALL_BOWLS
from md_logger import MarkdownLogger

default_open_ai_key = os.getenv('OPEN_AI_SECRET')

chain_of_thought_affix = ' with a step by step explanation'
ask_for_clarification_affix = ' or ask for clarification if you feel unclear'
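

# DemoRunner owns the simulated tabletop environment, the language model
# programs (LMPs) that turn instructions into code, and the Markdown logger
# that feeds the chat UI.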
class DemoRunner:

    def __init__(self):
        self._cfg = OmegaConf.to_container(OmegaConf.load('cfg.yaml'), resolve=True)
        self._env = None
        self._model_name = ''
        self._md_logger = MarkdownLogger()
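
    # Build the LMP hierarchy: a function-generating LMP, low-level parsing
    # LMPs, and the top-level tabletop_ui LMP, together with the fixed and
    # variable APIs they are allowed to call.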
    def make_LMP(self, env):
        # LMP env wrapper
        cfg = copy.deepcopy(self._cfg)
        cfg['env'] = {
            'init_objs': list(env.obj_name_to_id.keys()),
            'coords': cfg['tabletop_coords']
        }
        for vs in cfg['lmps'].values():
            vs['engine'] = self._model_name

        LMP_env = LMP_wrapper(env, cfg)

        # creating APIs that the LMPs can interact with
        fixed_vars = {
            'np': np
        }
        fixed_vars.update({
            name: eval(name)
            for name in shapely.geometry.__all__ + shapely.affinity.__all__
        })
        variable_vars = {
            k: getattr(LMP_env, k)
            for k in [
                'get_bbox', 'get_obj_pos', 'get_color', 'is_obj_visible', 'denormalize_xy',
                'put_first_on_second', 'get_obj_names',
                'get_corner_name', 'get_side_name',
            ]
        }
        # variable_vars['say'] = lambda msg: self._md_logger.log_text(f'Robot says: "{msg}"')
        variable_vars['say'] = lambda msg: self._md_logger.log_message(f'{msg}')

        # creating the function-generating LMP
        lmp_fgen = LMPFGen(cfg['lmps']['fgen'], fixed_vars, variable_vars, self._md_logger)

        # creating other low-level LMPs
        variable_vars.update({
            k: LMP(k, cfg['lmps'][k], lmp_fgen, fixed_vars, variable_vars, self._md_logger)
            for k in ['parse_obj_name', 'parse_position', 'parse_question', 'transform_shape_pts']
        })

        # creating the LMP that deals w/ high-level language commands
        lmp_tabletop_ui = LMP(
            'tabletop_ui', cfg['lmps']['tabletop_ui'], lmp_fgen, fixed_vars, variable_vars, self._md_logger
        )

        return lmp_tabletop_ui
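
    # Reset the simulation with a random subset of blocks and bowls and
    # rebuild the LMPs for the selected model.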
    def setup(self, api_key, model_name, n_blocks, n_bowls):
        openai.api_key = api_key
        self._model_name = model_name

        self._env = PickPlaceEnv(render=True, high_res=True, high_frame_rate=False)
        list_idxs = np.random.choice(len(ALL_BLOCKS), size=max(n_blocks, n_bowls), replace=False)
        block_list = [ALL_BLOCKS[i] for i in list_idxs[:n_blocks]]
        bowl_list = [ALL_BOWLS[i] for i in list_idxs[:n_bowls]]
        obj_list = block_list + bowl_list
        self._env.reset(obj_list)

        self._lmp_tabletop_ui = self.make_LMP(self._env)

        info = '### Available Objects: \n- ' + '\n- '.join(obj_list)
        img = self._env.get_camera_image()

        return info, img
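
    # Run a single instruction: pass it to the tabletop_ui LMP, collect the
    # messages logged via say(), and render a video clip if the robot moved.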
    def run(self, instruction, history):
        if self._env is None:
            return 'Please run setup first!', None, history

        self._env.cache_video = []
        self._md_logger.clear()

        try:
            self._lmp_tabletop_ui(instruction, f'objects = {self._env.object_list}')
        except Exception as e:
            return f'Error: {e}', None, history

        # Update chat messages
        for message in self._md_logger.get_messages():
            history.append((None, message))

        if self._env.cache_video:
            rendered_clip = ImageSequenceClip(self._env.cache_video, fps=25)
            video_file_name = NamedTemporaryFile(suffix='.mp4').name
            rendered_clip.write_videofile(video_file_name, fps=25)
            history.append((None, (video_file_name, )))

        return self._md_logger.get_log(), self._env.get_camera_image(), history
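

# Gradio callbacks: each must return one value per component in the
# corresponding `outputs` list of the event handlers wired up below.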
def setup(api_key, model_name, n_blocks, n_bowls):
    # Early exits still return one value per output of btn_setup.click.
    if not api_key:
        return 'Please enter your OpenAI API key!', None, None, None, None
    if n_blocks + n_bowls == 0:
        return 'Please select at least one object!', None, None, None, None

    demo_runner = DemoRunner()
    info, img = demo_runner.setup(api_key, model_name, n_blocks, n_bowls)
    welcome_message = 'How can I help you?'
    return info, img, demo_runner, [(None, welcome_message)], None


def run(demo_runner, chat_history):
    if demo_runner is None:
        # Match the four outputs of btn_run.click / inp_instruction.submit.
        return 'Please run setup first!', None, chat_history, ''
    instruction = chat_history[-1][0]
    return *demo_runner.run(instruction, chat_history), ''


def submit_chat(chat_message, history):
    history += [[chat_message, None]]
    return '', history


def add_cot(chat_message):
    return chat_message.strip() + chain_of_thought_affix


def add_clarification(chat_message):
    return chat_message.strip() + ask_for_clarification_affix
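

# Skip what is presumably the Hugging Face Spaces YAML front matter (the first
# 12 lines of README.md) so only the prose is shown in the Readme accordion.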
with open('README.md', 'r') as f:
    for _ in range(12):
        next(f)
    readme_text = f.read()
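

# Lay out the Gradio UI: setup controls on top, then the chat on the left and
# the rendered simulation image on the right.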
with gr.Blocks() as demo:
    state = gr.State(None)

    with gr.Accordion('Readme', open=False):
        gr.Markdown(readme_text)

    gr.Markdown('# Interactive Demo')

    with gr.Row():
        with gr.Column():
            with gr.Row():
                inp_api_key = gr.Textbox(value=default_open_ai_key,
                                         label='OpenAI API Key (this is not stored anywhere)', lines=1)
                inp_model_name = gr.Dropdown(label='Model Name', choices=[
                    'text-davinci-003', 'code-davinci-002', 'text-davinci-002'], value='text-davinci-003')
            with gr.Row():
                inp_n_blocks = gr.Slider(label='Number of Blocks', minimum=0, maximum=5, value=3, step=1)
                inp_n_bowls = gr.Slider(label='Number of Bowls', minimum=0, maximum=5, value=3, step=1)
            btn_setup = gr.Button("Setup/Reset Simulation")
            info_setup = gr.Markdown(label='Setup Info')

    with gr.Row():
        with gr.Column():
            chat_box = gr.Chatbot()
            inp_instruction = gr.Textbox(label='Instruction', lines=1)
            examples = gr.Examples(
                [
                    'stack two of the blocks',
                    'what color is the rightmost block?',
                    'arrange the blocks into figure 3',
                    'put blocks into non-matching bowls',
                    'swap the positions of one block and another',
                ],
                inp_instruction,
            )
            btn_add_cot = gr.Button(f'+{chain_of_thought_affix} (chain-of-thought)')
            btn_add_cla = gr.Button(f'+{ask_for_clarification_affix} (conversation)')
            btn_run = gr.Button("Run (this may take 30+ seconds)")
            info_run = gr.Markdown(label='Generated Code')
        with gr.Column():
            img_setup = gr.Image(label='Current Simulation State')
            # video_run = gr.Video(label='Most Recent Manipulation')
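
    # Wire up events: the Run button and pressing Enter in the instruction box
    # both append the message to the chat, then run it through the LMPs.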
    btn_setup.click(
        setup,
        inputs=[inp_api_key, inp_model_name, inp_n_blocks, inp_n_bowls],
        outputs=[info_setup, img_setup, state, chat_box, info_run],
    )
    btn_add_cot.click(
        add_cot,
        inp_instruction,
        inp_instruction,
    )
    btn_add_cla.click(
        add_clarification,
        inp_instruction,
        inp_instruction,
    )
    btn_run.click(
        submit_chat,
        [inp_instruction, chat_box],
        [inp_instruction, chat_box],
    ).then(
        run,
        inputs=[state, chat_box],
        outputs=[info_run, img_setup, chat_box, inp_instruction],
    )
    inp_instruction.submit(
        submit_chat,
        [inp_instruction, chat_box],
        [inp_instruction, chat_box],
    ).then(
        run,
        inputs=[state, chat_box],
        outputs=[info_run, img_setup, chat_box, inp_instruction],
    )


if __name__ == '__main__':
    print(gr.__version__)
    demo.queue(concurrency_count=10)
    demo.launch()