xuxw98 committed
Commit 94e59de
1 Parent(s): b92f949

Upload app.py

Files changed (1)
  app.py  +2 -5
app.py CHANGED
@@ -68,7 +68,6 @@ def instruct_generate(
     """Generates a response based on a given instruction and an optional input.
     This script will only work with checkpoints from the instruction-tuned LLaMA-Adapter model.
     See `finetune_adapter.py`.
-
     Args:
         prompt: The prompt/instruction (Alpaca style).
         adapter_path: Path to the checkpoint with trained adapter weights, which are the output of
@@ -140,7 +139,7 @@ def create_instruct_demo():
                 lines=2, label="Instruction")
             object_list = gr.Textbox(
                 lines=5, label="Input")
-            max_len = gr.Slider(minimum=1, maximum=512,
+            max_len = gr.Slider(minimum=1, maximum=1024,
                                 value=128, label="Max length")
             with gr.Accordion(label='Advanced options', open=False):
                 temp = gr.Slider(minimum=0, maximum=1,
@@ -162,7 +161,7 @@ def create_instruct_demo():
         scene_name = os.path.basename(example_img_one).split(".")[0]
         example_object_list = example_dict[scene_name]["input_display"]
         example_instruction = example_dict[scene_name]["instruction"]
-        example_one = [example_img_one, example_instruction, example_object_list, 512, 0.8, 200]
+        example_one = [example_img_one, example_instruction, example_object_list, 1024, 0.8, 200]
         examples.append(example_one)

         gr.Examples(
@@ -188,5 +187,3 @@ with gr.Blocks(css='style.css') as demo:
     create_instruct_demo()

 demo.queue(api_open=True, concurrency_count=1).launch()
-
-
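For reference, here is a minimal runnable sketch of the Gradio wiring this commit touches, with the slider ceiling raised from 512 to 1024. It assumes Gradio 3.x (whose Blocks.queue() accepts api_open and concurrency_count); the instruct_generate stub, the component set, and the example row are simplified stand-ins for the app's actual code, which also takes a scene image and runs an instruction-tuned LLaMA-Adapter checkpoint.

# Minimal sketch (assumed Gradio 3.x); instruct_generate is a placeholder,
# not the app's real adapter-backed implementation.
import gradio as gr

def instruct_generate(instruction, object_list, max_len, temperature, top_k):
    # Placeholder: the real app generates text with the LLaMA-Adapter model.
    return f"instruction={instruction!r}, input={object_list!r}, max_len={max_len}"

with gr.Blocks() as demo:
    instruction = gr.Textbox(lines=2, label="Instruction")
    object_list = gr.Textbox(lines=5, label="Input")
    # This commit raises the slider maximum from 512 to 1024.
    max_len = gr.Slider(minimum=1, maximum=1024, value=128, label="Max length")
    with gr.Accordion(label='Advanced options', open=False):
        temp = gr.Slider(minimum=0, maximum=1, value=0.8, label="Temperature")
        top_k = gr.Slider(minimum=1, maximum=300, value=200, label="Top k")
    output = gr.Textbox(label="Output")
    run = gr.Button("Run")
    run.click(instruct_generate,
              inputs=[instruction, object_list, max_len, temp, top_k],
              outputs=output)
    # Example rows follow the same input order; 1024 mirrors the updated example value.
    gr.Examples(
        examples=[["Describe the scene.", "table, chair, sofa", 1024, 0.8, 200]],
        inputs=[instruction, object_list, max_len, temp, top_k])

demo.queue(api_open=True, concurrency_count=1).launch()

The higher ceiling only widens the range users may select; the default of value=128 is unchanged by this commit.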