xuxw98 committed
Commit 5d62d66
1 Parent(s): 7a01afc

Upload app.py

Files changed (1)
  1. app.py +110 -0
app.py ADDED
@@ -0,0 +1,110 @@
+import sys
+import time
+import warnings
+from pathlib import Path
+
+# Set up the Hugging Face environment
+from huggingface_hub import hf_hub_download
+import gradio as gr
+import os
+import glob
+import json
+
+# os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
+# torch.set_float32_matmul_precision("high")
+
+
+def instruct_generate(
+    img_path: str = " ",
+    prompt: str = "What food do llamas eat?",
+    input: str = "",
+    max_new_tokens: int = 100,
+    top_k: int = 200,
+    temperature: float = 0.8,
+) -> str:
+    """Generates a response based on a given instruction and an optional input.
+
+    This will only work with checkpoints from the instruction-tuned LLaMA-Adapter
+    model. See `finetune_adapter.py`.
+
+    Args:
+        img_path: Path to the scene image.
+        prompt: The prompt/instruction (Alpaca style).
+        input: Optional input (Alpaca style).
+        max_new_tokens: The number of generation steps to take.
+        top_k: The number of top most probable tokens to consider in the sampling process.
+        temperature: A value controlling the randomness of the sampling process.
+            Higher values result in more random samples.
+    """
+    output = [prompt, input, max_new_tokens, top_k, temperature]
+    print(output)
+    return str(output)
+
+
+# Configure the main parameters
+example_path = "example.json"
+# If 1024 is not enough, adjust it to 512
+max_seq_len = 1024
+max_batch_size = 1
+
+with open(example_path, 'r') as f:
+    example_dict = json.load(f)
+
+
+def create_instruct_demo():
+    with gr.Blocks() as instruct_demo:
+        with gr.Row():
+            with gr.Column():
+                scene_img = gr.Image(label='Scene', type='filepath')
+                object_list = gr.Textbox(lines=2, label="Input")
+                instruction = gr.Textbox(lines=2, label="Instruction")
+                max_len = gr.Slider(minimum=1, maximum=512,
+                                    value=128, label="Max length")
+                with gr.Accordion(label='Advanced options', open=False):
+                    temp = gr.Slider(minimum=0, maximum=1,
+                                     value=0.8, label="Temperature")
+                    top_k = gr.Slider(minimum=100, maximum=300,
+                                      value=200, label="Top k")
+
+                run_button = gr.Button("Run")
+
+            with gr.Column():
+                outputs = gr.Textbox(lines=10, label="Output")
+
+        # Order must match the signature of `instruct_generate`
+        inputs = [scene_img, instruction, object_list, max_len, top_k, temp]
+
+        # Next, set up the concrete example format
+        examples_img_list = glob.glob("caption_demo/*.png")
+        examples = []
+        for example_img_one in examples_img_list:
+            scene_name = os.path.basename(example_img_one).split(".")[0]
+            example_object_list = example_dict[scene_name]["input"]
+            example_instruction = example_dict[scene_name]["instruction"]
+            example_one = [example_img_one, example_instruction,
+                           example_object_list, 512, 200, 0.8]
+            examples.append(example_one)
+
+        gr.Examples(
+            examples=examples,
+            inputs=inputs,
+            outputs=outputs,
+            fn=instruct_generate,
+            cache_examples=os.getenv('SYSTEM') == 'spaces'
+        )
+        run_button.click(fn=instruct_generate, inputs=inputs, outputs=outputs)
+    return instruct_demo


+# Please refer to our [arXiv paper](https://arxiv.org/abs/2303.16199) and [github](https://github.com/ZrrSkywalker/LLaMA-Adapter) for more details.
+description = """
+# TaPA
+The official demo for **Embodied Task Planning with Large Language Models**.
+"""
+
+with gr.Blocks(css='style.css') as demo:
+    gr.Markdown(description)
+    with gr.TabItem("Instruction-Following"):
+        create_instruct_demo()
+
+demo.queue(api_open=True, concurrency_count=1).launch()
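
Note: `create_instruct_demo` assumes `example.json` maps each scene name (the stem of a PNG under `caption_demo/`) to an object with `input` and `instruction` fields. A minimal sketch of a compatible file is below; the scene names and texts are hypothetical placeholders, not repo data:

    import json

    # Illustrative only: scene names and texts are placeholders.
    examples = {
        "kitchen_1": {
            "input": "[fridge, microwave, bowl, apple]",
            "instruction": "Please give me a plan to heat the apple.",
        },
    }

    # Write the file next to app.py so `example_path` resolves.
    with open("example.json", "w") as f:
        json.dump(examples, f, indent=2, ensure_ascii=False)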
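
As uploaded, `instruct_generate` is a stub that echoes its arguments back into the output textbox. One way to back it with a real model is sketched below using the `transformers` generation API as a stand-in; this is an assumption, not the repo's method, which loads instruction-tuned LLaMA-Adapter checkpoints (see `finetune_adapter.py`). The `gpt2` model name and the Alpaca-style template are placeholders, and image handling is omitted:

    # A minimal sketch, assuming a Hub causal LM stands in for the TaPA checkpoint.
    from transformers import AutoModelForCausalLM, AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("gpt2")  # placeholder model name
    model = AutoModelForCausalLM.from_pretrained("gpt2")

    def instruct_generate(img_path: str, prompt: str, input: str = "",
                          max_new_tokens: int = 100, top_k: int = 200,
                          temperature: float = 0.8) -> str:
        # Alpaca-style prompt template (an assumption); img_path is ignored here.
        text = (f"### Instruction:\n{prompt}\n\n"
                f"### Input:\n{input}\n\n### Response:\n")
        ids = tokenizer(text, return_tensors="pt").input_ids
        out = model.generate(ids, max_new_tokens=max_new_tokens, do_sample=True,
                             top_k=top_k, temperature=temperature)
        # Decode only the newly generated tokens, not the prompt.
        return tokenizer.decode(out[0][ids.shape[1]:], skip_special_tokens=True)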