tonic committed on
Commit
49bd6dc
1 Parent(s): b04a8aa

initial commit

Browse files

find the official demo here : https://modelscope.cn/studios/qwen/Qwen-VL-Chat-Demo/

Files changed (3) hide show
  1. .gitattributes +10 -11
  2. README.md +15 -11
  3. app.py +247 -0
.gitattributes CHANGED
@@ -1,35 +1,34 @@
1
  *.7z filter=lfs diff=lfs merge=lfs -text
2
  *.arrow filter=lfs diff=lfs merge=lfs -text
3
  *.bin filter=lfs diff=lfs merge=lfs -text
 
4
  *.bz2 filter=lfs diff=lfs merge=lfs -text
5
- *.ckpt filter=lfs diff=lfs merge=lfs -text
6
  *.ftz filter=lfs diff=lfs merge=lfs -text
7
  *.gz filter=lfs diff=lfs merge=lfs -text
8
  *.h5 filter=lfs diff=lfs merge=lfs -text
9
  *.joblib filter=lfs diff=lfs merge=lfs -text
10
  *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
  *.model filter=lfs diff=lfs merge=lfs -text
13
  *.msgpack filter=lfs diff=lfs merge=lfs -text
14
- *.npy filter=lfs diff=lfs merge=lfs -text
15
- *.npz filter=lfs diff=lfs merge=lfs -text
16
  *.onnx filter=lfs diff=lfs merge=lfs -text
17
  *.ot filter=lfs diff=lfs merge=lfs -text
18
  *.parquet filter=lfs diff=lfs merge=lfs -text
19
  *.pb filter=lfs diff=lfs merge=lfs -text
20
- *.pickle filter=lfs diff=lfs merge=lfs -text
21
- *.pkl filter=lfs diff=lfs merge=lfs -text
22
  *.pt filter=lfs diff=lfs merge=lfs -text
23
  *.pth filter=lfs diff=lfs merge=lfs -text
24
  *.rar filter=lfs diff=lfs merge=lfs -text
25
- *.safetensors filter=lfs diff=lfs merge=lfs -text
26
  saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
  *.tar.* filter=lfs diff=lfs merge=lfs -text
28
- *.tar filter=lfs diff=lfs merge=lfs -text
29
  *.tflite filter=lfs diff=lfs merge=lfs -text
30
  *.tgz filter=lfs diff=lfs merge=lfs -text
31
- *.wasm filter=lfs diff=lfs merge=lfs -text
32
  *.xz filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
- *.zst filter=lfs diff=lfs merge=lfs -text
35
- *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
1
  *.7z filter=lfs diff=lfs merge=lfs -text
2
  *.arrow filter=lfs diff=lfs merge=lfs -text
3
  *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
5
  *.bz2 filter=lfs diff=lfs merge=lfs -text
 
6
  *.ftz filter=lfs diff=lfs merge=lfs -text
7
  *.gz filter=lfs diff=lfs merge=lfs -text
8
  *.h5 filter=lfs diff=lfs merge=lfs -text
9
  *.joblib filter=lfs diff=lfs merge=lfs -text
10
  *.lfs.* filter=lfs diff=lfs merge=lfs -text
 
11
  *.model filter=lfs diff=lfs merge=lfs -text
12
  *.msgpack filter=lfs diff=lfs merge=lfs -text
 
 
13
  *.onnx filter=lfs diff=lfs merge=lfs -text
14
  *.ot filter=lfs diff=lfs merge=lfs -text
15
  *.parquet filter=lfs diff=lfs merge=lfs -text
16
  *.pb filter=lfs diff=lfs merge=lfs -text
 
 
17
  *.pt filter=lfs diff=lfs merge=lfs -text
18
  *.pth filter=lfs diff=lfs merge=lfs -text
19
  *.rar filter=lfs diff=lfs merge=lfs -text
 
20
  saved_model/**/* filter=lfs diff=lfs merge=lfs -text
21
  *.tar.* filter=lfs diff=lfs merge=lfs -text
 
22
  *.tflite filter=lfs diff=lfs merge=lfs -text
23
  *.tgz filter=lfs diff=lfs merge=lfs -text
 
24
  *.xz filter=lfs diff=lfs merge=lfs -text
25
  *.zip filter=lfs diff=lfs merge=lfs -text
26
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
27
+ *.tfevents* filter=lfs diff=lfs merge=lfs -text
28
+ *.db* filter=lfs diff=lfs merge=lfs -text
29
+ *.ark* filter=lfs diff=lfs merge=lfs -text
30
+ **/*ckpt*data* filter=lfs diff=lfs merge=lfs -text
31
+ **/*ckpt*.meta filter=lfs diff=lfs merge=lfs -text
32
+ **/*ckpt*.index filter=lfs diff=lfs merge=lfs -text
33
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
34
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,13 +1,17 @@
1
  ---
2
- title: VLChat OfficialDemo
3
- emoji: 📚
4
- colorFrom: blue
5
- colorTo: red
6
- sdk: gradio
7
- sdk_version: 4.7.1
8
- app_file: app.py
9
- pinned: false
10
- license: apache-2.0
 
 
11
  ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
1
  ---
2
+ domain:
3
+ - multi-modal
4
+ models:
5
+ - qwen/Qwen-VL-Chat
6
+ deployspec:
7
+ cpu: 11
8
+ gpu: 1
9
+ instance: 1
10
+ instance_type: ecs.gn6e-c12g1.3xlarge
11
+ memory: 80000
12
+ license: other
13
  ---
14
+ #### Clone with HTTP
15
+ ```bash
16
+ git clone https://www.modelscope.cn/studios/qwen/Qwen-VL-Chat-Demo.git
17
+ ```
app.py ADDED
@@ -0,0 +1,247 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Alibaba Cloud.
2
+ #
3
+ # This source code is licensed under the license found in the
4
+ # LICENSE file in the root directory of this source tree.
5
+ import os
6
+ os.system('pip install tiktoken')
7
+ os.system('pip install "modelscope" --upgrade -f https://pypi.org/project/modelscope/')
8
+ os.system('pip install transformers_stream_generator')
9
+
10
+ # os.environ['CUDA_VISIBLE_DEVICES'] = '0,1'
11
+ from argparse import ArgumentParser
12
+ from pathlib import Path
13
+
14
+ import copy
15
+ import gradio as gr
16
+ import os
17
+ import re
18
+ import secrets
19
+ import tempfile
20
+ from modelscope import (
21
+ AutoModelForCausalLM, AutoTokenizer, GenerationConfig, snapshot_download
22
+ )
23
+
24
+ DEFAULT_CKPT_PATH = 'qwen/Qwen-VL-Chat'
25
+ REVISION = 'v1.0.4'
26
+ BOX_TAG_PATTERN = r"<box>([\s\S]*?)</box>"
27
+ PUNCTUATION = "!?。"#$%&'()*+,-/:;<=>@[\]^_`{|}~⦅⦆「」、、〃》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…‧﹏."
28
+
29
+
30
+ def _get_args():
31
+ parser = ArgumentParser()
32
+ parser.add_argument("-c", "--checkpoint-path", type=str, default=DEFAULT_CKPT_PATH,
33
+ help="Checkpoint name or path, default to %(default)r")
34
+ parser.add_argument("--revision", type=str, default=REVISION)
35
+ parser.add_argument("--cpu-only", action="store_true", help="Run demo with CPU only")
36
+
37
+ parser.add_argument("--share", action="store_true", default=False,
38
+ help="Create a publicly shareable link for the interface.")
39
+ parser.add_argument("--inbrowser", action="store_true", default=False,
40
+ help="Automatically launch the interface in a new tab on the default browser.")
41
+ parser.add_argument("--server-port", type=int, default=8000,
42
+ help="Demo server port.")
43
+ parser.add_argument("--server-name", type=str, default="127.0.0.1",
44
+ help="Demo server name.")
45
+
46
+ args = parser.parse_args()
47
+ return args
48
+
49
+
50
+ def _load_model_tokenizer(args):
51
+ model_id = args.checkpoint_path
52
+ model_dir = snapshot_download(model_id, revision=args.revision)
53
+ tokenizer = AutoTokenizer.from_pretrained(
54
+ model_dir, trust_remote_code=True, resume_download=True,
55
+ )
56
+
57
+ if args.cpu_only:
58
+ device_map = "cpu"
59
+ else:
60
+ device_map = "auto"
61
+
62
+ model = AutoModelForCausalLM.from_pretrained(
63
+ model_dir,
64
+ device_map=device_map,
65
+ trust_remote_code=True,
66
+ resume_download=True,
67
+ ).eval()
68
+ model.generation_config = GenerationConfig.from_pretrained(
69
+ model_dir, trust_remote_code=True, resume_download=True,
70
+ )
71
+
72
+ return model, tokenizer
73
+
74
+
75
+ def _parse_text(text):
76
+ lines = text.split("\n")
77
+ lines = [line for line in lines if line != ""]
78
+ count = 0
79
+ for i, line in enumerate(lines):
80
+ if "```" in line:
81
+ count += 1
82
+ items = line.split("`")
83
+ if count % 2 == 1:
84
+ lines[i] = f'<pre><code class="language-{items[-1]}">'
85
+ else:
86
+ lines[i] = f"<br></code></pre>"
87
+ else:
88
+ if i > 0:
89
+ if count % 2 == 1:
90
+ line = line.replace("`", r"\`")
91
+ line = line.replace("<", "&lt;")
92
+ line = line.replace(">", "&gt;")
93
+ line = line.replace(" ", "&nbsp;")
94
+ line = line.replace("*", "&ast;")
95
+ line = line.replace("_", "&lowbar;")
96
+ line = line.replace("-", "&#45;")
97
+ line = line.replace(".", "&#46;")
98
+ line = line.replace("!", "&#33;")
99
+ line = line.replace("(", "&#40;")
100
+ line = line.replace(")", "&#41;")
101
+ line = line.replace("$", "&#36;")
102
+ lines[i] = "<br>" + line
103
+ text = "".join(lines)
104
+ return text
105
+
106
+
107
def _launch_demo(args, model, tokenizer):
    """Build the Gradio chat UI around *model*/*tokenizer* and start serving.

    Two parallel histories are kept: the Chatbot widget's display history
    (HTML-rendered) and ``task_history`` (raw text / image-path tuples fed
    back to ``model.chat``). Generated bounding boxes, if any, are drawn on
    the latest picture and saved under the Gradio temp dir.
    """
    uploaded_file_dir = os.environ.get("GRADIO_TEMP_DIR") or str(
        Path(tempfile.gettempdir()) / "gradio"
    )

    def predict(_chatbot, task_history):
        """Run one chat turn and append the model's reply to the display history."""
        chat_query = _chatbot[-1][0]
        query = task_history[-1][0]
        print("User: " + _parse_text(query))
        history_cp = copy.deepcopy(task_history)
        full_response = ""

        # Fold image uploads into the following text turn: Qwen-VL expects
        # inline "Picture i: <img>path</img>" markers before the question.
        history_filter = []
        pic_idx = 1
        pre = ""
        for q, a in history_cp:
            if isinstance(q, (tuple, list)):
                q = f'Picture {pic_idx}: <img>{q[0]}</img>'
                pre += q + '\n'
                pic_idx += 1
            else:
                pre += q
                history_filter.append((pre, a))
                pre = ""
        history, message = history_filter[:-1], history_filter[-1][0]
        response, history = model.chat(tokenizer, message, history=history)
        # If the reply contains box coordinates, render them on the image.
        image = tokenizer.draw_bbox_on_latest_picture(response, history)
        if image is not None:
            temp_dir = secrets.token_hex(20)
            temp_dir = Path(uploaded_file_dir) / temp_dir
            temp_dir.mkdir(exist_ok=True, parents=True)
            name = f"tmp{secrets.token_hex(5)}.jpg"
            filename = temp_dir / name
            image.save(str(filename))
            _chatbot[-1] = (_parse_text(chat_query), (str(filename),))
            # Strip the ref/box markup from the text shown alongside the image.
            chat_response = response.replace("<ref>", "")
            chat_response = chat_response.replace(r"</ref>", "")
            chat_response = re.sub(BOX_TAG_PATTERN, "", chat_response)
            if chat_response != "":
                _chatbot.append((None, chat_response))
        else:
            _chatbot[-1] = (_parse_text(chat_query), response)
            full_response = _parse_text(response)

        task_history[-1] = (query, full_response)
        print("Qwen-VL-Chat: " + _parse_text(full_response))
        # FIX: trim in place. The original `task_history = task_history[-10:]`
        # rebound a local name and never shrank the shared gr.State list.
        task_history[:] = task_history[-10:]
        return _chatbot

    def regenerate(_chatbot, task_history):
        """Drop the last answer and re-run predict() on the same query."""
        if not task_history:
            return _chatbot
        item = task_history[-1]
        if item[1] is None:
            return _chatbot
        task_history[-1] = (item[0], None)
        chatbot_item = _chatbot.pop(-1)
        if chatbot_item[0] is None:
            # Last display row was a text-only follow-up; clear the row before it.
            _chatbot[-1] = (_chatbot[-1][0], None)
        else:
            _chatbot.append((chatbot_item[0], None))
        return predict(_chatbot, task_history)

    def add_text(history, task_history, text):
        """Append a user query; trailing punctuation is stripped for the model."""
        task_text = text
        if len(text) >= 2 and text[-1] in PUNCTUATION and text[-2] not in PUNCTUATION:
            task_text = text[:-1]
        history = history + [(_parse_text(text), None)]
        task_history = task_history + [(task_text, None)]
        return history, task_history, ""

    def add_file(history, task_history, file):
        """Append an uploaded image (stored as a one-element path tuple)."""
        history = history + [((file.name,), None)]
        task_history = task_history + [((file.name,), None)]
        return history, task_history

    def reset_user_input():
        """Clear the input textbox."""
        return gr.update(value="")

    def reset_state(task_history):
        """Clear both the model history (in place) and the displayed chat."""
        task_history.clear()
        return []

    with gr.Blocks() as demo:
        gr.Markdown("""\
<p align="center"><img src="https://modelscope.cn/api/v1/models/qwen/Qwen-VL-Chat/repo?Revision=master&FilePath=assets/logo.jpg&View=true" style="height: 80px"/><p>""")
        gr.Markdown("""<center><font size=8>Qwen-VL-Chat Bot</center>""")
        gr.Markdown(
            """\
<center><font size=3>This WebUI is based on Qwen-VL-Chat, developed by Alibaba Cloud. \
(本WebUI基于Qwen-VL-Chat打造,实现聊天机器人功能。)</center>""")
        gr.Markdown("""\
<center><font size=4>Qwen-VL <a href="https://modelscope.cn/models/qwen/Qwen-VL/summary">🤖 </a> 
| <a href="https://huggingface.co/Qwen/Qwen-VL">🤗</a>&nbsp | 
Qwen-VL-Chat <a href="https://modelscope.cn/models/qwen/Qwen-VL-Chat/summary">🤖 </a> | 
<a href="https://huggingface.co/Qwen/Qwen-VL-Chat">🤗</a>&nbsp | 
&nbsp<a href="https://github.com/QwenLM/Qwen-VL">Github</a></center>""")

        # FIX: Gradio 4 removed Component.style(); pass height directly
        # (README pins sdk_version 4.7.1, where .style() raises).
        chatbot = gr.Chatbot(label='Qwen-VL-Chat', elem_classes="control-height", height=500)
        query = gr.Textbox(lines=2, label='Input')
        task_history = gr.State([])

        with gr.Row():
            addfile_btn = gr.UploadButton("📁 Upload (上传文件)", file_types=["image"])
            submit_btn = gr.Button("🚀 Submit (发送)")
            regen_btn = gr.Button("🤔️ Regenerate (重试)")
            empty_bin = gr.Button("🧹 Clear History (清除历史)")

        submit_btn.click(add_text, [chatbot, task_history, query], [chatbot, task_history]).then(
            predict, [chatbot, task_history], [chatbot], show_progress=True
        )
        submit_btn.click(reset_user_input, [], [query])
        empty_bin.click(reset_state, [task_history], [chatbot], show_progress=True)
        regen_btn.click(regenerate, [chatbot, task_history], [chatbot], show_progress=True)
        addfile_btn.upload(add_file, [chatbot, task_history, addfile_btn], [chatbot, task_history], show_progress=True)

        gr.Markdown("""\
<font size=2>Note: This demo is governed by the original license of Qwen-VL. \
We strongly advise users not to knowingly generate or allow others to knowingly generate harmful content, \
including hate speech, violence, pornography, deception, etc. \
(注:本演示受Qwen-VL的许可协议限制。我们强烈建议,用户不应传播及不应允许他人传播以下内容,\
包括但不限于仇恨言论、暴力、色情、欺诈相关的有害信息。)""")

    demo.queue().launch(
        share=args.share,
        inbrowser=args.inbrowser,
        server_port=args.server_port,
        server_name=args.server_name,
    )
236
+
237
+
238
def main():
    """Script entry point: parse CLI options, load the model, serve the demo."""
    args = _get_args()
    model, tokenizer = _load_model_tokenizer(args)
    _launch_demo(args, model, tokenizer)


if __name__ == '__main__':
    main()