wuhaibo commited on
Commit
b975ab1
1 Parent(s): 17e63e8

Upload folder using huggingface_hub

Browse files
.idea/.gitignore ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ # 默认忽略的文件
2
+ /shelf/
3
+ /workspace.xml
4
+ # 基于编辑器的 HTTP 客户端请求
5
+ /httpRequests/
6
+ # Datasource local storage ignored files
7
+ /dataSources/
8
+ /dataSources.local.xml
.idea/Qwen-7B-Chat.iml ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ <?xml version="1.0" encoding="UTF-8"?>
2
+ <module type="PYTHON_MODULE" version="4">
3
+ <component name="NewModuleRootManager">
4
+ <content url="file://$MODULE_DIR$" />
5
+ <orderEntry type="inheritedJdk" />
6
+ <orderEntry type="sourceFolder" forTests="false" />
7
+ </component>
8
+ </module>
.idea/inspectionProfiles/Project_Default.xml ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ <component name="InspectionProjectProfileManager">
2
+ <profile version="1.0">
3
+ <option name="myName" value="Project Default" />
4
+ <inspection_tool class="Eslint" enabled="true" level="WARNING" enabled_by_default="true" />
5
+ </profile>
6
+ </component>
.idea/inspectionProfiles/profiles_settings.xml ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ <component name="InspectionProjectProfileManager">
2
+ <settings>
3
+ <option name="USE_PROJECT_PROFILE" value="false" />
4
+ <version value="1.0" />
5
+ </settings>
6
+ </component>
.idea/misc.xml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ <?xml version="1.0" encoding="UTF-8"?>
2
+ <project version="4">
3
+ <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.10" project-jdk-type="Python SDK" />
4
+ </project>
.idea/modules.xml ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ <?xml version="1.0" encoding="UTF-8"?>
2
+ <project version="4">
3
+ <component name="ProjectModuleManager">
4
+ <modules>
5
+ <module fileurl="file://$PROJECT_DIR$/.idea/Qwen-7B-Chat.iml" filepath="$PROJECT_DIR$/.idea/Qwen-7B-Chat.iml" />
6
+ </modules>
7
+ </component>
8
+ </project>
.idea/workspace.xml ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <?xml version="1.0" encoding="UTF-8"?>
2
+ <project version="4">
3
+ <component name="AutoImportSettings">
4
+ <option name="autoReloadType" value="SELECTIVE" />
5
+ </component>
6
+ <component name="ChangeListManager">
7
+ <list default="true" id="c6bbbaf3-da0f-48b6-9ae8-dda48c98f649" name="更改" comment="" />
8
+ <option name="SHOW_DIALOG" value="false" />
9
+ <option name="HIGHLIGHT_CONFLICTS" value="true" />
10
+ <option name="HIGHLIGHT_NON_ACTIVE_CHANGELIST" value="false" />
11
+ <option name="LAST_RESOLUTION" value="IGNORE" />
12
+ </component>
13
+ <component name="FileTemplateManagerImpl">
14
+ <option name="RECENT_TEMPLATES">
15
+ <list>
16
+ <option value="Python Script" />
17
+ </list>
18
+ </option>
19
+ </component>
20
+ <component name="ProjectColorInfo">{
21
+ &quot;associatedIndex&quot;: 7
22
+ }</component>
23
+ <component name="ProjectId" id="2Uh1hbG7YmTBJqsinY7tHj3TCLM" />
24
+ <component name="ProjectViewState">
25
+ <option name="hideEmptyMiddlePackages" value="true" />
26
+ <option name="showLibraryContents" value="true" />
27
+ </component>
28
+ <component name="PropertiesComponent">{
29
+ &quot;keyToString&quot;: {
30
+ &quot;RunOnceActivity.OpenProjectViewOnStart&quot;: &quot;true&quot;,
31
+ &quot;RunOnceActivity.ShowReadmeOnStart&quot;: &quot;true&quot;,
32
+ &quot;WebServerToolWindowFactoryState&quot;: &quot;false&quot;,
33
+ &quot;ignore.virus.scanning.warn.message&quot;: &quot;true&quot;,
34
+ &quot;last_opened_file_path&quot;: &quot;D:/git/Qwen-7B-Chat&quot;,
35
+ &quot;node.js.detected.package.eslint&quot;: &quot;true&quot;,
36
+ &quot;node.js.detected.package.tslint&quot;: &quot;true&quot;,
37
+ &quot;node.js.selected.package.eslint&quot;: &quot;(autodetect)&quot;,
38
+ &quot;node.js.selected.package.tslint&quot;: &quot;(autodetect)&quot;,
39
+ &quot;vue.rearranger.settings.migration&quot;: &quot;true&quot;
40
+ }
41
+ }</component>
42
+ <component name="SpellCheckerSettings" RuntimeDictionaries="0" Folders="0" CustomDictionaries="0" DefaultDictionary="应用程序级" UseSingleDictionary="true" transferred="true" />
43
+ <component name="TaskManager">
44
+ <task active="true" id="Default" summary="默认任务">
45
+ <changelist id="c6bbbaf3-da0f-48b6-9ae8-dda48c98f649" name="更改" comment="" />
46
+ <created>1693377117980</created>
47
+ <option name="number" value="Default" />
48
+ <option name="presentableId" value="Default" />
49
+ <updated>1693377117980</updated>
50
+ <workItem from="1693377119067" duration="1095000" />
51
+ <workItem from="1693378333675" duration="5827000" />
52
+ </task>
53
+ <servers />
54
+ </component>
55
+ <component name="TypeScriptGeneratedFilesManager">
56
+ <option name="version" value="3" />
57
+ </component>
58
+ <component name="com.intellij.coverage.CoverageDataManagerImpl">
59
+ <SUITE FILE_PATH="coverage/Qwen_7B_Chat$Qwen_7B_Chat.coverage" NAME="Qwen-7B-Chat 覆盖结果" MODIFIED="1693378440996" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
60
+ <SUITE FILE_PATH="coverage/Qwen_7B_Chat$web_demo.coverage" NAME="web_demo 覆盖结果" MODIFIED="1693384340803" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
61
+ </component>
62
+ </project>
Qwen-7B-Chat.py ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# --- Model setup (runs at import time) ---
import os
import platform
# ModelScope mirrors the transformers Auto* API for Qwen checkpoints.
from modelscope import AutoModelForCausalLM, AutoTokenizer, GenerationConfig

# Pinned ModelScope model id and revision so downloads are reproducible.
model_id = 'qwen/Qwen-7B-Chat'
revision = 'v1.0.5'
tokenizer = AutoTokenizer.from_pretrained(model_id, revision=revision, trust_remote_code=True)
# use fp16
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", revision=revision,
                                             trust_remote_code=True, fp16=True).eval()
model.generation_config = GenerationConfig.from_pretrained(model_id,
                                                           trust_remote_code=True)  # generation hyperparameters (length, top_p, etc.) can be customized here

# Flag polled by main()'s streaming loop to abort generation early.
# NOTE(review): nothing in this file ever sets it to True — confirm whether
# a Ctrl+C / signal handler was intended.
stop_stream = False
15
+
16
+
17
def clear_screen():
    """Clear the terminal using the platform-appropriate shell command."""
    command = "cls" if platform.system() == "Windows" else "clear"
    os.system(command)
22
+
23
+
24
def print_history(history):
    """Replay a conversation transcript (list of (user, reply) pairs) to stdout."""
    for user_text, bot_text in history:
        print(f"\nUser:{user_text}\nQwen-7B:{bot_text}")
27
+
28
+
29
def main():
    """Run an interactive terminal chat loop against the loaded Qwen-7B-Chat model.

    Commands: 'stop' exits the program; 'clear' wipes the conversation history.
    Any other input is streamed to the model; the screen is redrawn with the
    full transcript on every streamed chunk.
    """
    # Banner shown at startup and again after 'clear' — kept in one place
    # (it was duplicated verbatim before).
    welcome = "欢迎使用 Qwen-7B 模型,输入内容即可进行对话,clear 清空对话历史,stop 终止程序"
    history, response = [], ''
    # NOTE(review): stop_stream is read here but nothing in this file ever
    # sets it to True — confirm whether an interrupt handler was intended.
    global stop_stream
    clear_screen()
    print(welcome)
    while True:
        query = input("\nUser:")
        if query.strip() == "stop":
            break
        if query.strip() == "clear":
            history = []
            clear_screen()
            print(welcome)
            continue
        for response in model.chat_stream(tokenizer, query, history=history):
            if stop_stream:
                stop_stream = False
                break
            else:
                # Redraw the whole transcript so the streamed reply grows in place.
                clear_screen()
                print_history(history)
                print(f"\nUser: {query}")
                print("\nQwen-7B:", end="")
                print(response)

        history.append((query, response))


if __name__ == "__main__":
    main()
README.md CHANGED
@@ -1,12 +1,6 @@
1
  ---
2
- title: Qwen 7B Chat
3
- emoji: 🏃
4
- colorFrom: gray
5
- colorTo: gray
6
  sdk: gradio
7
  sdk_version: 3.41.2
8
- app_file: app.py
9
- pinned: false
10
  ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
  ---
2
+ title: Qwen-7B-Chat
3
+ app_file: web_demo.py
 
 
4
  sdk: gradio
5
  sdk_version: 3.41.2
 
 
6
  ---
 
 
web_demo.py ADDED
@@ -0,0 +1,212 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Alibaba Cloud.
2
+ #
3
+ # This source code is licensed under the license found in the
4
+ # LICENSE file in the root directory of this source tree.
5
+
6
+ """A simple web interactive chat demo based on gradio."""
7
+ import os
8
+ from argparse import ArgumentParser
9
+
10
+ import gradio as gr
11
+ import mdtex2html
12
+
13
+ import torch
14
+ from transformers import AutoModelForCausalLM, AutoTokenizer
15
+ from transformers.generation import GenerationConfig
16
+
17
+
18
+ DEFAULT_CKPT_PATH = 'Qwen/Qwen-7B-Chat'
19
+
20
+
21
+ def _get_args():
22
+ parser = ArgumentParser()
23
+ parser.add_argument("-c", "--checkpoint-path", type=str, default=DEFAULT_CKPT_PATH,
24
+ help="Checkpoint name or path, default to %(default)r")
25
+ parser.add_argument("--cpu-only", action="store_true", help="Run demo with CPU only")
26
+
27
+ parser.add_argument("--share", action="store_true", default=False,
28
+ help="Create a publicly shareable link for the interface.")
29
+ parser.add_argument("--inbrowser", action="store_true", default=False,
30
+ help="Automatically launch the interface in a new tab on the default browser.")
31
+ parser.add_argument("--server-port", type=int, default=8000,
32
+ help="Demo server port.")
33
+ parser.add_argument("--server-name", type=str, default="127.0.0.1",
34
+ help="Demo server name.")
35
+
36
+ args = parser.parse_args()
37
+ return args
38
+
39
+
40
def _load_model_tokenizer(args):
    """Load tokenizer, model, and generation config from ``args.checkpoint_path``.

    Uses an AutoGPTQ quantized load when a ``quantize_config.json`` sits next
    to the checkpoint; otherwise a plain transformers load. Device placement
    follows ``args.cpu_only``. Returns ``(model, tokenizer, config)``.
    """
    ckpt = args.checkpoint_path
    tokenizer = AutoTokenizer.from_pretrained(
        ckpt, trust_remote_code=True, resume_download=True,
    )

    device_map = "cpu" if args.cpu_only else "auto"

    if os.path.exists(os.path.join(ckpt, 'quantize_config.json')):
        # Quantized checkpoint: must go through AutoGPTQ, not transformers.
        from auto_gptq import AutoGPTQForCausalLM
        model = AutoGPTQForCausalLM.from_quantized(
            ckpt,
            device_map=device_map,
            trust_remote_code=True,
            resume_download=True,
            use_safetensors=True,
        ).eval()
    else:
        model = AutoModelForCausalLM.from_pretrained(
            ckpt,
            device_map=device_map,
            trust_remote_code=True,
            resume_download=True,
        ).eval()

    config = GenerationConfig.from_pretrained(
        ckpt, trust_remote_code=True, resume_download=True,
    )
    return model, tokenizer, config
73
+
74
+
75
def postprocess(self, y):
    """Chatbot postprocess override: render each (message, reply) pair via mdtex2html.

    Mutates ``y`` in place and returns it; ``None`` entries pass through, and a
    ``None`` input yields an empty list.
    """
    if y is None:
        return []
    for idx, (question, answer) in enumerate(y):
        y[idx] = (
            mdtex2html.convert(question) if question is not None else None,
            mdtex2html.convert(answer) if answer is not None else None,
        )
    return y


# Install the Markdown/LaTeX-aware renderer on gradio's Chatbot component.
gr.Chatbot.postprocess = postprocess
87
+
88
+
89
+ def _parse_text(text):
90
+ lines = text.split("\n")
91
+ lines = [line for line in lines if line != ""]
92
+ count = 0
93
+ for i, line in enumerate(lines):
94
+ if "```" in line:
95
+ count += 1
96
+ items = line.split("`")
97
+ if count % 2 == 1:
98
+ lines[i] = f'<pre><code class="language-{items[-1]}">'
99
+ else:
100
+ lines[i] = f"<br></code></pre>"
101
+ else:
102
+ if i > 0:
103
+ if count % 2 == 1:
104
+ line = line.replace("`", r"\`")
105
+ line = line.replace("<", "&lt;")
106
+ line = line.replace(">", "&gt;")
107
+ line = line.replace(" ", "&nbsp;")
108
+ line = line.replace("*", "&ast;")
109
+ line = line.replace("_", "&lowbar;")
110
+ line = line.replace("-", "&#45;")
111
+ line = line.replace(".", "&#46;")
112
+ line = line.replace("!", "&#33;")
113
+ line = line.replace("(", "&#40;")
114
+ line = line.replace(")", "&#41;")
115
+ line = line.replace("$", "&#36;")
116
+ lines[i] = "<br>" + line
117
+ text = "".join(lines)
118
+ return text
119
+
120
+
121
def _launch_demo(args, model, tokenizer, config):
    """Build the gradio Blocks UI for Qwen-7B-Chat and launch the demo server.

    Args:
        args: parsed CLI options (share / inbrowser / server_port / server_name).
        model, tokenizer, config: objects returned by ``_load_model_tokenizer``.
    """

    def predict(_query, _chatbot, _task_history):
        # Stream the model reply, updating the last chatbot row per chunk.
        print(f"User: {_parse_text(_query)}")
        _chatbot.append((_parse_text(_query), ""))
        full_response = ""

        for response in model.chat_stream(tokenizer, _query, history=_task_history, generation_config=config):
            _chatbot[-1] = (_parse_text(_query), _parse_text(response))

            yield _chatbot
            full_response = response

        print(f"History: {_task_history}")
        # Store the *raw* reply: _task_history is fed back into
        # model.chat_stream as context, and the previous code stored the
        # HTML-escaped text (_parse_text output), corrupting that context.
        _task_history.append((_query, full_response))
        print(f"Qwen-7B-Chat: {_parse_text(full_response)}")

    def regenerate(_chatbot, _task_history):
        # Drop the last exchange and replay the same query.
        if not _task_history:
            yield _chatbot
            return
        item = _task_history.pop(-1)
        _chatbot.pop(-1)
        yield from predict(item[0], _chatbot, _task_history)

    def reset_user_input():
        return gr.update(value="")

    def reset_state(_chatbot, _task_history):
        # Clear both the UI transcript and the model-side history, then
        # release cached GPU memory.
        _task_history.clear()
        _chatbot.clear()
        import gc
        gc.collect()
        torch.cuda.empty_cache()
        return _chatbot

    with gr.Blocks() as demo:
        gr.Markdown("""\
<p align="center"><img src="https://modelscope.cn/api/v1/models/qwen/Qwen-7B-Chat/repo?
Revision=master&FilePath=assets/logo.jpeg&View=true" style="height: 80px"/><p>""")
        gr.Markdown("""<center><font size=8>Qwen-7B-Chat Bot</center>""")
        gr.Markdown(
            """\
<center><font size=3>This WebUI is based on Qwen-7B-Chat, developed by Alibaba Cloud. \
(本WebUI基于Qwen-7B-Chat打造,实现聊天机器人功能。)</center>""")
        gr.Markdown("""\
<center><font size=4>Qwen-7B <a href="https://modelscope.cn/models/qwen/Qwen-7B/summary">🤖 </a>
| <a href="https://huggingface.co/Qwen/Qwen-7B">🤗</a>&nbsp |
Qwen-7B-Chat <a href="https://modelscope.cn/models/qwen/Qwen-7B-Chat/summary">🤖 </a> |
<a href="https://huggingface.co/Qwen/Qwen-7B-Chat">🤗</a>&nbsp |
&nbsp<a href="https://github.com/QwenLM/Qwen-7B">Github</a></center>""")

        chatbot = gr.Chatbot(label='Qwen-7B-Chat', elem_classes="control-height")
        query = gr.Textbox(lines=2, label='Input')
        task_history = gr.State([])

        with gr.Row():
            empty_btn = gr.Button("🧹 Clear History (清除历史)")
            submit_btn = gr.Button("🚀 Submit (发送)")
            regen_btn = gr.Button("🤔️ Regenerate (重试)")

        submit_btn.click(predict, [query, chatbot, task_history], [chatbot], show_progress=True)
        submit_btn.click(reset_user_input, [], [query])
        empty_btn.click(reset_state, [chatbot, task_history], outputs=[chatbot], show_progress=True)
        regen_btn.click(regenerate, [chatbot, task_history], [chatbot], show_progress=True)

        gr.Markdown("""\
<font size=2>Note: This demo is governed by the original license of Qwen-7B. \
We strongly advise users not to knowingly generate or allow others to knowingly generate harmful content, \
including hate speech, violence, pornography, deception, etc. \
(注:本演示受Qwen-7B的许可协议限制。我们强烈建议,用户不应传播及不应允许他人传播以下内容,\
包括但不限于仇恨言论、暴力、色情、欺诈相关的有害信息。)""")

    demo.queue().launch(
        # Honor the --share CLI flag: it was previously hardcoded to True,
        # silently ignoring the documented option.
        share=args.share,
        inbrowser=args.inbrowser,
        server_port=args.server_port,
        server_name=args.server_name,
    )
201
+
202
+
203
def main():
    """Entry point: parse CLI args, load the model stack, and start the web UI."""
    args = _get_args()
    model, tokenizer, config = _load_model_tokenizer(args)
    _launch_demo(args, model, tokenizer, config)


if __name__ == '__main__':
    main()