Havi999 committed on
Commit bcefde4 · 1 Parent(s): e1f7a57

Upload folder using huggingface_hub

Files changed (3)
  1. README.md +2 -8
  2. requirements.txt +9 -0
  3. web_demo.py +115 -0
README.md CHANGED
@@ -1,12 +1,6 @@
 ---
-title: Tongyi
-emoji: 💻
-colorFrom: pink
-colorTo: red
+title: tongyi
+app_file: web_demo.py
 sdk: gradio
 sdk_version: 3.40.1
-app_file: app.py
-pinned: false
 ---
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
requirements.txt ADDED
@@ -0,0 +1,9 @@
+protobuf
+transformers==4.27.1
+cpm_kernels
+torch>=1.10
+gradio
+mdtex2html
+sentencepiece
+accelerate
+safetensors
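
For a quick local sanity check that the pinned stack resolved as expected, something like the following can be run before launching the demo (a minimal sketch, not part of the Space itself):

    import torch
    import transformers

    print("transformers:", transformers.__version__)     # pinned to 4.27.1 above
    print("torch:", torch.__version__)                   # requirements ask for >= 1.10
    print("CUDA available:", torch.cuda.is_available())  # whether device_map="auto" can place layers on a GPU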
web_demo.py ADDED
@@ -0,0 +1,115 @@
+from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig
+import torch
+
+import gradio as gr
+import mdtex2html
+
+
+tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen-7B-Chat", trust_remote_code=True)
+
+# use bf16
+# model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen-7B-Chat", device_map="auto", trust_remote_code=True, bf16=True).eval()
+# use fp16
+# model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen-7B-Chat", device_map="auto", trust_remote_code=True, fp16=True).eval()
+# use cpu only
+# model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen-7B-Chat", device_map="cpu", trust_remote_code=True).eval()
+# use auto mode, automatically select precision based on the device.
+model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen-7B-Chat", device_map="auto", trust_remote_code=True).eval()
+
+# Specify hyperparameters for generation (generation length, top_p and other
+# hyperparameters can be customized here).
+model.generation_config = GenerationConfig.from_pretrained("Qwen/Qwen-7B-Chat", trust_remote_code=True)
+
+"""Override Chatbot.postprocess"""
+
+
+def postprocess(self, y):
+    # Render both sides of each (message, response) pair as HTML via mdtex2html.
+    if y is None:
+        return []
+    for i, (message, response) in enumerate(y):
+        y[i] = (
+            None if message is None else mdtex2html.convert(message),
+            None if response is None else mdtex2html.convert(response),
+        )
+    return y
+
+
+gr.Chatbot.postprocess = postprocess
+
+
+def parse_text(text):
+    """Copied from https://github.com/GaiZhenbiao/ChuanhuChatGPT/"""
+    lines = text.split("\n")
+    lines = [line for line in lines if line != ""]
+    count = 0
+    for i, line in enumerate(lines):
+        if "```" in line:
+            # Toggle between opening and closing a fenced code block.
+            count += 1
+            items = line.split('`')
+            if count % 2 == 1:
+                lines[i] = f'<pre><code class="language-{items[-1]}">'
+            else:
+                lines[i] = '<br></code></pre>'
+        else:
+            if i > 0:
+                if count % 2 == 1:
+                    # Inside a code block: escape characters that would
+                    # otherwise be interpreted as Markdown/HTML.
+                    line = line.replace("`", r"\`")
+                    line = line.replace("<", "&lt;")
+                    line = line.replace(">", "&gt;")
+                    line = line.replace(" ", "&nbsp;")
+                    line = line.replace("*", "&ast;")
+                    line = line.replace("_", "&lowbar;")
+                    line = line.replace("-", "&#45;")
+                    line = line.replace(".", "&#46;")
+                    line = line.replace("!", "&#33;")
+                    line = line.replace("(", "&#40;")
+                    line = line.replace(")", "&#41;")
+                    line = line.replace("$", "&#36;")
+                lines[i] = "<br>" + line
+    text = "".join(lines)
+    return text
+
+
+def predict(input, chatbot, max_length, top_p, temperature, history):
+    chatbot.append((parse_text(input), ""))
+    # Qwen's remote code exposes `chat_stream`, not the ChatGLM-style
+    # `stream_chat` this demo was adapted from; the UI sliders are applied
+    # through the model's generation config before streaming.
+    model.generation_config.max_length = max_length
+    model.generation_config.top_p = top_p
+    model.generation_config.temperature = temperature
+    response = ""
+    for response in model.chat_stream(tokenizer, input, history=history):
+        chatbot[-1] = (parse_text(input), parse_text(response))
+        yield chatbot, history
+    # `chat_stream` does not update the history itself, so append the
+    # finished turn once streaming ends.
+    history.append((input, response))
+    yield chatbot, history
+
+
+def reset_user_input():
+    return gr.update(value='')
+
+
+def reset_state():
+    return [], []
+
+
+with gr.Blocks() as demo:
+    # The Space serves Qwen-7B-Chat (the original heading read "ChatGLM",
+    # a leftover from the demo this file was adapted from).
+    gr.HTML("""<h1 align="center">Qwen-7B-Chat</h1>""")
+
+    chatbot = gr.Chatbot()
+    with gr.Row():
+        with gr.Column(scale=4):
+            with gr.Column(scale=12):
+                user_input = gr.Textbox(show_label=False, placeholder="Input...", lines=10).style(
+                    container=False)
+            with gr.Column(min_width=32, scale=1):
+                submitBtn = gr.Button("Submit", variant="primary")
+        with gr.Column(scale=1):
+            emptyBtn = gr.Button("Clear History")
+            max_length = gr.Slider(0, 4096, value=2048, step=1.0, label="Maximum length", interactive=True)
+            top_p = gr.Slider(0, 1, value=0.7, step=0.01, label="Top P", interactive=True)
+            temperature = gr.Slider(0, 1, value=0.95, step=0.01, label="Temperature", interactive=True)
+
+    history = gr.State([])
+
+    submitBtn.click(predict, [user_input, chatbot, max_length, top_p, temperature, history], [chatbot, history],
+                    show_progress=True)
+    submitBtn.click(reset_user_input, [], [user_input])
+
+    emptyBtn.click(reset_state, outputs=[chatbot, history], show_progress=True)
+
+demo.queue().launch(share=True, inbrowser=True)
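
For reference, the same checkpoint can be exercised outside Gradio through the non-streaming `chat` method that Qwen's remote code provides (a minimal sketch; the prompt string is illustrative):

    from transformers import AutoTokenizer, AutoModelForCausalLM

    # Same loading path as web_demo.py above.
    tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen-7B-Chat", trust_remote_code=True)
    model = AutoModelForCausalLM.from_pretrained(
        "Qwen/Qwen-7B-Chat", device_map="auto", trust_remote_code=True
    ).eval()

    # Non-streaming chat: returns the full reply plus the updated history list.
    response, history = model.chat(tokenizer, "Hello! Who are you?", history=None)
    print(response)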