Spaces: Build error

initial commit
lewiswu1209 committed • Commit cfff8e6 • 0 Parent(s)
Files changed:
- .gitattributes +31 -0
- README.md +13 -0
- app.py +70 -0
- chinese_vocab.model +3 -0
- requirements.txt +4 -0
.gitattributes
ADDED
@@ -0,0 +1,31 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zstandard filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
README.md
ADDED
@@ -0,0 +1,13 @@
+---
+title: Gpt2 Chinese Composition
+emoji: 🐢
+colorFrom: yellow
+colorTo: blue
+sdk: gradio
+sdk_version: 3.0.26
+app_file: app.py
+pinned: false
+license: mit
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
ADDED
@@ -0,0 +1,79 @@
+import torch
+import torch.nn.functional as F
+import gradio as gr
+
+from transformers import GPT2LMHeadModel, CpmTokenizer
+
+
+def top_k_top_p_filtering(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')):
+    # Mask logits outside the top-k set and/or the top-p (nucleus) set.
+    assert logits.dim() == 1
+    top_k = min(top_k, logits.size(-1))
+    if top_k > 0:
+        # Drop every token whose logit is below the k-th largest logit.
+        indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
+        logits[indices_to_remove] = filter_value
+    if top_p > 0.0:
+        # Keep the smallest set of tokens whose cumulative probability exceeds top_p.
+        sorted_logits, sorted_indices = torch.sort(logits, descending=True)
+        cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
+        sorted_indices_to_remove = cumulative_probs > top_p
+        # Shift right so the first token past the threshold is kept as well.
+        sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
+        sorted_indices_to_remove[..., 0] = 0
+        indices_to_remove = sorted_indices[sorted_indices_to_remove]
+        logits[indices_to_remove] = filter_value
+    return logits
+
+
+def generate(title, context, max_len):
+    # The prompt is "title <sep> context"; the model continues from there.
+    title_ids = tokenizer.encode(title, add_special_tokens=False)
+    context_ids = tokenizer.encode(context, add_special_tokens=False)
+    input_ids = title_ids + [sep_id] + context_ids
+    cur_len = len(input_ids)
+    input_len = cur_len
+    last_token_id = input_ids[-1]
+    input_ids = torch.tensor([input_ids], dtype=torch.long)
+
+    while True:
+        # Feed at most the last 200 tokens to bound the context length.
+        outputs = model(input_ids=input_ids[:, -200:])
+        logits = outputs.logits
+        next_token_logits = logits[0, -1, :]
+        next_token_logits = next_token_logits / 1  # temperature 1, a no-op
+        next_token_logits[unk_id] = -float('Inf')  # never sample <unk>
+        filtered_logits = top_k_top_p_filtering(next_token_logits, top_k=0, top_p=0.85)
+        next_token_id = torch.multinomial(F.softmax(filtered_logits, dim=-1), num_samples=1)
+        input_ids = torch.cat((input_ids, next_token_id.unsqueeze(0)), dim=1)
+        cur_len += 1
+        word = tokenizer.convert_ids_to_tokens(next_token_id.item())
+        # Once max_len new tokens exist, stop at a sentence boundary.
+        if cur_len >= (input_len + max_len) and last_token_id == 8 and next_token_id == 3:
+            break
+        if cur_len >= (input_len + max_len) and word in [".", "。", "!", "!", "?", "?", ",", ","]:
+            break
+        if next_token_id == eod_id:  # end-of-document token
+            break
+        last_token_id = next_token_id.item()  # track the previous token for the stop check
+    result = tokenizer.decode(input_ids.squeeze(0))
+    return result
+
+
+if __name__ == '__main__':
+    tokenizer = CpmTokenizer(vocab_file="chinese_vocab.model")
+    eod_id = tokenizer.convert_tokens_to_ids("<eod>")
+    sep_id = tokenizer.sep_token_id
+    unk_id = tokenizer.unk_token_id
+
+    model = GPT2LMHeadModel.from_pretrained("lewiswu1209/gpt2-chinese-composition")
+    model.eval()
+    gr.Interface(
+        fn=generate,
+        inputs=[
+            "text",
+            gr.Textbox(lines=7, placeholder="在这里输入一个开头。"),  # "Enter an opening here."
+            "number"
+        ],
+        outputs=gr.Textbox(lines=15, placeholder="这里会输出一段文字。")  # "The generated text appears here."
+    ).launch()
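The top_k_top_p_filtering step above is nucleus (top-p) sampling: sort the logits, take the cumulative softmax, and mask every token past the top_p threshold. A minimal self-contained sketch of that step on toy logits (the values below are illustrative, not from the model):

import torch
import torch.nn.functional as F

# Toy logits over a 4-token vocabulary; softmax gives roughly [0.61, 0.22, 0.14, 0.03].
logits = torch.tensor([2.0, 1.0, 0.5, -1.0])
top_p = 0.85

sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cumulative = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
remove = cumulative > top_p
# Shift right so the first token that crosses the threshold is still kept.
remove[1:] = remove[:-1].clone()
remove[0] = False
logits[sorted_indices[remove]] = -float('Inf')

# Only the 0.85-probability nucleus survives; the masked token's probability drops to 0.
print(F.softmax(logits, dim=-1))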
chinese_vocab.model
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:25c1d178d54901291c1735cd2ae0788be90df4de01fb445e8a8a998cab35ba43
+size 713229
requirements.txt
ADDED
@@ -0,0 +1,4 @@
+torch
+transformers
+sentencepiece
+jieba
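chinese_vocab.model is the SentencePiece vocabulary that CpmTokenizer loads at startup (stored via Git LFS, hence the pointer file above), and sentencepiece and jieba in requirements.txt are what that tokenizer needs at runtime. A minimal sketch of exercising it on its own, assuming the file has been pulled from LFS:

from transformers import CpmTokenizer

# Load the SentencePiece model shipped with this Space.
tokenizer = CpmTokenizer(vocab_file="chinese_vocab.model")

ids = tokenizer.encode("在这里输入一个开头。", add_special_tokens=False)  # "Enter an opening here."
print(ids)
print(tokenizer.convert_ids_to_tokens(ids))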