Update README.md

# unilm-base-chinese-news-sum

```sh
pip install git+https://github.com/Liadrinz/transformers-unilm # 安装兼容HuggingFace的UniLM模型代码
```

```py
# Demo: summarize a Chinese news article with the fine-tuned UniLM model.
from unilm import UniLMTokenizer, UniLMForConditionalGeneration

# Example news article (Chinese) used as the summarization input.
article = (
    "12月23日,河北石家庄。8岁哥哥轻车熟路哄睡弟弟,姿势标准动作熟练。"
    "妈妈杨女士表示:哥哥很喜欢弟弟,因为心思比较细,自己平时带孩子的习惯他都会跟着学习,"
    "哄睡孩子也都会争着来,技巧很娴熟,两人在一块很有爱,自己感到很幸福,平时帮了自己很大的忙,感恩有这么乖的宝宝。"
)

# Load tokenizer and model weights from the Hugging Face Hub.
checkpoint = "Yuang/unilm-base-chinese-news-sum"
tokenizer = UniLMTokenizer.from_pretrained(checkpoint)
model = UniLMForConditionalGeneration.from_pretrained(checkpoint)

# Encode the article, generate the summary tokens, and decode them.
# The decoded text has the form "[CLS] <news_article> [SEP] <news_summary> [SEP]".
encoded = tokenizer(article, return_tensors="pt")
generated_ids = model.generate(**encoded, max_new_tokens=16)
decoded = tokenizer.decode(generated_ids[0])
print(decoded)

# The summary is the segment between the first and second [SEP] markers.
summary = decoded.split("[SEP]")[1].strip()
print(summary)
```