liangyuxin
committed on
Commit
•
73845dd
1
Parent(s):
c3ebc0a
change config
Browse files- README.md +7 -2
- config.json +0 -3
README.md
CHANGED
@@ -48,18 +48,23 @@ pip install --editable .
|
|
48 |
```
|
49 |
|
50 |
```python3
|
51 |
-
import argparse
|
52 |
import torch
|
53 |
from fengshen.models.DAVAE.DAVAEModel import DAVAEModel
|
|
|
54 |
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
|
|
|
|
|
|
|
|
55 |
vae_model = DAVAEModel.from_pretrained("IDEA-CCNL/Randeng-DAVAE-1.2B-General-Chinese").to(device)
|
56 |
input_texts = [
|
57 |
"针对电力系统中的混沌振荡对整个互联电网的危害问题,提出了一种基于非线性光滑函数的滑模控制方法.",
|
58 |
"超市面积不算大.挺方便附近的居民购买的. 生活用品也比较齐全.价格适用中.",
|
59 |
]
|
60 |
-
output_texts = vae_model.simulate_batch(input_texts)
|
61 |
print(output_texts)
|
62 |
|
|
|
63 |
```
|
64 |
|
65 |
## 引用 Citation
|
|
|
48 |
```
|
49 |
|
50 |
```python3
|
|
|
51 |
import torch
|
52 |
from fengshen.models.DAVAE.DAVAEModel import DAVAEModel
|
53 |
+
from transformers import BertTokenizer,T5Tokenizer
|
54 |
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
55 |
+
|
56 |
+
encoder_tokenizer = BertTokenizer.from_pretrained("IDEA-CCNL/Randeng-DAVAE-1.2B-General-Chinese")
|
57 |
+
decoder_tokenizer = T5Tokenizer.from_pretrained("IDEA-CCNL/Randeng-DAVAE-1.2B-General-Chinese", eos_token = '<|endoftext|>', pad_token = '<pad>',extra_ids=0)
|
58 |
+
decoder_tokenizer.add_special_tokens({'bos_token':'<bos>'})
|
59 |
vae_model = DAVAEModel.from_pretrained("IDEA-CCNL/Randeng-DAVAE-1.2B-General-Chinese").to(device)
|
60 |
input_texts = [
|
61 |
"针对电力系统中的混沌振荡对整个互联电网的危害问题,提出了一种基于非线性光滑函数的滑模控制方法.",
|
62 |
"超市面积不算大.挺方便附近的居民购买的. 生活用品也比较齐全.价格适用中.",
|
63 |
]
|
64 |
+
output_texts = vae_model.simulate_batch(encoder_tokenizer,decoder_tokenizer,input_texts)
|
65 |
print(output_texts)
|
66 |
|
67 |
+
|
68 |
```
|
69 |
|
70 |
## 引用 Citation
|
config.json
CHANGED
@@ -1,7 +1,4 @@
|
|
1 |
{
|
2 |
-
"encoder_model_path": "/encoder/",
|
3 |
-
"decoder_model_path": "/decoder/",
|
4 |
-
"sentencepiece_model_path":"cog-pretrain.model",
|
5 |
"latent_size":256,
|
6 |
"seed": 42,
|
7 |
"ratio_increase": 0.5,
|
|
|
1 |
{
|
|
|
|
|
|
|
2 |
"latent_size":256,
|
3 |
"seed": 42,
|
4 |
"ratio_increase": 0.5,
|