Commit
•
9a00f89
1
Parent(s):
f01eae3
Update: 사용 예제 정리
Browse files
README.md
CHANGED
@@ -110,20 +110,16 @@ Just using `evaluate-metric/bleu` and `evaluate-metric/rouge` in huggingface `evaluate`
|
|
110 |
from transformers.pipelines import Text2TextGenerationPipeline
|
111 |
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
|
112 |
texts = ["그러게 누가 6시까지 술을 마시래?"]
|
113 |
-
tokenizer = AutoTokenizer.from_pretrained(
|
114 |
-
|
115 |
-
)
|
116 |
-
model = AutoModelForSeq2SeqLM.from_pretrained(
|
117 |
-
args.model_name_or_path,
|
118 |
-
)
|
119 |
# BartText2TextGenerationPipeline is implemented above (see 'Use')
|
120 |
seq2seqlm_pipeline = BartText2TextGenerationPipeline(model=model, tokenizer=tokenizer)
|
121 |
kwargs = {
|
122 |
-
"min_length":
|
123 |
-
"max_length":
|
124 |
-
"num_beams":
|
125 |
-
"do_sample":
|
126 |
-
"num_beam_groups":
|
127 |
}
|
128 |
pred = seq2seqlm_pipeline(texts, **kwargs)
|
129 |
print(pred)
|
|
|
110 |
from transformers.pipelines import Text2TextGenerationPipeline
|
111 |
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
|
112 |
texts = ["그러게 누가 6시까지 술을 마시래?"]
|
113 |
+
tokenizer = AutoTokenizer.from_pretrained("lIlBrother/ko-barTNumText")
|
114 |
+
model = AutoModelForSeq2SeqLM.from_pretrained("lIlBrother/ko-barTNumText")
|
|
|
|
|
|
|
|
|
115 |
# BartText2TextGenerationPipeline is implemented above (see 'Use')
|
116 |
seq2seqlm_pipeline = BartText2TextGenerationPipeline(model=model, tokenizer=tokenizer)
|
117 |
kwargs = {
|
118 |
+
"min_length": 0,
|
119 |
+
"max_length": 1206,
|
120 |
+
"num_beams": 100,
|
121 |
+
"do_sample": False,
|
122 |
+
"num_beam_groups": 1,
|
123 |
}
|
124 |
pred = seq2seqlm_pipeline(texts, **kwargs)
|
125 |
print(pred)
|