---
language:
- ko
tags:
- gpt2
license: cc-by-nc-sa-4.0
---
## Example
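The snippet below loads the tokenizer and model from the Hub, wraps a Korean prompt in the model's punctuation-wrapper format, and samples a continuation on GPU: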
```python
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
tokenizer = AutoTokenizer.from_pretrained(
    "CheonggyeMountain-Sherpa/kogpt-trinity-punct-wrapper",
    revision="punct_wrapper-related_words-overfit",  # or punct_wrapper-related_words-minevalloss
    bos_token="<s>",
    eos_token="</s>",
    unk_token="<unk>",
    pad_token="<pad>",
    mask_token="<mask>",
)
model = AutoModelForCausalLM.from_pretrained(
    "CheonggyeMountain-Sherpa/kogpt-trinity-punct-wrapper",
    revision="punct_wrapper-related_words-overfit",  # or punct_wrapper-related_words-minevalloss
    pad_token_id=tokenizer.eos_token_id,
).to(device="cuda")
model.eval()
# Wrap the prompt in the @...@<usr> format the punct-wrapper model expects
prompt = "์์์ด ๋ณด์ด๋ ๊ฒฝ์น"  # "A landscape where the sunset glow is visible"
wrapped_prompt = f"@{prompt}@<usr>\n"
with torch.no_grad():
    tokens = tokenizer.encode(wrapped_prompt, return_tensors="pt").to(device="cuda")
    gen_tokens = model.generate(
        tokens,
        max_length=64,
        repetition_penalty=2.0,
        pad_token_id=tokenizer.pad_token_id,
        eos_token_id=tokenizer.eos_token_id,
        bos_token_id=tokenizer.bos_token_id,
        do_sample=True,  # top_k/top_p only take effect when sampling is enabled
        top_k=16,
        top_p=0.8,
    )
# Decode only the newly generated tokens, skipping the prompt
generated = tokenizer.decode(gen_tokens[0][len(tokens[0]):])
print(generated)
# ํด๊ฐ ์ง๊ณ ์์ ๋ฌด๋ ต
# ๋๋ ์์์ ๋ณด๋ฌ ๊ฐ๋ค
# ๋ถ์ ํ๋๊ณผ ํ์ ๊ตฌ๋ฆ์ด ๋๋ฅผ ๋ฐ๊ฒจ์ค ๊ฒ ๊ฐ์์๋ฆฌ
# ํ์ง๋ง ๋ด๊ฐ ๋ณธ ํด๋ ์ ๋ฌผ์ด๋ง ๊ฐ๊ณ
# ๊ตฌ๋ฆ๋ง์ ์์ทจ๋ฅผ ๊ฐ์ถ ์ด๋ ๋ง์ด ๋จ์์์ ๋ฟ์ด๋ค
# ๋ด๊ฐ ํ ๋ฐฐ๋ ๋ณด์ด์ง๋ ์๊ณ
# (Roughly: "Around the time the sun went down / I went to see the sunset glow /
# the red sky and white clouds seemed as if they would welcome me / but the sun
# I saw only kept setting / only darkness remained, hiding all trace of the clouds /
# and the boat I was on was nowhere to be seen")
```
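
For repeated use, the wrapping, generation, and decoding steps can be factored into a small helper. This is a minimal sketch reusing the `tokenizer` and `model` loaded above; the `generate_from_prompt` name and its defaults are illustrative, not part of this model's API:

```python
def generate_from_prompt(prompt: str, max_length: int = 64) -> str:
    """Wrap `prompt` in the @...@<usr> format, sample a continuation,
    and return only the newly generated text."""
    wrapped = f"@{prompt}@<usr>\n"
    tokens = tokenizer.encode(wrapped, return_tensors="pt").to(device="cuda")
    with torch.no_grad():
        gen_tokens = model.generate(
            tokens,
            max_length=max_length,
            repetition_penalty=2.0,
            pad_token_id=tokenizer.pad_token_id,
            eos_token_id=tokenizer.eos_token_id,
            bos_token_id=tokenizer.bos_token_id,
            do_sample=True,
            top_k=16,
            top_p=0.8,
        )
    return tokenizer.decode(gen_tokens[0][len(tokens[0]):])

print(generate_from_prompt("์์์ด ๋ณด์ด๋ ๊ฒฝ์น"))  # "A landscape where the sunset glow is visible"
```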