Update README.md
Browse files
README.md
CHANGED
@@ -38,7 +38,7 @@ tokenizer.src_lang = "en"
|
|
38 |
encoded = tokenizer(text.split('. '), return_tensors="pt", padding=True)
|
39 |
generated_tokens = model.generate(**encoded, forced_bos_token_id=tokenizer.get_lang_id("ko"))
|
40 |
outputs = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
|
41 |
-
print('
|
42 |
# => "이 모델은 지식 증류 기법(knowledge distillation techniques)을 사용하여 미세 조정되었습니다.
|
43 |
# 훈련 데이터셋(training dataset)은 대형 언어 모델(large language models)을 기반으로 한 협업 다중 에이전트 프레임워크(collaborative multi-agent framework)를 사용하여 생성되었습니다."
|
44 |
|
|
|
38 |
encoded = tokenizer(text.split('. '), return_tensors="pt", padding=True)
|
39 |
generated_tokens = model.generate(**encoded, forced_bos_token_id=tokenizer.get_lang_id("ko"))
|
40 |
outputs = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
|
41 |
+
print('\n'.join(outputs))
|
42 |
# => "이 모델은 지식 증류 기법(knowledge distillation techniques)을 사용하여 미세 조정되었습니다.
|
43 |
# 훈련 데이터셋(training dataset)은 대형 언어 모델(large language models)을 기반으로 한 협업 다중 에이전트 프레임워크(collaborative multi-agent framework)를 사용하여 생성되었습니다."
|
44 |
|