Update README.md
Browse files
README.md
CHANGED
@@ -30,7 +30,8 @@ tokenizer = M2M100Tokenizer.from_pretrained(model_name)
|
|
30 |
model = M2M100ForConditionalGeneration.from_pretrained(model_name)
|
31 |
|
32 |
# Example sentence
|
33 |
-
text = "The model was fine-tuned using knowledge distillation techniques
|
|
|
34 |
|
35 |
# Tokenize and generate translation
|
36 |
tokenizer.src_lang = "en"
|
@@ -38,7 +39,8 @@ encoded = tokenizer(text.split('. '), return_tensors="pt", padding=True)
|
|
38 |
generated_tokens = model.generate(**encoded, forced_bos_token_id=tokenizer.get_lang_id("ko"))
|
39 |
outputs = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
|
40 |
print(' '.join(outputs))
|
41 |
-
# => "이 모델은 지식 증류 기법(knowledge distillation techniques)을 사용하여 미세 조정되었습니다.
|
|
|
42 |
|
43 |
```
|
44 |
|
|
|
30 |
model = M2M100ForConditionalGeneration.from_pretrained(model_name)
|
31 |
|
32 |
# Example sentence
|
33 |
+
text = "The model was fine-tuned using knowledge distillation techniques.\
|
34 |
+
The training dataset was created using a collaborative multi-agent framework powered by large language models."
|
35 |
|
36 |
# Tokenize and generate translation
|
37 |
tokenizer.src_lang = "en"
|
|
|
39 |
generated_tokens = model.generate(**encoded, forced_bos_token_id=tokenizer.get_lang_id("ko"))
|
40 |
outputs = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
|
41 |
print(' '.join(outputs))
|
42 |
+
# => "이 모델은 지식 증류 기법(knowledge distillation techniques)을 사용하여 미세 조정되었습니다.
|
43 |
+
# 훈련 데이터셋(training dataset)은 대형 언어 모델(large language models)을 기반으로 한 협업
다중 에이전트 프레임워크(collaborative multi-agent framework)를 사용하여 생성되었습니다."
|
44 |
|
45 |
```
|
46 |
|