File size: 2,110 Bytes
577164e |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 |
from transformers import MBartForConditionalGeneration, MBart50TokenizerFast
import torch
from utils.simple_bleu import simple_score

# Load the multilingual mBART-50 many-to-many translation model once at import
# time, in bfloat16 to halve memory, and let `device_map="auto"` place it on
# the available accelerator (or CPU). The matching fast tokenizer carries the
# language codes ("ko_KR", "en_XX") used by the translate functions below.
model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-50-many-to-many-mmt", torch_dtype=torch.bfloat16, device_map="auto")
tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50-many-to-many-mmt")
def translate_ko2en(text):
    """Translate Korean *text* to English with mBART-50.

    Args:
        text: Korean source string.

    Returns:
        The English translation as a plain string.
    """
    tokenizer.src_lang = "ko_KR"
    # Move the whole encoding (input_ids AND attention_mask) to the model's
    # device and unpack it into generate(): the original passed only
    # input_ids, discarding the attention mask the tokenizer produced.
    encoded = tokenizer(text, return_tensors="pt").to(model.device)
    # forced_bos_token_id steers decoding to English. max_new_tokens=2048
    # matches translate_en2ko — without it, generation stops at the model's
    # small default length and long inputs are silently truncated.
    generated = model.generate(
        **encoded,
        forced_bos_token_id=tokenizer.lang_code_to_id["en_XX"],
        max_new_tokens=2048,
    )
    return tokenizer.decode(generated[0], skip_special_tokens=True)
def translate_en2ko(text):
    """Translate English *text* to Korean with mBART-50.

    Args:
        text: English source string.

    Returns:
        The Korean translation as a plain string.
    """
    tokenizer.src_lang = "en_XX"
    # Encode, move to the model's device, and force the decoder to start
    # with the Korean language token so mBART-50 emits Korean output.
    token_ids = tokenizer(text, return_tensors="pt").input_ids.to(model.device)
    generated = model.generate(
        input_ids=token_ids,
        forced_bos_token_id=tokenizer.lang_code_to_id["ko_KR"],
        max_new_tokens=2048,
    )
    return tokenizer.decode(generated[0], skip_special_tokens=True)
def main():
    """Interactive round-trip demo: Korean -> English -> Korean.

    Reads Korean text from stdin forever, translates it to English and back,
    then prints both translations plus a BLEU-style similarity score between
    the original and the round-tripped Korean.
    """
    while True:
        source = input('>')
        english = translate_ko2en(source)
        round_trip = translate_en2ko(english)
        print('en_text', english)
        print('ko_text', round_trip)
        print('score', simple_score(source, round_trip))


"""
>>? 3천만 개가 넘는 파일과 250억 개의 토큰이 있습니다. Phi1.5의 데이터 세트 구성에 접근했지만 오픈 소스 모델인 Mixtral 8x7B를 사용하고 Apache2.0 라이선스에 따라 라이선스가 부여됩니다.
en_text It has over 30 million files and 2.5 billion tokens, accesses the data set configuration of Phi1.5, but uses an open-source model, Mixtral 8x7B, and is licensed under the Apache 2.0 license.
ko_text 30만개의 파일과 2.5억개의 토큰을 가지고 있고, Phi1.5의 데이터 세트 configuration에 접근하지만, 오픈소스 모델인 Mixtral 8x7B를 사용하고, Apache 2.0 라이셀스 아래 licenc를 가지고 있습니다.
score 0.14724623770949022
"""
# Run the interactive translation loop only when executed as a script,
# not when imported as a module.
if __name__ == "__main__":
    main()
|