---
license: other
license_name: ntt-license
license_link: LICENSE
language:
  - ja
  - en
pipeline_tag: translation
library_name: fairseq
tags:
  - nmt
---

# Sugoi v4 JPN->ENG NMT Model by MingShiba

## How to download this model using Python

```python
import huggingface_hub

# download the CTranslate2 model and SentencePiece files into a local folder
huggingface_hub.snapshot_download('entai2965/sugoi-v4-ja-en-ctranslate2', local_dir='sugoi-v4-ja-en-ctranslate2')
```
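
The examples below also rely on the `ctranslate2` and `sentencepiece` Python packages. Assuming a standard pip environment (an assumption, not something stated elsewhere on this page), everything can be installed with:

```bash
pip install huggingface_hub ctranslate2 sentencepiece
```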

## How to run this model (batch syntax)

```python
import ctranslate2
import sentencepiece

# set defaults
model_path='sugoi-v4-ja-en-ctranslate2'
sentencepiece_model_path=model_path+'/spm'

device='cpu'
#device='cuda'

# load data
string1='γ―ι™γ‹γ«ε‰γΈγ¨ζ­©γΏε‡ΊγŸγ€‚'
string2='悲しいGPTγ¨θ©±γ—γŸγ“γ¨γŒγ‚γ‚ŠγΎγ™γ‹?'
raw_list=[string1,string2]

# load models
translator = ctranslate2.Translator(model_path, device=device)
tokenizer_for_source_language = sentencepiece.SentencePieceProcessor(sentencepiece_model_path+'/spm.ja.nopretok.model')
tokenizer_for_target_language = sentencepiece.SentencePieceProcessor(sentencepiece_model_path+'/spm.en.nopretok.model')

# tokenize batch
tokenized_batch=[]
for text in raw_list:
    tokenized_batch.append(tokenizer_for_source_language.encode(text,out_type=str))

# translate
# https://opennmt.net/CTranslate2/python/ctranslate2.Translator.html?#ctranslate2.Translator.translate_batch
translated_batch=translator.translate_batch(source=tokenized_batch,beam_size=5)
assert(len(raw_list)==len(translated_batch))

# decode the best hypothesis for each entry and strip any <unk> tokens
for count,tokens in enumerate(translated_batch):
    translated_batch[count]=tokenizer_for_target_language.decode(tokens.hypotheses[0]).replace('<unk>','')

# output
for text in translated_batch:
    print(text)
```

## Functional programming version

```python
import ctranslate2
import sentencepiece

# set defaults
model_path='sugoi-v4-ja-en-ctranslate2'
sentencepiece_model_path=model_path+'/spm'

device='cpu'
#device='cuda'

# load data
string1='γ―ι™γ‹γ«ε‰γΈγ¨ζ­©γΏε‡ΊγŸγ€‚'
string2='悲しいGPTγ¨θ©±γ—γŸγ“γ¨γŒγ‚γ‚ŠγΎγ™γ‹?'
raw_list=[string1,string2]

# load models
translator = ctranslate2.Translator(model_path, device=device)
tokenizer_for_source_language = sentencepiece.SentencePieceProcessor(sentencepiece_model_path+'/spm.ja.nopretok.model')
tokenizer_for_target_language = sentencepiece.SentencePieceProcessor(sentencepiece_model_path+'/spm.en.nopretok.model')

# invoke black magic: tokenize, translate, and decode in a single expression
translated_batch=[tokenizer_for_target_language.decode(tokens.hypotheses[0]).replace('<unk>','') for tokens in translator.translate_batch(source=[tokenizer_for_source_language.encode(text,out_type=str) for text in raw_list],beam_size=5)]
assert(len(raw_list)==len(translated_batch))

# output
for text in translated_batch:
    print(text)
```
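
To run on a GPU, switch `device` to `'cuda'` when constructing the translator. CTranslate2's `Translator` also accepts an optional `compute_type` argument; the values below are a minimal sketch of possible settings for lower memory use or faster inference, not part of the original instructions:

```python
import ctranslate2

# optional: int8 quantization on CPU (lower memory use) - an illustrative setting, not required
translator = ctranslate2.Translator('sugoi-v4-ja-en-ctranslate2', device='cpu', compute_type='int8')

# optional: float16 on a CUDA GPU
#translator = ctranslate2.Translator('sugoi-v4-ja-en-ctranslate2', device='cuda', compute_type='float16')
```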