#!/usr/bin/env bash
# Run greedy-decoding translation inference (en-ja, WMT2024 test set) with a
# fine-tuned Mistral model via the llm-recipes HF inference tool.
#
# Requires: scripts/wmt2024/tokens.sh (sourced for API/auth tokens),
#           the model and input JSONL paths below to exist on this host.
set -euo pipefail

readonly LLM_RECIPES_DIR=/code/llm-recipes

# Load auth tokens used by hf_inference.py.
# shellcheck source=/dev/null
source "$LLM_RECIPES_DIR/scripts/wmt2024/tokens.sh"

# NOTE(review): original hardcoded /code/llm-recipes here instead of reusing
# LLM_RECIPES_DIR; unified on the variable so the path is defined once.
python "$LLM_RECIPES_DIR/tools/hf_inference.py" \
  --model /work/models/translation_finetuned_hf/mistral-llm-recipes-en-ja-continuous-pretrained-v1-dev-finetune-chunked-docs-all-averaged-71-75 \
  -i /work/wmt2024_test/LLM/wmttest2024.src.sentence_splited.with_template.en-ja.en.jsonl \
  -o /work/translation/wmt24_test/en-ja/mistral-greedy \
  -g 0 \
  -b 4096 \
  --dynamic_max_new_token_ratio 3.0

echo "Done!"