|
global {

ducttape_output=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama2_all_20B
repo=/mnt/data/jpombal/multilinguality_megatron

external_model_dir=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama2_all_20B/mc4_checkpoints
model_path=/mnt/data_2/cache/models--meta-llama--Llama-2-7b-hf/snapshots/6fdf2e60f86ff2481f2241aaee459f85b5b0bbb9
tokenizer_path=/mnt/data_2/cache/models--meta-llama--Llama-2-7b-hf/snapshots/6fdf2e60f86ff2481f2241aaee459f85b5b0bbb9/tokenizer.model
|
dataset=(Dataset: en de fr es it nl pt ru zh ko) |
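# (Dataset: ...) above declares a ducttape branch point; the parenthesized
# blocks below assign a per-branch (per-language) value to each variable.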
|
|
|
dataset_path=(Dataset:
en=/mnt/data_2/shared/tower_llm_data/en/data
es=/mnt/data_2/shared/tower_llm_data/es/3/0000.json.gz
de=/mnt/data_2/shared/tower_llm_data/de/2/0000.json.gz
fr=/mnt/data_2/shared/tower_llm_data/fr/1/0000.json.gz
nl=/mnt/data_2/shared/tower_llm_data/nl/0000.json.gz
pt=/mnt/data_2/shared/tower_llm_data/pt/0000.json.gz
it=/mnt/data_2/shared/tower_llm_data/it/0000.json.gz
ru=/mnt/data_2/shared/tower_llm_data/ru/6/0000.json.gz
zh=/mnt/data_2/shared/tower_llm_data/zh/0000.json.gz
ko=/mnt/data_2/shared/tower_llm_data/ko/0000.json.gz
)
|
is_hf_dataset=(Dataset:
en=True
es=False
de=False
fr=False
nl=False
pt=False
it=False
ru=False
zh=False
ko=False
) |
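# en points to a HuggingFace-style dataset directory, while the other
# languages are single gzipped JSON shards; is_hf_dataset presumably picks
# the matching loader per language.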
|
|
|
threshold=(Dataset:
en=516
es=275
de=611
fr=322
nl=649
pt=257
it=332
ru=334
zh=2041
ko=198
) |
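# These look like per-language perplexity ceilings for data cleaning: the
# same ten values are echoed in wandb_run_id (ppl_thresh_516_275_...), and
# min_perplexity=50 below would be the matching floor.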
|
|
|
datamix_weights=(
DataMix:
mc4_uniform=(
Dataset:
en=100
es=100
de=100
fr=100
nl=100
pt=100
it=100
ru=100
zh=100
ko=100
)
) |
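# Mixing weights appear to be relative, so equal values (100 each) give the
# uniform language mix that the mc4_uniform branch name advertises.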
|
|
|
# Numbers chosen so that the final token count for each language is around 1B
n_tokens=(Dataset:
en=1000000000
es=833333330
de=833333330
fr=833333330
nl=833333330
pt=833333330
it=833333330
ru=500000000
zh=13888888
ko=250000000
) |
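# The budgets above sum to ~6.76B tokens; against the ~20B-token budget in
# the experiment name, that is roughly three passes over the mix.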
|
|
|
min_perplexity=50
|
size=(Size: 7 13)
|
log_interval=1
save_interval=635
eval_interval=635
train_steps=12700 |
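# 12700 steps with checkpoints/evals every 635 steps = exactly 20 saves;
# warmup_steps below (127) is 1% of train_steps.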
|
|
|
lr_scheduler=cosine
warmup_steps=127
lr=3e-5
lr_min=3e-6
weight_decay=0.1
|
n_gpus=8
gpu_ids=0,1,2,3,4,5,6,7
tp=(TP: 1 2 3 4)
pp=(PP: 1 2 3 4)
micro_batch_size=4
grad_accum_steps=12
vocab_size=32000 |
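# Rough sanity check, assuming Llama-2's default 4096-token sequences:
# data-parallel size = n_gpus / (tp * pp) = 8 at tp=pp=1, so global batch =
# 4 micro-batches * 12 accumulation steps * 8 replicas = 384 sequences
# (~1.57M tokens/step); over 12700 steps that is ~20B tokens, matching the
# "20B" in ducttape_output and wandb_run_id. Note a tp or pp of 3 would not
# divide the 8 GPUs evenly.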
|
|
|
cpu_workers=16
wandb_run_id="llama2_7B_20b_base_vocab_uniform_cleaned_ppl_thresh_516_275_611_322_649_257_332_334_2041_198_and_wiki_33"
wikipedia=False
freeze_layers="" |
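# The posterior-token options below are disabled for this run;
# n_posterior_tokens is presumably ignored while posterior_tokens=False,
# hence the placeholder value rather than a count.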
|
posterior_tokens=False
n_posterior_tokens=False
eval_iters=1
is_parallel=False
lp=""
}
|
|