# configs/sampling.yml
# Data paths and options when using EleutherAI cluster
{
"data-path": "/mnt/ssd-1/data/enron/enron_text_document",
# or for weighted datasets:
# "train-data-paths": ["/mnt/ssd-1/data/enron/enron_text_document", "/mnt/ssd-cluster/data/enron/enron_text_document"],
# "test-data-paths": ["/mnt/ssd-1/data/enron/enron_text_document", "/mnt/ssd-cluster/data/enron/enron_text_document"],
# "valid-data-paths": ["/mnt/ssd-1/data/enron/enron_text_document", "/mnt/ssd-cluster/data/enron/enron_text_document"],
# "train-data-weights": [1., 2.],
# "test-data-weights": [2., 1.],
# "valid-data-weights": [0.5, 0.4],
"vocab-file": "/mnt/ssd-1/data/gpt2-vocab.json",
"merge-file": "/mnt/ssd-1/data/gpt2-merges.txt",
"save": "/mnt/ssd-1/neox_checkpoints/dense_medium_checkpoints",
"load": "/mnt/ssd-1/neox_checkpoints/dense_medium_checkpoints",
"tensorboard-dir": "/mnt/ssd-1/tensorboard",
"log-dir": "/mnt/ssd-1/logs",
"wandb_team": "eleutherai",
"sample-input-file":"/home/mchorse/gpt-neox/samplefile.txt",
"sample-output-file":"/home/mchorse/gpt-neox/sampleoutput.txt",
"text-gen-type": "input-file",
"maximum_tokens": 256,
"temperature": 1.0,
"top_p": 0.0,
"top_k": 0,
"recompute": false,
}