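# Axolotl config: continued pretraining of winglian/Llama-3-8b-64k-PoSE on
# SlimPajama shards with a high-rank LoRA adapter, using PoSE to target a
# 262144-token context window (summary inferred from the settings below).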
# base_model: meta-llama/Meta-Llama-3-8B
base_model: winglian/Llama-3-8b-64k-PoSE
model_type: LlamaForCausalLM
tokenizer_type: AutoTokenizer
load_in_8bit: true
load_in_4bit: false
strict: false
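# Sample shards from SlimPajama; `type: completion` trains plain next-token
# prediction on raw text (no chat/instruction template).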
datasets:
  - path: cerebras/SlimPajama-627B
    data_files:
      - train/chunk1/example_train_0.jsonl.zst
      - train/chunk1/example_train_1.jsonl.zst
      - train/chunk1/example_train_2.jsonl.zst
      - train/chunk1/example_train_3.jsonl.zst
      - train/chunk1/example_train_4.jsonl.zst
      - train/chunk1/example_train_5.jsonl.zst
      - train/chunk1/example_train_6.jsonl.zst
      - train/chunk1/example_train_7.jsonl.zst
      - train/chunk1/example_train_8.jsonl.zst
      - train/chunk1/example_train_9.jsonl.zst
      - train/chunk1/example_train_10.jsonl.zst
      - train/chunk1/example_train_11.jsonl.zst
      - train/chunk1/example_train_12.jsonl.zst
      - train/chunk1/example_train_13.jsonl.zst
      - train/chunk1/example_train_14.jsonl.zst
      - train/chunk1/example_train_15.jsonl.zst
      - train/chunk1/example_train_16.jsonl.zst
      - train/chunk1/example_train_17.jsonl.zst
      - train/chunk1/example_train_18.jsonl.zst
      - train/chunk1/example_train_19.jsonl.zst
      - train/chunk1/example_train_20.jsonl.zst
      - train/chunk1/example_train_21.jsonl.zst
      - train/chunk1/example_train_22.jsonl.zst
      - train/chunk1/example_train_23.jsonl.zst
      - train/chunk1/example_train_24.jsonl.zst
      - train/chunk1/example_train_25.jsonl.zst
      - train/chunk1/example_train_26.jsonl.zst
      - train/chunk1/example_train_27.jsonl.zst
      - train/chunk1/example_train_28.jsonl.zst
      - train/chunk1/example_train_29.jsonl.zst
      - train/chunk1/example_train_30.jsonl.zst
      - train/chunk1/example_train_31.jsonl.zst
      - train/chunk1/example_train_32.jsonl.zst
      - train/chunk1/example_train_33.jsonl.zst
      - train/chunk1/example_train_34.jsonl.zst
      - train/chunk1/example_train_35.jsonl.zst
      - train/chunk1/example_train_36.jsonl.zst
      - train/chunk1/example_train_37.jsonl.zst
      - train/chunk1/example_train_38.jsonl.zst
      - train/chunk1/example_train_39.jsonl.zst
      - train/chunk1/example_train_40.jsonl.zst
      - train/chunk1/example_train_41.jsonl.zst
      - train/chunk1/example_train_42.jsonl.zst
      - train/chunk1/example_train_43.jsonl.zst
      - train/chunk1/example_train_44.jsonl.zst
      - train/chunk1/example_train_45.jsonl.zst
      - train/chunk1/example_train_46.jsonl.zst
      - train/chunk1/example_train_47.jsonl.zst
      - train/chunk1/example_train_48.jsonl.zst
      - train/chunk1/example_train_49.jsonl.zst
      - train/chunk1/example_train_50.jsonl.zst
      - train/chunk1/example_train_51.jsonl.zst
      - train/chunk1/example_train_52.jsonl.zst
      - train/chunk1/example_train_53.jsonl.zst
      - train/chunk1/example_train_54.jsonl.zst
      - train/chunk1/example_train_55.jsonl.zst
      - train/chunk1/example_train_56.jsonl.zst
      - train/chunk1/example_train_57.jsonl.zst
      - train/chunk1/example_train_58.jsonl.zst
      - train/chunk1/example_train_59.jsonl.zst
      - train/chunk1/example_train_60.jsonl.zst
      - train/chunk1/example_train_61.jsonl.zst
      - train/chunk1/example_train_62.jsonl.zst
      - train/chunk1/example_train_63.jsonl.zst
      - train/chunk1/example_train_64.jsonl.zst
      - train/chunk1/example_train_65.jsonl.zst
      - train/chunk1/example_train_66.jsonl.zst
      - train/chunk1/example_train_67.jsonl.zst
      - train/chunk1/example_train_68.jsonl.zst
      - train/chunk1/example_train_69.jsonl.zst
      - train/chunk1/example_train_70.jsonl.zst
      - train/chunk1/example_train_71.jsonl.zst
      - train/chunk1/example_train_72.jsonl.zst
      - train/chunk1/example_train_73.jsonl.zst
      - train/chunk1/example_train_74.jsonl.zst
      - train/chunk1/example_train_75.jsonl.zst
      - train/chunk1/example_train_76.jsonl.zst
      - train/chunk1/example_train_77.jsonl.zst
      - train/chunk1/example_train_78.jsonl.zst
      - train/chunk1/example_train_79.jsonl.zst
    type: completion
    split: train
dataset_prepared_path: last_run_prepared
val_set_size: 0.001
output_dir: ./llama-3-32k
save_safetensors: true
sequence_len: 8192
sample_packing: false
pad_to_sequence_len: false
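# PoSE (Positional Skip-wisE) training: each sample (up to sequence_len tokens)
# is split into pose_num_chunks chunks whose position ids are offset by random
# skips, so the model sees relative positions spanning up to
# pose_max_context_len (256K) while only attending over 8K tokens at a time.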
use_pose: true
pose_max_context_len: 262144
min_sample_len: 6144
pose_num_chunks: 16
curriculum_sampling: true
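# Patch the HF model config: keep Llama-3's rope_theta of 500000.0 and raise
# max_position_embeddings so the saved model advertises the 262144 window.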
overrides_of_model_config:
  rope_theta: 500000.0
  max_position_embeddings: 262144
# peft_use_dora: true
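# High-rank LoRA on the attention projections. rsLoRA scales updates by
# lora_alpha / sqrt(lora_r) rather than lora_alpha / lora_r, which keeps
# training stable at large ranks such as r=1024.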
adapter: lora
peft_use_rslora: true
lora_model_dir:
lora_r: 1024
lora_alpha: 1024
lora_dropout: 0.1
lora_target_modules:
  - q_proj
  - k_proj
  - v_proj
  - o_proj
wandb_project: llama-3-262k
wandb_entity: oaaic
wandb_watch:
wandb_name:
wandb_log_model:
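# Effective batch size = micro_batch_size * gradient_accumulation_steps * #GPUs.
# adamw_bnb_8bit keeps optimizer state in 8-bit (bitsandbytes) to save memory.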
gradient_accumulation_steps: 8
micro_batch_size: 1
num_epochs: 1
optimizer: adamw_bnb_8bit
lr_scheduler: cosine
learning_rate: 0.00001
max_grad_norm: 1.0
adam_beta2: 0.95
train_on_inputs: false
group_by_length: false
bf16: true
fp16:
tf32: true
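# Reentrant gradient checkpointing recomputes activations during the backward
# pass, trading extra compute for a large cut in activation memory.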
gradient_checkpointing: true
gradient_checkpointing_kwargs:
  use_reentrant: true
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
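# FlashAttention fuses the attention kernels so the full attention matrix is
# never materialized, which matters at sequence_len 8192.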
xformers_attention:
flash_attention: true
sdp_attention:
s2_attention:
warmup_steps: 10
evals_per_epoch: 8
saves_per_epoch: 8
debug:
deepspeed:
weight_decay: 0.0
fsdp:
fsdp_config:
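# Llama-3 ships without a pad token, so reuse <|end_of_text|> for padding.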
special_tokens:
  pad_token: <|end_of_text|>