
Experimental pre-training on instruction datasets. Training logs: https://wandb.ai/open-assistant/supervised-finetuning/runs/ys9rt5ue

Checkpoint: 3500 steps

oasst dataset config used:

```yaml
pretrain:
  use_custom_sampler: true
  sort_by_length: false
  datasets:
    - joke
    - webgpt:
        val_split: 0.1
    - gpt4all:
        val_split: 0.01
    - alpaca:
        val_split: 0.025
    - code_alpaca:
        val_split: 0.05
    - minimath
    - humaneval_mbpp_codegen_qa
    - humaneval_mbpp_testgen_qa
    - grade_school_math_instructions
    - recipes
    - cmu_wiki_qa
    #- youtube_subs_howto100m  # uses incompatible column names
    #- ubuntu_dialogue_qa      # fails to load
    - oa_wiki_qa_bart_10000row
    - prosocial_dialogue:
        fraction: 0.1
    - explain_prosocial:
        fraction: 0.05
```
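For the datasets listed with parameters, `val_split` reserves a share of the rows for validation and `fraction` subsamples the dataset before training. A minimal Python sketch of these semantics; the helper below is hypothetical and not the Open-Assistant implementation:

```python
import random

def subsample_and_split(rows, fraction=1.0, val_split=0.0, seed=42):
    # `fraction` keeps only that share of the dataset; `val_split`
    # then carves out a validation set from what remains.
    # Hypothetical helper illustrating the config semantics above.
    rng = random.Random(seed)
    rows = rng.sample(rows, int(len(rows) * fraction))
    n_val = int(len(rows) * val_split)
    return rows[n_val:], rows[:n_val]  # (train, val)

# e.g. webgpt with val_split: 0.1 -> 90% train, 10% validation
train, val = subsample_and_split(list(range(1000)), val_split=0.1)
assert len(train) == 900 and len(val) == 100
```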

pythia-12b training parameters:

```yaml
pythia-12b:
  dtype: fp16
  log_dir: "pythia_log_12b"
  learning_rate: 6e-6
  model_name: EleutherAI/pythia-12b-deduped
  output_dir: pythia_model_12b
  weight_decay: 0.0
  max_length: 2048
  use_flash_attention: true
  #deepspeed_config: configs/zero3_config.json
  warmup_steps: 50
  gradient_checkpointing: true
  gradient_accumulation_steps: 8
  per_device_train_batch_size: 2
  per_device_eval_batch_size: 5
  eval_steps: 200
  save_steps: 500
  num_train_epochs: 2
  save_total_limit: 2
```
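With `per_device_train_batch_size: 2` and `gradient_accumulation_steps: 8`, the effective batch size is 16 sequences per device, times the number of GPUs when running data-parallel.

The checkpoint can be loaded with `transformers` like any Pythia model. A minimal sketch, assuming the weights follow the standard GPT-NeoX format; the base model id from the config is used as a stand-in, and the prompt is only an example:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Base model from the config above; substitute this repo's model id
# to load the fine-tuned 3500-step checkpoint instead.
model_name = "EleutherAI/pythia-12b-deduped"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float16,  # matches `dtype: fp16` in the config
    device_map="auto",
)

inputs = tokenizer("Write a short joke about computers.", return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```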