nroggendorff committed
Commit fea8645
1 Parent(s): 55428c0

Update train.py

Files changed (1)
  1. train.py +5 -5
train.py CHANGED
@@ -10,23 +10,23 @@ from tokenizers import ByteLevelBPETokenizer
 MAX_SEQ_LENGTH = 128
 BATCH_SIZE = 16
 EPOCHS = 2
-LEARNING_RATE = 1e-3
+LEARNING_RATE = 2e-3
 FACTOR = 1024
 VOCAB_SIZE = 32000
 INPUT_DATASET = "HuggingFaceTB/smollm-corpus"
-INSTRUCT_DATASET = "nroggendorff/openhermes"
+INSTRUCT_DATASET = "nroggendorff/elephant"
 OUTPUT_REPO = "smallama"
 FP16 = False
 WARMUP_STEPS = 20
 DECAY = 0
-GRADIENT_ACCUMULATION_STEPS = 16
+GRADIENT_ACCUMULATION_STEPS = 64
 PUSH_TO_HUB = True
 
 def load_data():
     pretrain = load_dataset(INPUT_DATASET, "cosmopedia-v2", split="train", streaming=True)
-    pretrain = Dataset.from_generator(lambda: pretrain.take(int(5e+5)))
+    pretrain = Dataset.from_generator(lambda: pretrain.take(int(5e+6)))
     instruct = load_dataset(INSTRUCT_DATASET, split="train", streaming=True)
-    instruct = Dataset.from_generator(lambda: instruct.take(int(8e+5)))
+    instruct = Dataset.from_generator(lambda: instruct.take(int(3e+6)))
     dataset_dict = DatasetDict({
         'pretrain': pretrain,
         'instruct': instruct
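
The changed lines in load_data() all use the same datasets streaming pattern: load_dataset(..., streaming=True) returns an iterable stream, .take(n) caps it at the first n examples, and Dataset.from_generator materializes that slice as a regular on-disk Dataset. Below is a minimal standalone sketch of that pattern; the dataset names are taken from the diff above, while the example counts are small placeholders rather than the 5e+6 / 3e+6 values used in the commit.

from datasets import Dataset, DatasetDict, load_dataset

# Placeholder sample counts for illustration only (the commit uses 5e+6 and 3e+6).
N_PRETRAIN = 1_000
N_INSTRUCT = 1_000

# Stream the corpora instead of downloading them in full.
pretrain_stream = load_dataset(
    "HuggingFaceTB/smollm-corpus", "cosmopedia-v2", split="train", streaming=True
)
instruct_stream = load_dataset("nroggendorff/elephant", split="train", streaming=True)

# IterableDataset.take(n) yields only the first n examples; wrapping it in
# Dataset.from_generator writes that slice out as an Arrow-backed Dataset.
pretrain = Dataset.from_generator(lambda: pretrain_stream.take(N_PRETRAIN))
instruct = Dataset.from_generator(lambda: instruct_stream.take(N_INSTRUCT))

dataset_dict = DatasetDict({"pretrain": pretrain, "instruct": instruct})
print(dataset_dict)

Separately, with BATCH_SIZE = 16, raising GRADIENT_ACCUMULATION_STEPS from 16 to 64 lifts the effective batch size from 16 × 16 = 256 to 16 × 64 = 1024 examples per optimizer step, which presumably motivates the learning-rate bump from 1e-3 to 2e-3.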