WCNegentropy committed (verified)
Commit 6d2801b · 1 Parent(s): 902f2d4

🚀 Refined BitTransformerLM: Organized codebase with best practices

scripts/benchmarks/wikitext_benchmark.py ADDED
@@ -0,0 +1,47 @@
+ import torch
+ import torch.nn.functional as F
+ from datasets import load_dataset
+ from bit_transformer import text_to_bits, collapse_submodel
+ from progressive_scaleup import progressive_scale_up_text
+
+
+ def lines_to_bits(lines, max_len=64):
+     data = []
+     for text in lines:
+         bits = text_to_bits(text)[:max_len]
+         if len(bits) < max_len:
+             bits.extend([0] * (max_len - len(bits)))
+         data.append(bits)
+     return data
+
+
+ def main():
+     ds = load_dataset("wikitext", "wikitext-2-raw-v1", split="train[:1%]")
+     val_ds = load_dataset("wikitext", "wikitext-2-raw-v1", split="validation[:1%]")
+     train_lines = [item["text"] for item in ds][:256]
+     valid_lines = [item["text"] for item in val_ds][:64]
+
+     train_bits = lines_to_bits(train_lines)
+     valid_bits = lines_to_bits(valid_lines)
+
+     progressive_scale_up_text(
+         eps=0.65,
+         steps=4,
+         width_mult=2.0,
+         max_len=64,
+         dataset_size=min(64, len(train_bits)),
+     )
+
+     target_params = dict(d_model=16, nhead=4, num_layers=1, dim_feedforward=64, max_seq_len=64)
+     model, _ = collapse_submodel(train_bits[:64], target_params, max_rounds=1)
+
+     val_tensor = torch.tensor(valid_bits, dtype=torch.long)
+     logits, _ = model(val_tensor)
+     pred = logits[:, :-1, :].reshape(-1, 2)
+     target = val_tensor[:, 1:].reshape(-1)
+     loss = F.cross_entropy(pred, target)
+     print("Collapsed model validation loss:", loss.item())
+
+
+ if __name__ == "__main__":
+     main()
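
The script runs standalone (`python scripts/benchmarks/wikitext_benchmark.py`). Two details of the code above are worth unpacking. First, `lines_to_bits` clips or zero-pads every line to a fixed `max_len`-bit window so the rows stack into one rectangular tensor. Below is a minimal sketch of that pad/truncate behavior, with a hypothetical `text_to_bits_stub` standing in for `bit_transformer.text_to_bits` (the 8-bits-per-byte encoding here is an assumption, not the library's confirmed format):

```python
def text_to_bits_stub(text: str) -> list[int]:
    # Hypothetical stand-in: big-endian bit expansion of each UTF-8 byte.
    return [(byte >> shift) & 1
            for byte in text.encode("utf-8")
            for shift in range(7, -1, -1)]


def lines_to_bits(lines, max_len=24):
    data = []
    for text in lines:
        bits = text_to_bits_stub(text)[:max_len]      # truncate long lines
        if len(bits) < max_len:
            bits.extend([0] * (max_len - len(bits)))  # zero-pad short lines
        data.append(bits)
    return data


print(lines_to_bits(["Hi"]))
# [[0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0]]
```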
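
Second, the validation loss is standard next-bit cross-entropy: the logits at position t are scored against the bit at position t+1, which is why the script drops the last position's logits (`logits[:, :-1, :]`) and the first bit (`val_tensor[:, 1:]`) before flattening. A shape-only sketch of that alignment, using random tensors and the binary (2-class) output the script assumes:

```python
import torch
import torch.nn.functional as F

batch, seq_len = 2, 8
logits = torch.randn(batch, seq_len, 2)       # per-position logits over {0, 1}
bits = torch.randint(0, 2, (batch, seq_len))  # the input bit sequence

pred = logits[:, :-1, :].reshape(-1, 2)  # predictions for bits 1..seq_len-1
target = bits[:, 1:].reshape(-1)         # those same bits as labels
loss = F.cross_entropy(pred, target)
print(loss.item())  # value is meaningless here; the point is the shift alignment
```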