gate369 committed
Commit cb5adc0 (1 parent: 3a32138)

Create README.md
---
datasets:
- abideen/Cosmopedia-100k-pretrain
tags:
- Mistral
- 1bit
- bitnet
- abideen
- M7
- Liminerity
---
This is my second attempt at converting a float16 model to 1.58-bit (ternary BitNet) precision. I used my model liminerity/M7-7b as the base, trained on the abideen/Cosmopedia-100k-pretrain dataset, and used abideen's Google Colab project to make this.

# Example inference code from abideen's Colab project
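The snippet below loads the checkpoint, swaps every `nn.Linear` inside the Llama attention and MLP blocks for a `BitLinear` layer that fake-quantizes weights to ternary values and activations to 8 bits on the forward pass, removes the now-redundant `input_layernorm` modules, and runs a short generation. It assumes a CUDA device is available.
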
```python
import torch
import torch.nn.functional as F
from torch import nn
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.models.llama.modeling_llama import (
    LlamaDecoderLayer,
    LlamaMLP,
    LlamaRMSNorm,
    LlamaSdpaAttention,
)

# Load the pretrained BitNet checkpoint
model_id = "liminerity/Bitnet-M7-70M"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)


def activation_quant(x):
    # Per-token 8-bit absmax quantization of activations
    scale = 127.0 / x.abs().max(dim=-1, keepdim=True).values.clamp_(min=1e-5)
    y = (x * scale).round().clamp_(-128, 127)
    return y / scale


def weight_quant(w):
    # Ternary {-1, 0, 1} absmean quantization of weights
    scale = 1.0 / w.abs().mean().clamp_(min=1e-5)
    u = (w * scale).round().clamp_(-1, 1)
    return u / scale


class BitLinear(nn.Linear):
    def forward(self, x):
        w = self.weight  # weight tensor with shape [d, k]
        x = x.to(w.device)
        # RMSNorm applied before quantization (recreated per call, as in the original)
        rms_norm = LlamaRMSNorm(x.shape[-1]).to(w.device)
        x_norm = rms_norm(x)
        # Straight-Through-Estimator (STE) trick via detach():
        # forward uses the quantized values, backward sees the identity
        x_quant = x_norm + (activation_quant(x_norm) - x_norm).detach()
        w_quant = w + (weight_quant(w) - w).detach()
        return F.linear(x_quant, w_quant)


def convert_to_bitnet(model, copy_weights):
    for name, module in model.named_modules():
        # Replace linear layers with BitNet
        if isinstance(module, (LlamaSdpaAttention, LlamaMLP)):
            for child_name, child_module in module.named_children():
                if isinstance(child_module, nn.Linear):
                    bitlinear = BitLinear(
                        child_module.in_features,
                        child_module.out_features,
                        child_module.bias is not None,
                    ).to(device="cuda:0")
                    if copy_weights:
                        bitlinear.weight = child_module.weight
                        if child_module.bias is not None:
                            bitlinear.bias = child_module.bias
                    setattr(module, child_name, bitlinear)
        # Remove redundant input_layernorms
        elif isinstance(module, LlamaDecoderLayer):
            for child_name, child_module in module.named_children():
                if isinstance(child_module, LlamaRMSNorm) and child_name == "input_layernorm":
                    setattr(module, child_name, nn.Identity().to(device="cuda:0"))


convert_to_bitnet(model, copy_weights=True)
model.to(device="cuda:0")

prompt = "What is Machine Learning?"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
generate_ids = model.generate(inputs.input_ids, max_length=50)
print(tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0])
```
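
Where the "1.58-bit" figure comes from: `weight_quant` rounds each weight to one of three states, and log2(3) is about 1.58 bits per weight. Below is a minimal standalone sanity-check sketch (not part of the original Colab) that reproduces the absmean rounding from `weight_quant`:

```python
import torch

# Standalone sketch: the absmean ternary rounding used by weight_quant above
w = torch.randn(4, 4)
scale = 1.0 / w.abs().mean().clamp_(min=1e-5)
u = (w * scale).round().clamp_(-1, 1)

# Before rescaling, every entry is one of three states {-1, 0, +1},
# i.e. log2(3) ~ 1.58 bits per weight
print(torch.unique(u))  # typically tensor([-1., 0., 1.])
```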