sabazo committed on
Commit 647a5e9
1 Parent(s): c49aa7c

Created using Colaboratory

🤖_AutoQuantize_(GGUF,_AWQ,_EXL2,_GPTQ).ipynb ADDED
@@ -0,0 +1,316 @@
+ {
+ "nbformat": 4,
+ "nbformat_minor": 0,
+ "metadata": {
+ "colab": {
+ "provenance": [],
+ "gpuType": "T4",
+ "include_colab_link": true
+ },
+ "kernelspec": {
+ "name": "python3",
+ "display_name": "Python 3"
+ },
+ "language_info": {
+ "name": "python"
+ },
+ "accelerator": "GPU"
+ },
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "view-in-github",
+ "colab_type": "text"
+ },
+ "source": [
+ "<a href=\"https://colab.research.google.com/github/almutareb/InnovationPathfinderAI/blob/main/%F0%9F%A4%96_AutoQuantize_(GGUF%2C_AWQ%2C_EXL2%2C_GPTQ).ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# @title # 🤖 AutoQuantize\n",
+ "\n",
+ "# @markdown 🔮 Created by [@zainulabideen](https://huggingface.co/abideen).\n",
+ "\n",
+ "# @markdown Please add your HF token to the secrets tab in Google Colab first.\n",
+ "\n",
+ "# @markdown Quantization formats supported: `GGUF`, `AWQ`, `EXL2`, `GPTQ`\n",
+ "\n",
+ "# @markdown ---\n",
+ "\n",
+ "\n",
+ "# @markdown ### 🤗 Hugging Face Hub\n",
+ "\n",
+ "MODEL_ID = \"abideen/Heimer-dpo-TinyLlama-1.1B\" # @param {type:\"string\"}\n",
+ "MODEL_NAME = MODEL_ID.split('/')[-1]\n",
+ "\n",
+ "# Download model\n",
+ "!git lfs install\n",
+ "!git clone https://huggingface.co/{MODEL_ID}\n",
+ "\n",
+ "username = \"abideen\" # @param {type:\"string\"}\n",
+ "token = \"\" # @param {type:\"string\"}\n",
+ "!pip install -q huggingface_hub\n",
+ "from huggingface_hub import create_repo, HfApi\n",
+ "from google.colab import userdata, runtime"
+ ],
+ "metadata": {
+ "id": "fD24jJxq7t3k",
+ "cellView": "form"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# @title # 🛸 GGUF\n",
+ "# @markdown ### ✨ Quantization parameters\n",
+ "\n",
+ "QUANTIZATION_FORMAT = \"q4_k_m\" # @param {type:\"string\"}\n",
+ "QUANTIZATION_METHODS = QUANTIZATION_FORMAT.replace(\" \", \"\").split(\",\")\n",
+ "# Install llama.cpp\n",
+ "!git clone https://github.com/ggerganov/llama.cpp\n",
+ "!cd llama.cpp && git pull && make clean && LLAMA_CUBLAS=1 make\n",
+ "!pip install -r llama.cpp/requirements.txt\n",
+ "\n",
+ "# Convert to fp16\n",
+ "fp16 = f\"{MODEL_NAME}/{MODEL_NAME.lower()}.fp16.bin\"\n",
+ "!python llama.cpp/convert.py {MODEL_NAME} --outtype f16 --outfile {fp16}\n",
+ "\n",
+ "# Quantize the model for each method in the QUANTIZATION_METHODS list\n",
+ "for method in QUANTIZATION_METHODS:\n",
+ " qtype = f\"{MODEL_NAME}/{MODEL_NAME.lower()}.{method.upper()}.gguf\"\n",
+ " !./llama.cpp/quantize {fp16} {qtype} {method}\n",
+ "\n",
+ "# Defined in the secrets tab in Google Colab\n",
+ "hf_token = userdata.get(token)\n",
+ "api = HfApi()\n",
+ "\n",
+ "# Create empty repo\n",
+ "create_repo(\n",
+ " repo_id = f\"{username}/{MODEL_NAME}-GGUF\",\n",
+ " repo_type=\"model\",\n",
+ " exist_ok=True,\n",
+ " token=hf_token\n",
+ ")\n",
+ "\n",
+ "# Upload gguf files\n",
+ "api.upload_folder(\n",
+ " folder_path=MODEL_NAME,\n",
+ " repo_id=f\"{username}/{MODEL_NAME}-GGUF\",\n",
+ " allow_patterns=[\"*.gguf\",\"*.md\"],\n",
+ " token=hf_token\n",
+ ")"
+ ],
+ "metadata": {
+ "id": "NL0yGhbe3EFk",
+ "cellView": "form"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
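+ {
+ "cell_type": "code",
+ "source": [
+ "# @title # 🧪 Test GGUF (optional)\n",
+ "# A quick sanity check - a minimal sketch, assuming the default q4_k_m quant\n",
+ "# was produced above; adjust QUANT_FILE if you used other methods.\n",
+ "QUANT_FILE = f\"{MODEL_NAME}/{MODEL_NAME.lower()}.Q4_K_M.gguf\"\n",
+ "# Run a short completion with llama.cpp's example binary\n",
+ "!./llama.cpp/main -m {QUANT_FILE} -n 64 -p \"The meaning of life is\""
+ ],
+ "metadata": {},
+ "execution_count": null,
+ "outputs": []
+ },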
+ {
+ "cell_type": "code",
+ "source": [
+ "# @title # 🏛️ AWQ\n",
+ "# @markdown ### ✨ Quantization parameters\n",
+ "\n",
+ "Q_GROUP_SIZE = 128 # @param {type:\"integer\"}\n",
+ "ZERO_POINT = True # @param {type:\"boolean\"}\n",
+ "W_BIT = 4 # @param {type:\"integer\"}\n",
+ "VERSION = \"GEMM\" # @param {type:\"string\"}\n",
+ "SAFETENSORS = True # @param {type:\"boolean\"}\n",
+ "\n",
+ "# Install AutoAWQ\n",
+ "!git clone https://github.com/casper-hansen/AutoAWQ\n",
+ "%cd AutoAWQ\n",
+ "!pip install -e .\n",
+ "!pip install git+https://github.com/huggingface/transformers\n",
+ "!pip install zstandard\n",
+ "\n",
+ "from awq import AutoAWQForCausalLM\n",
+ "from transformers import AutoTokenizer\n",
+ "\n",
+ "quant_path = MODEL_NAME + \"-awq\"\n",
+ "quant_config = { \"zero_point\": ZERO_POINT, \"q_group_size\": Q_GROUP_SIZE, \"w_bit\": W_BIT, \"version\": VERSION }\n",
+ "\n",
+ "# Load model\n",
+ "PATH = \"/content/\" + MODEL_NAME\n",
+ "model = AutoAWQForCausalLM.from_pretrained(PATH, safetensors=SAFETENSORS)\n",
+ "tokenizer = AutoTokenizer.from_pretrained(PATH, trust_remote_code=True)\n",
+ "\n",
+ "# Quantize\n",
+ "model.quantize(tokenizer, quant_config=quant_config)\n",
+ "\n",
+ "# Save quantized model\n",
+ "model.save_quantized(quant_path)\n",
+ "tokenizer.save_pretrained(quant_path)\n",
+ "\n",
+ "# Defined in the secrets tab in Google Colab\n",
+ "hf_token = userdata.get(token)\n",
+ "api = HfApi()\n",
+ "\n",
+ "# Create empty repo\n",
+ "create_repo(\n",
+ " repo_id = f\"{username}/{MODEL_NAME}-AWQ\",\n",
+ " repo_type=\"model\",\n",
+ " exist_ok=True,\n",
+ " token=hf_token\n",
+ ")\n",
+ "\n",
+ "# Upload awq files\n",
+ "api.upload_folder(\n",
+ " folder_path=quant_path,\n",
+ " repo_id=f\"{username}/{MODEL_NAME}-AWQ\",\n",
+ " token=hf_token\n",
+ ")"
+ ],
+ "metadata": {
+ "id": "MyyUO2Fj3WHt",
+ "cellView": "form"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# @title # 🔬 EXL2\n",
+ "# @markdown ### ✨ Quantization parameters\n",
+ "\n",
+ "BPW = 5.0 # @param {type:\"number\"}\n",
+ "\n",
+ "# Install ExLlamaV2\n",
+ "!git clone https://github.com/turboderp/exllamav2\n",
+ "!pip install -e exllamav2\n",
+ "\n",
+ "!mv {MODEL_NAME} base_model\n",
+ "!rm base_model/*.bin\n",
+ "\n",
+ "# Download calibration dataset\n",
+ "!wget https://huggingface.co/datasets/wikitext/resolve/9a9e482b5987f9d25b3a9b2883fc6cc9fd8071b3/wikitext-103-v1/wikitext-test.parquet\n",
+ "\n",
+ "# Quantize model\n",
+ "!mkdir quant\n",
+ "!python exllamav2/convert.py \\\n",
+ " -i base_model \\\n",
+ " -o quant \\\n",
+ " -c wikitext-test.parquet \\\n",
+ " -b {BPW}\n",
+ "\n",
+ "# Copy config/tokenizer files into the quantized folder\n",
+ "!rm -rf quant/out_tensor\n",
+ "!rsync -av --exclude='*.safetensors' --exclude='.*' ./base_model/ ./quant/\n",
+ "\n",
+ "# Defined in the secrets tab in Google Colab\n",
+ "hf_token = userdata.get(token)\n",
+ "api = HfApi()\n",
+ "\n",
+ "# Create empty repo\n",
+ "create_repo(\n",
+ " repo_id = f\"{username}/{MODEL_NAME}-{BPW:.1f}bpw-exl2\",\n",
+ " repo_type=\"model\",\n",
+ " exist_ok=True,\n",
+ " token=hf_token\n",
+ ")\n",
+ "\n",
+ "# Upload exl2 files\n",
+ "api.upload_folder(\n",
+ " folder_path=\"quant\",\n",
+ " repo_id=f\"{username}/{MODEL_NAME}-{BPW:.1f}bpw-exl2\",\n",
+ " token=hf_token\n",
+ ")"
+ ],
+ "metadata": {
+ "id": "ZC9Nsr9u5WhN",
+ "cellView": "form"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# @title # 📝 GPTQ\n",
+ "# @markdown ### ✨ Quantization parameters\n",
+ "\n",
+ "BITS = 4 # @param {type:\"integer\"}\n",
+ "GROUP_SIZE = 128 # @param {type:\"integer\"}\n",
+ "DAMP_PERCENT = 0.01 # @param {type:\"number\"}\n",
+ "\n",
+ "!BUILD_CUDA_EXT=0 pip install -q auto-gptq transformers\n",
+ "import random\n",
+ "from auto_gptq import AutoGPTQForCausalLM, BaseQuantizeConfig\n",
+ "from datasets import load_dataset\n",
+ "import torch\n",
+ "from transformers import AutoTokenizer\n",
+ "out_dir = MODEL_ID + \"-GPTQ\"\n",
+ "\n",
+ "# Load quantize config, model and tokenizer\n",
+ "quantize_config = BaseQuantizeConfig(\n",
+ " bits=BITS,\n",
+ " group_size=GROUP_SIZE,\n",
+ " damp_percent=DAMP_PERCENT,\n",
+ " desc_act=False,\n",
+ ")\n",
+ "PATH = \"/content/\" + MODEL_NAME\n",
+ "model = AutoGPTQForCausalLM.from_pretrained(PATH, quantize_config)\n",
+ "tokenizer = AutoTokenizer.from_pretrained(PATH)\n",
+ "\n",
+ "# Load calibration data and tokenize it as one long sequence\n",
+ "n_samples = 1024\n",
+ "data = load_dataset(\"allenai/c4\", data_files=\"en/c4-train.00001-of-01024.json.gz\", split=f\"train[:{n_samples*5}]\")\n",
+ "tokenized_data = tokenizer(\"\\n\\n\".join(data['text']), return_tensors='pt')\n",
+ "\n",
+ "# Build calibration examples: n_samples random windows of model_max_length tokens\n",
+ "examples_ids = []\n",
+ "for _ in range(n_samples):\n",
+ " i = random.randint(0, tokenized_data.input_ids.shape[1] - tokenizer.model_max_length - 1)\n",
+ " j = i + tokenizer.model_max_length\n",
+ " input_ids = tokenized_data.input_ids[:, i:j]\n",
+ " attention_mask = torch.ones_like(input_ids)\n",
+ " examples_ids.append({'input_ids': input_ids, 'attention_mask': attention_mask})\n",
+ "\n",
+ "# Quantize with GPTQ\n",
+ "model.quantize(\n",
+ " examples_ids,\n",
+ " batch_size=1,\n",
+ " use_triton=True,\n",
+ ")\n",
+ "\n",
+ "# Save model and tokenizer\n",
+ "model.save_quantized(out_dir, use_safetensors=True)\n",
+ "tokenizer.save_pretrained(out_dir)\n",
+ "\n",
+ "# Defined in the secrets tab in Google Colab\n",
+ "hf_token = userdata.get(token)\n",
+ "api = HfApi()\n",
+ "\n",
+ "# Create empty repo\n",
+ "create_repo(\n",
+ " repo_id = f\"{username}/{MODEL_NAME}-GPTQ\",\n",
+ " repo_type=\"model\",\n",
+ " exist_ok=True,\n",
+ " token=hf_token\n",
+ ")\n",
+ "\n",
+ "# Upload gptq files\n",
+ "api.upload_folder(\n",
+ " folder_path=out_dir,\n",
+ " repo_id=f\"{username}/{MODEL_NAME}-GPTQ\",\n",
+ " token=hf_token\n",
+ ")\n"
+ ],
+ "metadata": {
+ "id": "OE_R3AXG5Y-F",
+ "cellView": "form"
+ },
+ "execution_count": null,
+ "outputs": []
+ }
+ ]
+ }