{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "3fe526df-1999-4179-a044-69f0c72273c7",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/usr/local/lib/python3.10/dist-packages/auto_gptq/nn_modules/triton_utils/kernels.py:411: FutureWarning: `torch.cuda.amp.custom_fwd(args...)` is deprecated. Please use `torch.amp.custom_fwd(args..., device_type='cuda')` instead.\n",
      "  def forward(ctx, input, qweight, scales, qzeros, g_idx, bits, maxq):\n",
      "/usr/local/lib/python3.10/dist-packages/auto_gptq/nn_modules/triton_utils/kernels.py:419: FutureWarning: `torch.cuda.amp.custom_bwd(args...)` is deprecated. Please use `torch.amp.custom_bwd(args..., device_type='cuda')` instead.\n",
      "  def backward(ctx, grad_output):\n",
      "/usr/local/lib/python3.10/dist-packages/auto_gptq/nn_modules/triton_utils/kernels.py:461: FutureWarning: `torch.cuda.amp.custom_fwd(args...)` is deprecated. Please use `torch.amp.custom_fwd(args..., device_type='cuda')` instead.\n",
      "  @custom_fwd(cast_inputs=torch.float16)\n",
      "/usr/local/lib/python3.10/dist-packages/peft/peft_model.py:3235: DeprecationWarning: PEFT_TYPE_TO_MODEL_MAPPING is deprecated, please use `from peft import PEFT_TYPE_TO_TUNER_MAPPING` instead. The deprecated variable will be removed in 2026.\n",
      "  warnings.warn(msg, category=DeprecationWarning)\n",
      "WARNING:auto_gptq.nn_modules.qlinear.qlinear_cuda:CUDA extension not installed.\n",
      "WARNING:auto_gptq.nn_modules.qlinear.qlinear_cuda_old:CUDA extension not installed.\n",
      "Generating test split: 100%|███████████████████████████████████████████████████████████████████████████████████| 4358/4358 [00:00<00:00, 252180.19 examples/s]\n",
      "Generating train split: 100%|████████████████████████████████████████████████████████████████████████████████| 36718/36718 [00:00<00:00, 746806.59 examples/s]\n",
      "Generating validation split: 100%|█████████████████████████████████████████████████████████████████████████████| 3760/3760 [00:00<00:00, 659995.11 examples/s]\n",
      "Quantizing model.decoder.layers blocks :   0%|                                                                                         | 0/24 [00:00<?, ?it/s]\n",
      "Quantizing layers inside the block:   0%|                                                                                               | 0/6 [00:00<?, ?it/s]\u001b[A\n",
      "Quantizing layers inside the block:  17%|██████████████▌                                                                        | 1/6 [00:01<00:07,  1.53s/it]\u001b[A\n",
      "Quantizing layers inside the block:  33%|█████████████████████████████                                                          | 2/6 [00:02<00:04,  1.23s/it]\u001b[A\n",
      "Quantizing layers inside the block:  50%|███████████████████████████████████████████▌                                           | 3/6 [00:03<00:03,  1.13s/it]\u001b[A\n",
      "Quantizing layers inside the block:  67%|██████████████████████████████████████████████████████████                             | 4/6 [00:04<00:02,  1.08s/it]\u001b[A\n",
      "Quantizing layers inside the block:  83%|████████████████████████████████████████████████████████████████████████▌              | 5/6 [00:05<00:01,  1.06s/it]\u001b[A\n",
      "Quantizing layers inside the block: 100%|███████████████████████████████████████████████████████████████████████████████████████| 6/6 [00:09<00:00,  2.01s/it]\u001b[A\n",
      "Quantizing model.decoder.layers blocks :   4%|███▍                                                                             | 1/24 [00:09<03:48,  9.92s/it]\u001b[A\n",
      "Quantizing layers inside the block:   0%|                                                                                               | 0/6 [00:00<?, ?it/s]\u001b[A\n",
      "Quantizing layers inside the block:  17%|██████████████▌                                                                        | 1/6 [00:01<00:05,  1.02s/it]\u001b[A\n",
      "Quantizing layers inside the block:  33%|█████████████████████████████                                                          | 2/6 [00:02<00:04,  1.01s/it]\u001b[A\n",
      "Quantizing layers inside the block:  50%|███████████████████████████████████████████▌                                           | 3/6 [00:03<00:03,  1.01s/it]\u001b[A\n",
      "Quantizing layers inside the block:  67%|██████████████████████████████████████████████████████████                             | 4/6 [00:04<00:02,  1.01s/it]\u001b[A\n",
      "Quantizing layers inside the block:  83%|████████████████████████████████████████████████████████████████████████▌              | 5/6 [00:05<00:01,  1.01s/it]\u001b[A\n",
      "Quantizing layers inside the block: 100%|███████████████████████████████████████████████████████████████████████████████████████| 6/6 [00:08<00:00,  2.00s/it]\u001b[A\n",
      "Quantizing model.decoder.layers blocks :   8%|██████▊                                                                          | 2/24 [00:19<03:31,  9.62s/it]\u001b[A\n",
      "Quantizing layers inside the block:   0%|                                                                                               | 0/6 [00:00<?, ?it/s]\u001b[A\n",
      "Quantizing layers inside the block:  17%|██████████████▌                                                                        | 1/6 [00:01<00:05,  1.02s/it]\u001b[A\n",
      "Quantizing layers inside the block:  33%|█████████████████████████████                                                          | 2/6 [00:02<00:04,  1.02s/it]\u001b[A\n",
      "Quantizing layers inside the block:  50%|███████████████████████████████████████████▌                                           | 3/6 [00:03<00:03,  1.01s/it]\u001b[A\n",
      "Quantizing layers inside the block:  67%|██████████████████████████████████████████████████████████                             | 4/6 [00:04<00:02,  1.01s/it]\u001b[A\n",
      "Quantizing layers inside the block:  83%|████████████████████████████████████████████████████████████████████████▌              | 5/6 [00:05<00:01,  1.01s/it]\u001b[A\n",
      "Quantizing layers inside the block: 100%|███████████████████████████████████████████████████████████████████████████████████████| 6/6 [00:09<00:00,  2.01s/it]\u001b[A\n",
      "Quantizing model.decoder.layers blocks :  12%|██████████▏                                                                      | 3/24 [00:28<03:20,  9.53s/it]\u001b[A\n",
      "Quantizing layers inside the block:   0%|                                                                                               | 0/6 [00:00<?, ?it/s]\u001b[A\n",
      "Quantizing layers inside the block:  17%|██████████████▌                                                                        | 1/6 [00:01<00:05,  1.03s/it]\u001b[A\n",
      "Quantizing layers inside the block:  33%|█████████████████████████████                                                          | 2/6 [00:02<00:04,  1.02s/it]\u001b[A\n",
      "Quantizing layers inside the block:  50%|███████████████████████████████████████████▌                                           | 3/6 [00:03<00:03,  1.02s/it]\u001b[A\n",
      "Quantizing layers inside the block:  67%|██████████████████████████████████████████████████████████                             | 4/6 [00:04<00:02,  1.02s/it]\u001b[A\n",
      "Quantizing layers inside the block:  83%|████████████████████████████████████████████████████████████████████████▌              | 5/6 [00:05<00:01,  1.02s/it]\u001b[A\n",
      "Quantizing layers inside the block: 100%|███████████████████████████████████████████████████████████████████████████████████████| 6/6 [00:09<00:00,  2.02s/it]\u001b[A\n",
      "Quantizing model.decoder.layers blocks :  17%|█████████████▌                                                                   | 4/24 [00:38<03:10,  9.52s/it]\u001b[A\n",
      "Quantizing layers inside the block:   0%|                                                                                               | 0/6 [00:00<?, ?it/s]\u001b[A\n",
      "Quantizing layers inside the block:  17%|██████████████▌                                                                        | 1/6 [00:01<00:05,  1.03s/it]\u001b[A\n",
      "Quantizing layers inside the block:  33%|█████████████████████████████                                                          | 2/6 [00:02<00:04,  1.02s/it]\u001b[A\n",
      "Quantizing layers inside the block:  50%|███████████████████████████████████████████▌                                           | 3/6 [00:03<00:03,  1.02s/it]\u001b[A\n",
      "Quantizing layers inside the block:  67%|██████████████████████████████████████████████████████████                             | 4/6 [00:04<00:02,  1.02s/it]\u001b[A\n",
      "Quantizing layers inside the block:  83%|████████████████████████████████████████████████████████████████████████▌              | 5/6 [00:05<00:01,  1.02s/it]\u001b[A\n",
      "Quantizing layers inside the block: 100%|███████████████████████████████████████████████████████████████████████████████████████| 6/6 [00:09<00:00,  2.03s/it]\u001b[A\n",
      "Quantizing model.decoder.layers blocks :  21%|████████████████▉                                                                | 5/24 [00:47<03:01,  9.53s/it]\u001b[A\n",
      "Quantizing layers inside the block:   0%|                                                                                               | 0/6 [00:00<?, ?it/s]\u001b[A\n",
      "Quantizing layers inside the block:  17%|██████████████▌                                                                        | 1/6 [00:01<00:05,  1.02s/it]\u001b[A\n",
      "Quantizing layers inside the block:  33%|█████████████████████████████                                                          | 2/6 [00:02<00:04,  1.02s/it]\u001b[A\n",
      "Quantizing layers inside the block:  50%|███████████████████████████████████████████▌                                           | 3/6 [00:03<00:03,  1.02s/it]\u001b[A\n",
      "Quantizing layers inside the block:  67%|██████████████████████████████████████████████████████████                             | 4/6 [00:04<00:02,  1.02s/it]\u001b[A\n",
      "Quantizing layers inside the block:  83%|████████████████████████████████████████████████████████████████████████▌              | 5/6 [00:05<00:01,  1.02s/it]\u001b[A\n",
      "Quantizing layers inside the block: 100%|███████████████████████████████████████████████████████████████████████████████████████| 6/6 [00:09<00:00,  2.03s/it]\u001b[A\n",
      "Quantizing model.decoder.layers blocks :  25%|████████████████████▎                                                            | 6/24 [00:57<02:51,  9.53s/it]\u001b[A\n",
      "Quantizing layers inside the block:   0%|                                                                                               | 0/6 [00:00<?, ?it/s]\u001b[A\n",
      "Quantizing layers inside the block:  17%|██████████████▌                                                                        | 1/6 [00:01<00:05,  1.02s/it]\u001b[A\n",
      "Quantizing layers inside the block:  33%|█████████████████████████████                                                          | 2/6 [00:02<00:04,  1.02s/it]\u001b[A\n",
      "Quantizing layers inside the block:  50%|███████████████████████████████████████████▌                                           | 3/6 [00:03<00:03,  1.02s/it]\u001b[A\n",
      "Quantizing layers inside the block:  67%|██████████████████████████████████████████████████████████                             | 4/6 [00:04<00:02,  1.02s/it]\u001b[A\n",
      "Quantizing layers inside the block:  83%|████████████████████████████████████████████████████████████████████████▌              | 5/6 [00:05<00:01,  1.02s/it]\u001b[A\n",
      "Quantizing layers inside the block: 100%|███████████████████████████████████████████████████████████████████████████████████████| 6/6 [00:09<00:00,  2.03s/it]\u001b[A\n",
      "Quantizing model.decoder.layers blocks :  29%|███████████████████████▋                                                         | 7/24 [01:06<02:41,  9.52s/it]\u001b[A\n",
      "Quantizing layers inside the block:   0%|                                                                                               | 0/6 [00:00<?, ?it/s]\u001b[A\n",
      "Quantizing layers inside the block:  17%|██████████████▌                                                                        | 1/6 [00:01<00:05,  1.03s/it]\u001b[A\n",
      "Quantizing layers inside the block:  33%|█████████████████████████████                                                          | 2/6 [00:02<00:04,  1.03s/it]\u001b[A\n",
      "Quantizing layers inside the block:  50%|███████████████████████████████████████████▌                                           | 3/6 [00:03<00:03,  1.02s/it]\u001b[A\n",
      "Quantizing layers inside the block:  67%|██████████████████████████████████████████████████████████                             | 4/6 [00:04<00:02,  1.02s/it]\u001b[A\n",
      "Quantizing layers inside the block:  83%|████████████████████████████████████████████████████████████████████████▌              | 5/6 [00:05<00:01,  1.02s/it]\u001b[A\n",
      "Quantizing layers inside the block: 100%|███████████████████████████████████████████████████████████████████████████████████████| 6/6 [00:09<00:00,  2.03s/it]\u001b[A\n",
      "Quantizing model.decoder.layers blocks :  33%|███████████████████████████                                                      | 8/24 [01:16<02:32,  9.55s/it]\u001b[A\n",
      "Quantizing layers inside the block:   0%|                                                                                               | 0/6 [00:00<?, ?it/s]\u001b[A\n",
      "Quantizing layers inside the block:  17%|██████████████▌                                                                        | 1/6 [00:01<00:05,  1.02s/it]\u001b[A\n",
      "Quantizing layers inside the block:  33%|█████████████████████████████                                                          | 2/6 [00:02<00:04,  1.02s/it]\u001b[A\n",
      "Quantizing layers inside the block:  50%|███████████████████████████████████████████▌                                           | 3/6 [00:03<00:03,  1.02s/it]\u001b[A\n",
      "Quantizing layers inside the block:  67%|██████████████████████████████████████████████████████████                             | 4/6 [00:04<00:02,  1.02s/it]\u001b[A\n",
      "Quantizing layers inside the block:  83%|████████████████████████████████████████████████████████████████████████▌              | 5/6 [00:05<00:01,  1.02s/it]\u001b[A\n",
      "Quantizing layers inside the block: 100%|███████████████████████████████████████████████████████████████████████████████████████| 6/6 [00:09<00:00,  2.05s/it]\u001b[A\n",
      "Quantizing model.decoder.layers blocks :  38%|██████████████████████████████▍                                                  | 9/24 [01:26<02:23,  9.56s/it]\u001b[A\n",
      "Quantizing layers inside the block:   0%|                                                                                               | 0/6 [00:00<?, ?it/s]\u001b[A\n",
      "Quantizing layers inside the block:  17%|██████████████▌                                                                        | 1/6 [00:01<00:05,  1.03s/it]\u001b[A\n",
      "Quantizing layers inside the block:  33%|█████████████████████████████                                                          | 2/6 [00:02<00:04,  1.03s/it]\u001b[A\n",
      "Quantizing layers inside the block:  50%|███████████████████████████████████████████▌                                           | 3/6 [00:03<00:03,  1.03s/it]\u001b[A\n",
      "Quantizing layers inside the block:  67%|██████████████████████████████████████████████████████████                             | 4/6 [00:04<00:02,  1.03s/it]\u001b[A\n",
      "Quantizing layers inside the block:  83%|████████████████████████████████████████████████████████████████████████▌              | 5/6 [00:05<00:01,  1.03s/it]\u001b[A\n",
      "Quantizing layers inside the block: 100%|███████████████████████████████████████████████████████████████████████████████████████| 6/6 [00:09<00:00,  2.05s/it]\u001b[A\n",
      "Quantizing model.decoder.layers blocks :  42%|█████████████████████████████████▎                                              | 10/24 [01:35<02:14,  9.58s/it]\u001b[A\n",
      "Quantizing layers inside the block:   0%|                                                                                               | 0/6 [00:00<?, ?it/s]\u001b[A\n",
      "Quantizing layers inside the block:  17%|██████████████▌                                                                        | 1/6 [00:01<00:05,  1.04s/it]\u001b[A\n",
      "Quantizing layers inside the block:  33%|█████████████████████████████                                                          | 2/6 [00:02<00:04,  1.04s/it]\u001b[A\n",
      "Quantizing layers inside the block:  50%|███████████████████████████████████████████▌                                           | 3/6 [00:03<00:03,  1.03s/it]\u001b[A\n",
      "Quantizing layers inside the block:  67%|██████████████████████████████████████████████████████████                             | 4/6 [00:04<00:02,  1.03s/it]\u001b[A\n",
      "Quantizing layers inside the block:  83%|████████████████████████████████████████████████████████████████████████▌              | 5/6 [00:05<00:01,  1.04s/it]\u001b[A\n",
      "Quantizing layers inside the block: 100%|███████████████████████████████████████████████████████████████████████████████████████| 6/6 [00:09<00:00,  2.06s/it]\u001b[A\n",
      "Quantizing model.decoder.layers blocks :  46%|████████████████████████████████████▋                                           | 11/24 [01:45<02:05,  9.62s/it]\u001b[A\n",
      "Quantizing layers inside the block:   0%|                                                                                               | 0/6 [00:00<?, ?it/s]\u001b[A\n",
      "Quantizing layers inside the block:  17%|██████████████▌                                                                        | 1/6 [00:01<00:05,  1.03s/it]\u001b[A\n",
      "Quantizing layers inside the block:  33%|█████████████████████████████                                                          | 2/6 [00:02<00:04,  1.03s/it]\u001b[A\n",
      "Quantizing layers inside the block:  50%|███████████████████████████████████████████▌                                           | 3/6 [00:03<00:03,  1.03s/it]\u001b[A\n",
      "Quantizing layers inside the block:  67%|██████████████████████████████████████████████████████████                             | 4/6 [00:04<00:02,  1.04s/it]\u001b[A\n",
      "Quantizing layers inside the block:  83%|████████████████████████████████████████████████████████████████████████▌              | 5/6 [00:05<00:01,  1.04s/it]\u001b[A\n",
      "Quantizing layers inside the block: 100%|███████████████████████████████████████████████████████████████████████████████████████| 6/6 [00:09<00:00,  2.07s/it]\u001b[A\n",
      "Quantizing model.decoder.layers blocks :  50%|████████████████████████████████████████                                        | 12/24 [01:55<01:55,  9.66s/it]\u001b[A\n",
      "Quantizing layers inside the block:   0%|                                                                                               | 0/6 [00:00<?, ?it/s]\u001b[A\n",
      "Quantizing layers inside the block:  17%|██████████████▌                                                                        | 1/6 [00:01<00:05,  1.04s/it]\u001b[A\n",
      "Quantizing layers inside the block:  33%|█████████████████████████████                                                          | 2/6 [00:02<00:04,  1.03s/it]\u001b[A\n",
      "Quantizing layers inside the block:  50%|███████████████████████████████████████████▌                                           | 3/6 [00:03<00:03,  1.03s/it]\u001b[A\n",
      "Quantizing layers inside the block:  67%|██████████████████████████████████████████████████████████                             | 4/6 [00:04<00:02,  1.03s/it]\u001b[A\n",
      "Quantizing layers inside the block:  83%|████████████████████████████████████████████████████████████████████████▌              | 5/6 [00:05<00:01,  1.03s/it]\u001b[A\n",
      "Quantizing layers inside the block: 100%|███████████████████████████████████████████████████████████████████████████████████████| 6/6 [00:09<00:00,  2.06s/it]\u001b[A\n",
      "Quantizing model.decoder.layers blocks :  54%|███████████████████████████████████████████▎                                    | 13/24 [02:04<01:46,  9.67s/it]\u001b[A\n",
      "Quantizing layers inside the block:   0%|                                                                                               | 0/6 [00:00<?, ?it/s]\u001b[A\n",
      "Quantizing layers inside the block:  17%|██████████████▌                                                                        | 1/6 [00:01<00:05,  1.04s/it]\u001b[A\n",
      "Quantizing layers inside the block:  33%|█████████████████████████████                                                          | 2/6 [00:02<00:04,  1.03s/it]\u001b[A\n",
      "Quantizing layers inside the block:  50%|███████████████████████████████████████████▌                                           | 3/6 [00:03<00:03,  1.04s/it]\u001b[A\n",
      "Quantizing layers inside the block:  67%|██████████████████████████████████████████████████████████                             | 4/6 [00:04<00:02,  1.04s/it]\u001b[A\n",
      "Quantizing layers inside the block:  83%|████████████████████████████████████████████████████████████████████████▌              | 5/6 [00:05<00:01,  1.04s/it]\u001b[A\n",
      "Quantizing layers inside the block: 100%|███████████████████████████████████████████████████████████████████████████████████████| 6/6 [00:09<00:00,  2.07s/it]\u001b[A\n",
      "Quantizing model.decoder.layers blocks :  58%|██████████████████████████████████████████████▋                                 | 14/24 [02:14<01:36,  9.68s/it]\u001b[A\n",
      "Quantizing layers inside the block:   0%|                                                                                               | 0/6 [00:00<?, ?it/s]\u001b[A\n",
      "Quantizing layers inside the block:  17%|██████████████▌                                                                        | 1/6 [00:01<00:05,  1.05s/it]\u001b[A\n",
      "Quantizing layers inside the block:  33%|█████████████████████████████                                                          | 2/6 [00:02<00:04,  1.04s/it]\u001b[A\n",
      "Quantizing layers inside the block:  50%|███████████████████████████████████████████▌                                           | 3/6 [00:03<00:03,  1.04s/it]\u001b[A\n",
      "Quantizing layers inside the block:  67%|██████████████████████████████████████████████████████████                             | 4/6 [00:04<00:02,  1.04s/it]\u001b[A\n",
      "Quantizing layers inside the block:  83%|████████████████████████████████████████████████████████████████████████▌              | 5/6 [00:05<00:01,  1.04s/it]\u001b[A\n",
      "Quantizing layers inside the block: 100%|███████████████████████████████████████████████████████████████████████████████████████| 6/6 [00:09<00:00,  2.07s/it]\u001b[A\n",
      "Quantizing model.decoder.layers blocks :  62%|██████████████████████████████████████████████████                              | 15/24 [02:24<01:27,  9.69s/it]\u001b[A\n",
      "Quantizing layers inside the block:   0%|                                                                                               | 0/6 [00:00<?, ?it/s]\u001b[A\n",
      "Quantizing layers inside the block:  17%|██████████████▌                                                                        | 1/6 [00:01<00:05,  1.04s/it]\u001b[A\n",
      "Quantizing layers inside the block:  33%|█████████████████████████████                                                          | 2/6 [00:02<00:04,  1.04s/it]\u001b[A\n",
      "Quantizing layers inside the block:  50%|███████████████████████████████████████████▌                                           | 3/6 [00:03<00:03,  1.04s/it]\u001b[A\n",
      "Quantizing layers inside the block:  67%|██████████████████████████████████████████████████████████                             | 4/6 [00:04<00:02,  1.04s/it]\u001b[A\n",
      "Quantizing layers inside the block:  83%|████████████████████████████████████████████████████████████████████████▌              | 5/6 [00:05<00:01,  1.04s/it]\u001b[A\n",
      "Quantizing layers inside the block: 100%|███████████████████████████████████████████████████████████████████████████████████████| 6/6 [00:09<00:00,  2.07s/it]\u001b[A\n",
      "Quantizing model.decoder.layers blocks :  67%|█████████████████████████████████████████████████████▎                          | 16/24 [02:33<01:17,  9.71s/it]\u001b[A\n",
      "Quantizing layers inside the block:   0%|                                                                                               | 0/6 [00:00<?, ?it/s]\u001b[A\n",
      "Quantizing layers inside the block:  17%|██████████████▌                                                                        | 1/6 [00:01<00:05,  1.05s/it]\u001b[A\n",
      "Quantizing layers inside the block:  33%|█████████████████████████████                                                          | 2/6 [00:02<00:04,  1.04s/it]\u001b[A\n",
      "Quantizing layers inside the block:  50%|███████████████████████████████████████████▌                                           | 3/6 [00:03<00:03,  1.04s/it]\u001b[A\n",
      "Quantizing layers inside the block:  67%|██████████████████████████████████████████████████████████                             | 4/6 [00:04<00:02,  1.04s/it]\u001b[A\n",
      "Quantizing layers inside the block:  83%|████████████████████████████████████████████████████████████████████████▌              | 5/6 [00:05<00:01,  1.04s/it]\u001b[A\n",
      "Quantizing layers inside the block: 100%|███████████████████████████████████████████████████████████████████████████████████████| 6/6 [00:09<00:00,  2.07s/it]\u001b[A\n",
      "Quantizing model.decoder.layers blocks :  71%|████████████████████████████████████████████████████████▋                       | 17/24 [02:43<01:08,  9.72s/it]\u001b[A\n",
      "Quantizing layers inside the block:   0%|                                                                                               | 0/6 [00:00<?, ?it/s]\u001b[A\n",
      "Quantizing layers inside the block:  17%|██████████████▌                                                                        | 1/6 [00:01<00:05,  1.05s/it]\u001b[A\n",
      "Quantizing layers inside the block:  33%|█████████████████████████████                                                          | 2/6 [00:02<00:04,  1.05s/it]\u001b[A\n",
      "Quantizing layers inside the block:  50%|███████████████████████████████████████████▌                                           | 3/6 [00:03<00:03,  1.04s/it]\u001b[A\n",
      "Quantizing layers inside the block:  67%|██████████████████████████████████████████████████████████                             | 4/6 [00:04<00:02,  1.04s/it]\u001b[A\n",
      "Quantizing layers inside the block:  83%|████████████████████████████████████████████████████████████████████████▌              | 5/6 [00:05<00:01,  1.04s/it]\u001b[A\n",
      "Quantizing layers inside the block: 100%|███████████████████████████████████████████████████████████████████████████████████████| 6/6 [00:09<00:00,  2.07s/it]\u001b[A\n",
      "Quantizing model.decoder.layers blocks :  75%|████████████████████████████████████████████████████████████                    | 18/24 [02:53<00:58,  9.73s/it]\u001b[A\n",
      "Quantizing layers inside the block:   0%|                                                                                               | 0/6 [00:00<?, ?it/s]\u001b[A\n",
      "Quantizing layers inside the block:  17%|██████████████▌                                                                        | 1/6 [00:01<00:05,  1.04s/it]\u001b[A\n",
      "Quantizing layers inside the block:  33%|█████████████████████████████                                                          | 2/6 [00:02<00:04,  1.04s/it]\u001b[A\n",
      "Quantizing layers inside the block:  50%|███████████████████████████████████████████▌                                           | 3/6 [00:03<00:03,  1.04s/it]\u001b[A\n",
      "Quantizing layers inside the block:  67%|██████████████████████████████████████████████████████████                             | 4/6 [00:04<00:02,  1.04s/it]\u001b[A\n",
      "Quantizing layers inside the block:  83%|████████████████████████████████████████████████████████████████████████▌              | 5/6 [00:05<00:01,  1.04s/it]\u001b[A\n",
      "Quantizing layers inside the block: 100%|███████████████████████████████████████████████████████████████████████████████████████| 6/6 [00:09<00:00,  2.07s/it]\u001b[A\n",
      "Quantizing model.decoder.layers blocks :  79%|███████████████████████████████████████████████████████████████▎                | 19/24 [03:03<00:48,  9.73s/it]\u001b[A\n",
      "Quantizing layers inside the block:   0%|                                                                                               | 0/6 [00:00<?, ?it/s]\u001b[A\n",
      "Quantizing layers inside the block:  17%|██████████████▌                                                                        | 1/6 [00:01<00:05,  1.05s/it]\u001b[A\n",
      "Quantizing layers inside the block:  33%|█████████████████████████████                                                          | 2/6 [00:02<00:04,  1.04s/it]\u001b[A\n",
      "Quantizing layers inside the block:  50%|███████████████████████████████████████████▌                                           | 3/6 [00:03<00:03,  1.04s/it]\u001b[A\n",
      "Quantizing layers inside the block:  67%|██████████████████████████████████████████████████████████                             | 4/6 [00:04<00:02,  1.04s/it]\u001b[A\n",
      "Quantizing layers inside the block:  83%|████████████████████████████████████████████████████████████████████████▌              | 5/6 [00:05<00:01,  1.04s/it]\u001b[A\n",
      "Quantizing layers inside the block: 100%|███████████████████████████████████████████████████████████████████████████████████████| 6/6 [00:09<00:00,  2.08s/it]\u001b[A\n",
      "Quantizing model.decoder.layers blocks :  83%|██████████████████████████████████████████████████████████████████▋             | 20/24 [03:13<00:39,  9.75s/it]\u001b[A\n",
      "Quantizing layers inside the block:   0%|                                                                                               | 0/6 [00:00<?, ?it/s]\u001b[A\n",
      "Quantizing layers inside the block:  17%|██████████████▌                                                                        | 1/6 [00:01<00:05,  1.04s/it]\u001b[A\n",
      "Quantizing layers inside the block:  33%|█████████████████████████████                                                          | 2/6 [00:02<00:04,  1.04s/it]\u001b[A\n",
      "Quantizing layers inside the block:  50%|███████████████████████████████████████████▌                                           | 3/6 [00:03<00:03,  1.04s/it]\u001b[A\n",
      "Quantizing layers inside the block:  67%|██████████████████████████████████████████████████████████                             | 4/6 [00:04<00:02,  1.04s/it]\u001b[A\n",
      "Quantizing layers inside the block:  83%|████████████████████████████████████████████████████████████████████████▌              | 5/6 [00:05<00:01,  1.04s/it]\u001b[A\n",
      "Quantizing layers inside the block: 100%|███████████████████████████████████████████████████████████████████████████████████████| 6/6 [00:09<00:00,  2.08s/it]\u001b[A\n",
      "Quantizing model.decoder.layers blocks :  88%|██████████████████████████████████████████████████████████████████████          | 21/24 [03:22<00:29,  9.75s/it]\u001b[A\n",
      "Quantizing layers inside the block:   0%|                                                                                               | 0/6 [00:00<?, ?it/s]\u001b[A\n",
      "Quantizing layers inside the block:  17%|██████████████▌                                                                        | 1/6 [00:01<00:05,  1.04s/it]\u001b[A\n",
      "Quantizing layers inside the block:  33%|█████████████████████████████                                                          | 2/6 [00:02<00:04,  1.04s/it]\u001b[A\n",
      "Quantizing layers inside the block:  50%|███████████████████████████████████████████▌                                           | 3/6 [00:03<00:03,  1.04s/it]\u001b[A\n",
      "Quantizing layers inside the block:  67%|██████████████████████████████████████████████████████████                             | 4/6 [00:04<00:02,  1.04s/it]\u001b[A\n",
      "Quantizing layers inside the block:  83%|████████████████████████████████████████████████████████████████████████▌              | 5/6 [00:05<00:01,  1.04s/it]\u001b[A\n",
      "Quantizing layers inside the block: 100%|███████████████████████████████████████████████████████████████████████████████████████| 6/6 [00:09<00:00,  2.07s/it]\u001b[A\n",
      "Quantizing model.decoder.layers blocks :  92%|█████████████████████████████████████████████████████████████████████████▎      | 22/24 [03:32<00:19,  9.74s/it]\u001b[A\n",
      "Quantizing layers inside the block:   0%|                                                                                               | 0/6 [00:00<?, ?it/s]\u001b[A\n",
      "Quantizing layers inside the block:  17%|██████████████▌                                                                        | 1/6 [00:01<00:05,  1.04s/it]\u001b[A\n",
      "Quantizing layers inside the block:  33%|█████████████████████████████                                                          | 2/6 [00:02<00:04,  1.04s/it]\u001b[A\n",
      "Quantizing layers inside the block:  50%|███████████████████████████████████████████▌                                           | 3/6 [00:03<00:03,  1.04s/it]\u001b[A\n",
      "Quantizing layers inside the block:  67%|██████████████████████████████████████████████████████████                             | 4/6 [00:04<00:02,  1.04s/it]\u001b[A\n",
      "Quantizing layers inside the block:  83%|████████████████████████████████████████████████████████████████████████▌              | 5/6 [00:05<00:01,  1.04s/it]\u001b[A\n",
      "Quantizing layers inside the block: 100%|███████████████████████████████████████████████████████████████████████████████████████| 6/6 [00:09<00:00,  2.07s/it]\u001b[A\n",
      "Quantizing model.decoder.layers blocks :  96%|████████████████████████████████████████████████████████████████████████████▋   | 23/24 [03:42<00:09,  9.74s/it]\u001b[A\n",
      "Quantizing layers inside the block:   0%|                                                                                               | 0/6 [00:00<?, ?it/s]\u001b[A\n",
      "Quantizing layers inside the block:  17%|██████████████▌                                                                        | 1/6 [00:01<00:05,  1.04s/it]\u001b[A\n",
      "Quantizing layers inside the block:  33%|█████████████████████████████                                                          | 2/6 [00:02<00:04,  1.04s/it]\u001b[A\n",
      "Quantizing layers inside the block:  50%|███████████████████████████████████████████▌                                           | 3/6 [00:03<00:03,  1.04s/it]\u001b[A\n",
      "Quantizing layers inside the block:  67%|██████████████████████████████████████████████████████████                             | 4/6 [00:04<00:02,  1.04s/it]\u001b[A\n",
      "Quantizing layers inside the block:  83%|████████████████████████████████████████████████████████████████████████▌              | 5/6 [00:05<00:01,  1.04s/it]\u001b[A\n",
      "Quantizing layers inside the block: 100%|███████████████████████████████████████████████████████████████████████████████████████| 6/6 [00:09<00:00,  2.08s/it]\u001b[A\n",
      "Quantizing model.decoder.layers blocks : 100%|████████████████████████████████████████████████████████████████████████████████| 24/24 [03:51<00:00,  9.67s/it]\u001b[A\n",
      "`loss_type=None` was set in the config but it is unrecognised.Using the default loss: `ForCausalLMLoss`.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'training': True, '_parameters': {}, '_buffers': {'qweight': tensor([[  390789188,   -86763060,   -87680292,  ...,  1289333988,\n",
      "         -1270385588,  1557769444],\n",
      "        [ 2009544615, -1737835895, -2025342840,  ...,  1152150852,\n",
      "          -865844276,  1152150596],\n",
      "        [ 1014386603,  -730541740,  -729165723,  ..., -1933824646,\n",
      "         -1807166540,  1823227275],\n",
      "        ...,\n",
      "        [ -874021891,  1143498261,  1428780325,  ...,  -590867243,\n",
      "           877194059,  -859233083],\n",
      "        [-1130604362,  1701034122,  1684257610,  ...,  -869049178,\n",
      "          1423737931,  -867935068],\n",
      "        [ 1417005668, -1131723092, -1131657796,  ..., -1434680185,\n",
      "          1488205643, -1167415993]], device='cuda:0', dtype=torch.int32), 'qzeros': tensor([[2004318071, 2004318071, 2004318071,  ..., 2004318071, 2004318071,\n",
      "         2004318071],\n",
      "        [2004318071, 2004318071, 2004318071,  ..., 2004318071, 2004318071,\n",
      "         2004318071],\n",
      "        [2004318071, 2004318071, 2004318071,  ..., 2004318071, 2004318071,\n",
      "         2004318071],\n",
      "        ...,\n",
      "        [2004318071, 2004318071, 2004318071,  ..., 2004318071, 2004318071,\n",
      "         2004318071],\n",
      "        [2004318071, 2004318071, 2004318071,  ..., 2004318071, 2004318071,\n",
      "         2004318071],\n",
      "        [2004318071, 2004318071, 2004318071,  ..., 2004318071, 2004318071,\n",
      "         2004318071]], device='cuda:0', dtype=torch.int32), 'scales': tensor([[0.0167, 0.0167, 0.0167,  ..., 0.0167, 0.0167, 0.0166],\n",
      "        [0.0170, 0.0166, 0.0168,  ..., 0.0163, 0.0168, 0.0165],\n",
      "        [0.0175, 0.0174, 0.0168,  ..., 0.0173, 0.0180, 0.0171],\n",
      "        ...,\n",
      "        [0.0176, 0.0177, 0.0170,  ..., 0.0165, 0.0172, 0.0146],\n",
      "        [0.0175, 0.0174, 0.0175,  ..., 0.0163, 0.0165, 0.0153],\n",
      "        [0.0172, 0.0177, 0.0178,  ..., 0.0182, 0.0164, 0.0176]],\n",
      "       device='cuda:0', dtype=torch.float16), 'g_idx': tensor([0, 0, 0,  ..., 7, 7, 7], device='cuda:0', dtype=torch.int32), 'bias': tensor([ 0.1249, -0.1249, -0.1249,  ...,  0.1251, -0.1252,  0.1250],\n",
      "       device='cuda:0', dtype=torch.float16)}, '_non_persistent_buffers_set': set(), '_backward_pre_hooks': OrderedDict(), '_backward_hooks': OrderedDict(), '_is_full_backward_hook': None, '_forward_hooks': OrderedDict(), '_forward_hooks_with_kwargs': OrderedDict(), '_forward_hooks_always_called': OrderedDict(), '_forward_pre_hooks': OrderedDict(), '_forward_pre_hooks_with_kwargs': OrderedDict(), '_state_dict_hooks': OrderedDict(), '_state_dict_pre_hooks': OrderedDict(), '_load_state_dict_pre_hooks': OrderedDict(), '_load_state_dict_post_hooks': OrderedDict(), '_modules': {}, 'infeatures': 1024, 'outfeatures': 1024, 'bits': 4, 'group_size': 128, 'maxq': 15, 'half_indim': 512, 'use_cuda_fp16': True, 'wf': tensor([[ 0,  4,  8, 12, 16, 20, 24, 28]], dtype=torch.int32), 'kernel_switch_threshold': 128, 'autogptq_cuda_available': False, 'autogptq_cuda': None, 'trainable': False, 'device': device(type='cuda', index=0)}\n"
     ]
    }
   ],
   "source": [
    "from transformers import AutoModelForCausalLM, AutoTokenizer, GPTQConfig\n",
    "import torch\n",
    "\n",
    "# Quantizing facebook/opt-6.7B takes too long, so use facebook/opt-350m instead.\n",
    "model_name_or_path = \"facebook/opt-350m\"\n",
    "\n",
    "# Quantize using a default calibration dataset supported by the GPTQ algorithm:\n",
    "# here the model is quantized to 4-bit precision with the \"wikitext2\" dataset.\n",
    "# NOTE: GPTQConfig supports bit widths of [2, 3, 4, 8] (the earlier claim of\n",
    "# 6-bit support was incorrect).\n",
    "quantization_config = GPTQConfig(\n",
    "     bits=4, # quantization bit width\n",
    "     group_size=128, # group size used for the quantization scales\n",
    "     dataset=\"wikitext2\", # calibration dataset\n",
    "     desc_act=False, # skip act-order quantization (faster, slight accuracy cost)\n",
    ")\n",
    "# from_pretrained quantizes the model layer by layer\n",
    "quant_model = AutoModelForCausalLM.from_pretrained(\n",
    "    model_name_or_path,\n",
    "    quantization_config=quantization_config,\n",
    "    device_map='auto')\n",
    "\n",
    "# Inspect the packed buffers (qweight/qzeros/scales/g_idx) of one quantized layer\n",
    "print(quant_model.model.decoder.layers[0].self_attn.q_proj.__dict__)\n",
    "\n",
    "# Save the quantized weights\n",
    "quant_model.save_pretrained(\"models/opt-350m-gptq\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "9ddc570b-550d-4497-b1c1-c260886451ae",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/usr/local/lib/python3.10/dist-packages/awq/__init__.py:21: DeprecationWarning: \n",
      "I have left this message as the final dev message to help you transition.\n",
      "\n",
      "Important Notice:\n",
      "- AutoAWQ is officially deprecated and will no longer be maintained.\n",
      "- The last tested configuration used Torch 2.6.0 and Transformers 4.51.3.\n",
      "- If future versions of Transformers break AutoAWQ compatibility, please report the issue to the Transformers project.\n",
      "\n",
      "Alternative:\n",
      "- AutoAWQ has been adopted by the vLLM Project: https://github.com/vllm-project/llm-compressor\n",
      "\n",
      "For further inquiries, feel free to reach out:\n",
      "- X: https://x.com/casper_hansen_\n",
      "- LinkedIn: https://www.linkedin.com/in/casper-hansen-804005170/\n",
      "\n",
      "  warnings.warn(_FINAL_DEV_MESSAGE, category=DeprecationWarning, stacklevel=1)\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "6ea620fde50f4ad9b6a2a26bbd0d6911",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Fetching 9 files:   0%|          | 0/9 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "开始量化...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Repo card metadata block was not found. Setting CardData to empty.\n",
      "AWQ: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 22/22 [15:43<00:00, 42.87s/it]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "量化完成！\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "('models/tinyllama-1.1b-awq/tokenizer_config.json',\n",
       " 'models/tinyllama-1.1b-awq/special_tokens_map.json',\n",
       " 'models/tinyllama-1.1b-awq/tokenizer.model',\n",
       " 'models/tinyllama-1.1b-awq/added_tokens.json',\n",
       " 'models/tinyllama-1.1b-awq/tokenizer.json')"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from awq import AutoAWQForCausalLM\n",
    "from transformers import AutoTokenizer\n",
    "# AWQ quantization demo. (The facebook/opt family fails here with\n",
    "# \"OPTModel' object has no attribute 'rotary_emb'\" under AWQ — no known fix yet —\n",
    "# so TinyLlama is quantized instead.)\n",
    "model_path = \"TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T\"  # base model\n",
    "quant_path = \"models/tinyllama-1.1b-awq\"  # where the quantized model is saved\n",
    "quant_config = dict(zero_point=True, q_group_size=128, w_bit=4, version=\"GEMM\")\n",
    "\n",
    "# Load the tokenizer and the full-precision model\n",
    "tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)\n",
    "model = AutoAWQForCausalLM.from_pretrained(model_path, device_map=\"cuda\")\n",
    "\n",
    "# Run AWQ quantization (needs roughly 6 GB of GPU memory)\n",
    "print(\"开始量化...\")\n",
    "model.quantize(tokenizer, quant_config=quant_config)\n",
    "print(\"量化完成！\")\n",
    "\n",
    "# Persist the quantized weights and the tokenizer\n",
    "model.save_quantized(quant_path)\n",
    "tokenizer.save_pretrained(quant_path)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "23a5d173-7b38-4bed-8d79-dffa36091fc1",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/root/miniconda3/envs/hlf_old_env/lib/python3.10/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
      "  from .autonotebook import tqdm as notebook_tqdm\n",
      "/root/miniconda3/envs/hlf_old_env/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.\n",
      "  warnings.warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[{'generated_text': 'The woman worked as a sex worker, never seen a man before, in the city, and didn'}, {'generated_text': 'The woman worked as a nurse at one of the hospitals for the Children’s Hospital of New'}, {'generated_text': 'The woman worked as a bus ride driver in the Philippines, and when she became pregnant, she didn'}]\n",
      "[{'generated_text': 'The man worked as a bus driver for a time, and had a few jobs, I know,'}, {'generated_text': 'The man worked as a teacher at the school before coming on this sub.\\nHe worked as a'}, {'generated_text': 'The man worked as a food inspector and, as a result, was told he was \"overworked'}]\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Fetching 9 files: 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████| 9/9 [00:00<00:00, 58074.98it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "开始量化。。。\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Repo card metadata block was not found. Setting CardData to empty.\n",
      "AWQ: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 12/12 [01:40<00:00,  8.38s/it]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "量化成功，保存量化模型\n",
      "使用 GPU 加载量化模型\n",
      "Merry Christmas! I'm glad to share one that you're getting one from your!\n",
      "i'd be the best to be from f\n",
      "The woman worked as a driver in the car at the car at the car at the car at the car at the car at the car at the car at the car at the car at the car at the car at the car from the car at the car at the car at the car of the car at the car at the car at the car\n"
     ]
    }
   ],
   "source": [
    "# Quantize facebook/opt-125m with AWQ\n",
    "from awq import AutoAWQForCausalLM\n",
    "from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline, AwqConfig, AutoConfig\n",
    "import torch\n",
    "\n",
    "model_name_or_path = \"facebook/opt-125m\"\n",
    "quant_path = \"models/opt-125m-awq\" # path where the quantized model is saved\n",
    "quant_config = {\n",
    "    \"zero_point\": True,   # asymmetric quantization with a zero point\n",
    "    \"q_group_size\": 128,  # weight-group granularity\n",
    "    \"w_bit\": 4,           # weight bit width\n",
    "    \"version\": \"GEMM\"     # kernel implementation (GEMM or GEMV)\n",
    "}\n",
    "# Sanity-check the original OPT-125m on GPU before quantizing\n",
    "generator = pipeline('text-generation',\n",
    "                     model=model_name_or_path,\n",
    "                     device=0,\n",
    "                     do_sample=True,\n",
    "                     num_return_sequences=3)\n",
    "print(generator(\"The woman worked as a\"))\n",
    "print(generator(\"The man worked as a\"))\n",
    "\n",
    "# Load the model and tokenizer for quantization\n",
    "model = AutoAWQForCausalLM.from_pretrained(model_name_or_path, device_map=\"cuda\")\n",
    "tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)\n",
    "print(\"开始量化。。。\")\n",
    "# Quantize the model\n",
    "model.quantize(tokenizer, quant_config=quant_config)\n",
    "\n",
    "print(\"量化成功，保存量化模型\")\n",
    "# Rewrite the config so the saved model works with transformers' AWQ integration\n",
    "quantization_config = AwqConfig(\n",
    "    bits=quant_config[\"w_bit\"],\n",
    "    group_size=quant_config[\"q_group_size\"],\n",
    "    zero_point=quant_config[\"zero_point\"],\n",
    "    version=quant_config[\"version\"].lower(),\n",
    ").to_dict()\n",
    "# The underlying transformers model lives on the .model attribute; it expects a plain dict\n",
    "model.model.config.quantization_config = quantization_config\n",
    "# Save the quantized weights\n",
    "model.save_quantized(quant_path)\n",
    "# Save the tokenizer\n",
    "tokenizer.save_pretrained(quant_path)  \n",
    "\n",
    "print(\"使用 GPU 加载量化模型\")\n",
    "tokenizer = AutoTokenizer.from_pretrained(quant_path)\n",
    "# BUG FIX: do not chain .to(0) after from_pretrained(..., device_map=\"cuda\") —\n",
    "# accelerate has already dispatched the weights, and calling .to() on a\n",
    "# dispatched/quantized model is rejected by transformers (and redundant anyway).\n",
    "model = AutoModelForCausalLM.from_pretrained(quant_path, device_map=\"cuda\")\n",
    "\n",
    "def generate_text(text):\n",
    "    \"\"\"Generate up to 64 new tokens from `text` with the quantized model.\"\"\"\n",
    "    inputs = tokenizer(text, return_tensors=\"pt\").to(model.device)\n",
    "\n",
    "    out = model.generate(**inputs, max_new_tokens=64)\n",
    "    return tokenizer.decode(out[0], skip_special_tokens=True)\n",
    "\n",
    "result = generate_text(\"Merry Christmas! I'm glad to\")\n",
    "print(result)\n",
    "result = generate_text(\"The woman worked as a\")\n",
    "print(result)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "54b738fc-40f4-40b5-b34d-fba2aef10107",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/root/miniconda3/envs/hlf_old_env/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.\n",
      "  warnings.warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "测试原始的 OPT-350m的输出结果\n",
      "[{'generated_text': 'The woman worked as a nurse, and had her own kids on the payroll, so she knew that'}, {'generated_text': 'The woman worked as a housewife (though it must be said for something else, such as a'}, {'generated_text': 'The woman worked as a medical lab assistant in a hospital during the cold winter months.\\n\\nAccording'}]\n",
      "[{'generated_text': 'The man worked as a truck driver for many years.\\nNo one ever said that, just to'}, {'generated_text': 'The man worked as a nurse, then became a teacher and lost a lot of money for it.'}, {'generated_text': 'The man worked as a server in a bank in Australia and his work had the reputation of being dirty'}]\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "1a7c98031d0a46a5a44ad0ba7360afc4",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Fetching 9 files:   0%|          | 0/9 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "开始量化。。。\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Repo card metadata block was not found. Setting CardData to empty.\n",
      "AWQ: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 24/24 [03:56<00:00,  9.84s/it]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "量化成功，保存量化模型\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "('models/opt-350m-awq/tokenizer_config.json',\n",
       " 'models/opt-350m-awq/special_tokens_map.json',\n",
       " 'models/opt-350m-awq/vocab.json',\n",
       " 'models/opt-350m-awq/merges.txt',\n",
       " 'models/opt-350m-awq/added_tokens.json',\n",
       " 'models/opt-350m-awq/tokenizer.json')"
      ]
     },
     "execution_count": 1,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Quantize facebook/opt-350m with AWQ (facebook/opt-6.7b and facebook/opt-2.7b both OOM during quantization)\n",
    "from awq import AutoAWQForCausalLM\n",
    "from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline, AwqConfig, AutoConfig\n",
    "import torch\n",
    "\n",
    "model_name_or_path = \"facebook/opt-350m\"\n",
    "quant_path = \"models/opt-350m-awq\" # path where the quantized model is saved\n",
    "\"\"\"\n",
    "高精度需求\t    w_bit=4, q_group_size=64, zero_point=True\t牺牲速度换精度\n",
    "低显存环境（如T4）w_bit=4, q_group_size=128, zero_point=False\t平衡显存和速度\n",
    "最快推理速度\t     w_bit=3, q_group_size=256, zero_point=False\t适合实时应用，精度损失较大\n",
    "\"\"\"\n",
    "quant_config = {\n",
    "    \"zero_point\": True,    # enable zero-point quantization: True = asymmetric, False = symmetric\n",
    "    \"q_group_size\": 128,   # weight-group granularity; commonly 64, 128 or 256 (default 128)\n",
    "    \"w_bit\": 4,            # weight bit width; 4-bit is the usual accuracy/efficiency trade-off (3 and 8 also possible)\n",
    "    \"version\": \"GEMM\"      # kernel implementation: \"GEMM\" (matrix-matrix) or \"GEMV\" (matrix-vector); GEMM suits batched workloads\n",
    "}\n",
    "# Run the original OPT-350m on GPU as a baseline before quantizing\n",
    "generator = pipeline('text-generation',          # standard HF task type for autoregressive text generation\n",
    "                     model=model_name_or_path,   # model name or local path\n",
    "                     device=0,                   # 0 = first GPU (same as \"cuda:0\"); -1 would use the CPU\n",
    "                     do_sample=True,             # sample from the distribution instead of greedy decoding\n",
    "                     num_return_sequences=3)     # number of generations to return per prompt\n",
    "print(\"测试原始的 OPT-350m的输出结果\")\n",
    "print(generator(\"The woman worked as a\"))\n",
    "print(generator(\"The man worked as a\"))\n",
    "\n",
    "# Load the model and tokenizer\n",
    "model = AutoAWQForCausalLM.from_pretrained(model_name_or_path, device_map=\"cuda\")\n",
    "tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)\n",
    "print(\"开始量化。。。\")\n",
    "# Quantize the model\n",
    "model.quantize(tokenizer, quant_config=quant_config)\n",
    "\n",
    "print(\"量化成功，保存量化模型\")\n",
    "# Rewrite the config so the saved model works with transformers' AWQ integration\n",
    "quantization_config = AwqConfig(\n",
    "    bits=quant_config[\"w_bit\"],\n",
    "    group_size=quant_config[\"q_group_size\"],\n",
    "    zero_point=quant_config[\"zero_point\"],\n",
    "    version=quant_config[\"version\"].lower(),\n",
    ").to_dict()\n",
    "# The underlying transformers model lives on the .model attribute and expects a plain dict here\n",
    "model.model.config.quantization_config = quantization_config\n",
    "# Save the quantized weights\n",
    "model.save_quantized(quant_path)\n",
    "# Save the tokenizer\n",
    "tokenizer.save_pretrained(quant_path)  "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "80768db8-5caa-4c7e-805f-13f1dfc29468",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "hlf_old_env",
   "language": "python",
   "name": "hlf_old_env"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.18"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
