{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "5d89892e",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[33mWARNING: Error parsing dependencies of send2trash: Expected matching RIGHT_PARENTHESIS for LEFT_PARENTHESIS, after version specifier\r\n",
      "    sys-platform (==\"darwin\") ; extra == 'objc'\r\n",
      "                 ~^\u001b[0m\u001b[33m\r\n",
      "\u001b[0m"
     ]
    }
   ],
   "source": [
    "!pip install torch transformers datasets  --quiet"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "7b034b5d",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Using device: cuda\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Some weights of BertForSequenceClassification were not initialized from the model checkpoint at bert-large-uncased and are newly initialized: ['classifier.bias', 'classifier.weight']\n",
      "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n",
      "Some weights of BertForSequenceClassification were not initialized from the model checkpoint at bert-base-uncased and are newly initialized: ['classifier.bias', 'classifier.weight']\n",
      "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "9f023bad24334b76a5225ec84c7e8bc2",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Map:   0%|          | 0/67349 [00:00<?, ? examples/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "e4ab70afa5424443b2f765dc191caa30",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Map:   0%|          | 0/872 [00:00<?, ? examples/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "955471bb99364f0cbd5a5a4a5142caec",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Map:   0%|          | 0/1821 [00:00<?, ? examples/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Sample input: {'input_ids': [101, 5342, 2047, 3595, 8496, 2013, 1996, 18643, 3197, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'labels': 0}\n",
      "Keys in sample: dict_keys(['input_ids', 'token_type_ids', 'attention_mask', 'labels'])\n"
     ]
    },
    {
     "data": {
      "text/html": [
       "\n",
       "    <div>\n",
       "      \n",
       "      <progress value='28' max='14' style='width:300px; height:20px; vertical-align: middle;'></progress>\n",
       "      [14/14 39:39]\n",
       "    </div>\n",
       "    "
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Pre Fine-tuned Teacher Model Performance: {'eval_loss': 0.7186707854270935, 'eval_accuracy': 0.43004587155963303, 'eval_runtime': 4.3114, 'eval_samples_per_second': 202.257, 'eval_steps_per_second': 3.247}\n",
      "Fine-tuning the teacher model...\n"
     ]
    },
    {
     "data": {
      "text/html": [
       "\n",
       "    <div>\n",
       "      \n",
       "      <progress value='6315' max='6315' style='width:300px; height:20px; vertical-align: middle;'></progress>\n",
       "      [6315/6315 1:43:11, Epoch 3/3]\n",
       "    </div>\n",
       "    <table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       " <tr style=\"text-align: left;\">\n",
       "      <th>Epoch</th>\n",
       "      <th>Training Loss</th>\n",
       "      <th>Validation Loss</th>\n",
       "      <th>Accuracy</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <td>1</td>\n",
       "      <td>0.161800</td>\n",
       "      <td>0.204711</td>\n",
       "      <td>0.932339</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>2</td>\n",
       "      <td>0.109200</td>\n",
       "      <td>0.267002</td>\n",
       "      <td>0.916284</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>3</td>\n",
       "      <td>0.060400</td>\n",
       "      <td>0.293599</td>\n",
       "      <td>0.919725</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table><p>"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/html": [
       "\n",
       "    <div>\n",
       "      \n",
       "      <progress value='14' max='14' style='width:300px; height:20px; vertical-align: middle;'></progress>\n",
       "      [14/14 00:04]\n",
       "    </div>\n",
       "    "
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Fine-tuned Teacher Model Performance: {'eval_loss': 0.2935987710952759, 'eval_accuracy': 0.9197247706422018, 'eval_runtime': 4.4775, 'eval_samples_per_second': 194.752, 'eval_steps_per_second': 3.127, 'epoch': 3.0}\n"
     ]
    },
    {
     "data": {
      "text/html": [
       "\n",
       "    <div>\n",
       "      \n",
       "      <progress value='28' max='14' style='width:300px; height:20px; vertical-align: middle;'></progress>\n",
       "      [14/14 11:51]\n",
       "    </div>\n",
       "    "
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Pre-distillation Student Model Performance: {'eval_loss': 1.4624680280685425, 'eval_accuracy': 0.4908256880733945, 'eval_runtime': 6.188, 'eval_samples_per_second': 140.919, 'eval_steps_per_second': 2.262}\n",
      "Starting distillation...\n"
     ]
    },
    {
     "data": {
      "text/html": [
       "\n",
       "    <div>\n",
       "      \n",
       "      <progress value='6315' max='6315' style='width:300px; height:20px; vertical-align: middle;'></progress>\n",
       "      [6315/6315 35:09, Epoch 3/3]\n",
       "    </div>\n",
       "    <table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       " <tr style=\"text-align: left;\">\n",
       "      <th>Epoch</th>\n",
       "      <th>Training Loss</th>\n",
       "      <th>Validation Loss</th>\n",
       "      <th>Accuracy</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <td>1</td>\n",
       "      <td>0.260300</td>\n",
       "      <td>0.296434</td>\n",
       "      <td>0.912844</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>2</td>\n",
       "      <td>0.132700</td>\n",
       "      <td>0.323626</td>\n",
       "      <td>0.915138</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>3</td>\n",
       "      <td>0.063600</td>\n",
       "      <td>0.318399</td>\n",
       "      <td>0.924312</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table><p>"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/html": [
       "\n",
       "    <div>\n",
       "      \n",
       "      <progress value='14' max='14' style='width:300px; height:20px; vertical-align: middle;'></progress>\n",
       "      [14/14 00:05]\n",
       "    </div>\n",
       "    "
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Post-distillation Student Model Performance: {'eval_loss': 0.31839948892593384, 'eval_accuracy': 0.9243119266055045, 'eval_runtime': 5.8479, 'eval_samples_per_second': 149.113, 'eval_steps_per_second': 2.394, 'epoch': 3.0}\n",
      "\n",
      "Performance Comparison:\n",
      "Fine-tuned Teacher Model: {'eval_loss': 0.2935987710952759, 'eval_accuracy': 0.9197247706422018, 'eval_runtime': 4.4775, 'eval_samples_per_second': 194.752, 'eval_steps_per_second': 3.127, 'epoch': 3.0}\n",
      "Pre-distillation Student Model: {'eval_loss': 1.4624680280685425, 'eval_accuracy': 0.4908256880733945, 'eval_runtime': 6.188, 'eval_samples_per_second': 140.919, 'eval_steps_per_second': 2.262}\n",
      "Post-distillation Student Model: {'eval_loss': 0.31839948892593384, 'eval_accuracy': 0.9243119266055045, 'eval_runtime': 5.8479, 'eval_samples_per_second': 149.113, 'eval_steps_per_second': 2.394, 'epoch': 3.0}\n"
     ]
    }
   ],
   "source": [
     "import torch\n",
     "import numpy as np\n",
     "from transformers import AutoModelForSequenceClassification, AutoTokenizer, Trainer, TrainingArguments\n",
     "from datasets import load_dataset\n",
     "from torch.nn import functional as F\n",
     "from sklearn.metrics import accuracy_score\n",
     "\n",
     "# Check if CUDA is available\n",
     "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
     "print(f\"Using device: {device}\")\n",
     "\n",
     "# NOTE(review): no random seed is set anywhere in this notebook, so the newly\n",
     "# initialized classifier heads (see the stderr warnings in the output) and the\n",
     "# training order make runs non-reproducible -- consider torch.manual_seed(...).\n",
     "# Load teacher and student models; num_labels=2 matches binary SST-2 sentiment.\n",
     "teacher_model_name = \"bert-large-uncased\"\n",
     "student_model_name = \"bert-base-uncased\"\n",
     "teacher_model = AutoModelForSequenceClassification.from_pretrained(teacher_model_name, num_labels=2).to(device)\n",
     "student_model = AutoModelForSequenceClassification.from_pretrained(student_model_name, num_labels=2).to(device)\n",
     "\n",
     "# Load tokenizer -- one tokenizer (the student's) is used for both models;\n",
     "# presumably bert-large-uncased shares the same uncased vocab -- confirm.\n",
     "tokenizer = AutoTokenizer.from_pretrained(student_model_name)\n",
     "\n",
     "# Load SST-2 dataset\n",
     "dataset = load_dataset(\"glue\", \"sst2\")\n",
    "\n",
    "# Data collator function\n",
    "def data_collator(features):\n",
    "    batch = {k: torch.tensor([f[k] for f in features]) for k in features[0].keys()}\n",
    "    return batch\n",
    "\n",
    "# Distillation loss function\n",
    "def distillation_loss(student_logits, teacher_logits, labels, alpha=0.5, temperature=2.0):\n",
    "    loss_ce = F.cross_entropy(student_logits, labels)\n",
    "    loss_kl = F.kl_div(\n",
    "        F.log_softmax(student_logits / temperature, dim=-1),\n",
    "        F.softmax(teacher_logits / temperature, dim=-1),\n",
    "        reduction=\"batchmean\"\n",
    "    ) * (temperature ** 2)\n",
    "    return alpha * loss_ce + (1 - alpha) * loss_kl\n",
    "\n",
    "# Modify the preprocess_function\n",
    "def preprocess_function(examples):\n",
    "    result = tokenizer(examples['sentence'], truncation=True, padding=\"max_length\", max_length=128)\n",
    "    result[\"labels\"] = examples[\"label\"]\n",
    "    return result\n",
    "\n",
     "# Apply preprocessing and remove the original raw columns ('label', 'sentence',\n",
     "# 'idx') so every remaining field is a fixed-length list the collator can tensorize.\n",
     "encoded_dataset = dataset.map(preprocess_function, batched=True, remove_columns=['label', 'sentence', 'idx'])\n",
     "\n",
     "# Print sample to verify the expected keys survived preprocessing\n",
     "print(\"Sample input:\", encoded_dataset[\"train\"][0])\n",
     "print(\"Keys in sample:\", encoded_dataset[\"train\"][0].keys())\n",
    "\n",
    "# Modify the DistillationTrainer class\n",
    "class DistillationTrainer(Trainer):\n",
    "    def __init__(self, *args, teacher_model=None, alpha=0.5, temperature=2.0, **kwargs):\n",
    "        super().__init__(*args, **kwargs)\n",
    "        self.teacher_model = teacher_model\n",
    "        self.alpha = alpha\n",
    "        self.temperature = temperature\n",
    "\n",
    "    def compute_loss(self, model, inputs, return_outputs=False):\n",
    "        # Ensure inputs are on the correct device\n",
    "        inputs = {k: v.to(model.device) for k, v in inputs.items()}\n",
    "\n",
    "        # Extract labels\n",
    "        labels = inputs.pop(\"labels\")\n",
    "\n",
    "        # Forward pass through the model\n",
    "        outputs = model(**inputs)\n",
    "        student_logits = outputs.logits\n",
    "\n",
    "        # Teacher model forward pass\n",
    "        with torch.no_grad():\n",
    "            teacher_outputs = self.teacher_model(**inputs)\n",
    "            teacher_logits = teacher_outputs.logits\n",
    "\n",
    "        # Compute distillation loss\n",
    "        loss = distillation_loss(student_logits, teacher_logits, labels, self.alpha, self.temperature)\n",
    "\n",
    "        if return_outputs:\n",
    "            outputs[\"loss\"] = loss\n",
    "            return (loss, outputs)\n",
    "        return loss\n",
    "\n",
    "# Compute metrics function\n",
    "def compute_metrics(eval_pred):\n",
    "    logits, labels = eval_pred\n",
    "    predictions = np.argmax(logits, axis=-1)\n",
    "    return {\"accuracy\": accuracy_score(labels, predictions)}\n",
    "\n",
     "# Training arguments for teacher model fine-tuning.\n",
     "# NOTE(review): 'evaluation_strategy' was renamed 'eval_strategy' in newer\n",
     "# transformers releases -- confirm the installed version before upgrading.\n",
     "teacher_training_args = TrainingArguments(\n",
     "    output_dir=\"./teacher_model_sst2\",\n",
     "    per_device_train_batch_size=32,\n",
     "    per_device_eval_batch_size=64,\n",
     "    num_train_epochs=3,\n",
     "    evaluation_strategy=\"epoch\",\n",
     "    logging_dir=\"./teacher_logs_sst2\",\n",
     "    save_total_limit=1,\n",
     "    save_steps=1000,\n",
     "    learning_rate=2e-5,\n",
     "    weight_decay=0.01,\n",
     ")\n",
     "\n",
     "# Trainer for teacher model. No data_collator is passed here, unlike the\n",
     "# distillation trainer below -- presumably the Trainer default collator handles\n",
     "# these fixed-length list features; confirm both paths batch identically.\n",
     "teacher_trainer = Trainer(\n",
     "    model=teacher_model,\n",
     "    args=teacher_training_args,\n",
     "    train_dataset=encoded_dataset[\"train\"],\n",
     "    eval_dataset=encoded_dataset[\"validation\"],\n",
     "    compute_metrics=compute_metrics,\n",
     ")\n",
     "\n",
     "# Evaluate the teacher model before fine-tuning. The classifier head is\n",
     "# randomly initialized (see the stderr warning above), so this baseline is\n",
     "# near chance level (~0.43 accuracy in the recorded run).\n",
     "teacher_results = teacher_trainer.evaluate()\n",
     "print(f\"Pre Fine-tuned Teacher Model Performance: {teacher_results}\")\n",
     "\n",
     "# Fine-tune the teacher model\n",
     "print(\"Fine-tuning the teacher model...\")\n",
     "teacher_trainer.train()\n",
     "\n",
     "# Evaluate the fine-tuned teacher model\n",
     "teacher_results = teacher_trainer.evaluate()\n",
     "print(f\"Fine-tuned Teacher Model Performance: {teacher_results}\")\n",
     "\n",
     "# Save the fine-tuned teacher model\n",
     "teacher_model.save_pretrained(\"./fine_tuned_teacher_model_sst2\")\n",
    "\n",
     "# Training arguments for distillation.\n",
     "# NOTE(review): 'evaluation_strategy' was renamed 'eval_strategy' in newer\n",
     "# transformers releases -- confirm the installed version before upgrading.\n",
     "training_args = TrainingArguments(\n",
     "    output_dir=\"./distilled_model_sst2\",\n",
     "    per_device_train_batch_size=32,\n",
     "    per_device_eval_batch_size=64,\n",
     "    num_train_epochs=3,\n",
     "    evaluation_strategy=\"epoch\",\n",
     "    logging_dir=\"./logs_sst2\",\n",
     "    save_total_limit=2,\n",
     "    save_steps=1000,\n",
     "    learning_rate=5e-5,\n",
     "    weight_decay=0.01,\n",
     "    dataloader_pin_memory=True,\n",
     ")\n",
     "\n",
     "# Trainer instance for distillation. teacher_model here is the same object that\n",
     "# teacher_trainer.train() fine-tuned above, not a freshly loaded checkpoint.\n",
     "# NOTE(review): the 'tokenizer=' kwarg is deprecated (renamed 'processing_class')\n",
     "# in recent transformers releases -- confirm against the installed version.\n",
     "distillation_trainer = DistillationTrainer(\n",
     "    model=student_model,\n",
     "    args=training_args,\n",
     "    train_dataset=encoded_dataset[\"train\"],\n",
     "    eval_dataset=encoded_dataset[\"validation\"],\n",
     "    data_collator=data_collator,\n",
     "    tokenizer=tokenizer,\n",
     "    teacher_model=teacher_model,\n",
     "    compute_metrics=compute_metrics\n",
     ")\n",
     "\n",
     "# Evaluate the student model before distillation (randomly initialized head,\n",
     "# so accuracy is near chance -- ~0.49 in the recorded run)\n",
     "pre_distillation_results = distillation_trainer.evaluate()\n",
     "print(f\"Pre-distillation Student Model Performance: {pre_distillation_results}\")\n",
     "\n",
     "# Train the student model (distillation)\n",
     "print(\"Starting distillation...\")\n",
     "distillation_trainer.train()\n",
     "\n",
     "# Evaluate the distilled student model\n",
     "post_distillation_results = distillation_trainer.evaluate()\n",
     "print(f\"Post-distillation Student Model Performance: {post_distillation_results}\")\n",
     "\n",
     "# Save the distilled student model (weights + tokenizer files together)\n",
     "student_model.save_pretrained(\"./distilled_student_model_sst2\")\n",
     "tokenizer.save_pretrained(\"./distilled_student_model_sst2\")\n",
     "\n",
     "# Final comparison\n",
     "print(\"\\nPerformance Comparison:\")\n",
     "print(f\"Fine-tuned Teacher Model: {teacher_results}\")\n",
     "print(f\"Pre-distillation Student Model: {pre_distillation_results}\")\n",
     "print(f\"Post-distillation Student Model: {post_distillation_results}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d5f5163d",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
