{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "provenance": []
    },
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3"
    },
    "language_info": {
      "name": "python"
    },
    "widgets": {
      "application/vnd.jupyter.widget-state+json": {
        "1a3ccd8dadf3402d896bf0c9dc04fa47": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "HBoxModel",
          "model_module_version": "1.5.0",
          "state": {
            "_dom_classes": [],
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "HBoxModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/controls",
            "_view_module_version": "1.5.0",
            "_view_name": "HBoxView",
            "box_style": "",
            "children": [
              "IPY_MODEL_39c9a3356b68455dba516ec32053b3bb",
              "IPY_MODEL_f9d8fd50d9734aa6abc920af828b4c6d",
              "IPY_MODEL_879b7ae4ec0f45879f90e3f769d67896"
            ],
            "layout": "IPY_MODEL_22caa164d8c2473bbb999707093aa2af"
          }
        },
        "39c9a3356b68455dba516ec32053b3bb": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "HTMLModel",
          "model_module_version": "1.5.0",
          "state": {
            "_dom_classes": [],
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "HTMLModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/controls",
            "_view_module_version": "1.5.0",
            "_view_name": "HTMLView",
            "description": "",
            "description_tooltip": null,
            "layout": "IPY_MODEL_db2b6b6aab394ab5a4aaf1cb74f129ca",
            "placeholder": "​",
            "style": "IPY_MODEL_fa782cb66f5d431fad49ac8ecae17c80",
            "value": "100%"
          }
        },
        "f9d8fd50d9734aa6abc920af828b4c6d": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "FloatProgressModel",
          "model_module_version": "1.5.0",
          "state": {
            "_dom_classes": [],
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "FloatProgressModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/controls",
            "_view_module_version": "1.5.0",
            "_view_name": "ProgressView",
            "bar_style": "success",
            "description": "",
            "description_tooltip": null,
            "layout": "IPY_MODEL_8e4e2d11cf814f8bb12331df508cca9e",
            "max": 3,
            "min": 0,
            "orientation": "horizontal",
            "style": "IPY_MODEL_54c0e98097654319a7fec8b752c0d717",
            "value": 3
          }
        },
        "879b7ae4ec0f45879f90e3f769d67896": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "HTMLModel",
          "model_module_version": "1.5.0",
          "state": {
            "_dom_classes": [],
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "HTMLModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/controls",
            "_view_module_version": "1.5.0",
            "_view_name": "HTMLView",
            "description": "",
            "description_tooltip": null,
            "layout": "IPY_MODEL_076b12597eab4c59b33ad5b53803311e",
            "placeholder": "​",
            "style": "IPY_MODEL_dad79375e4944b2989b7791800a6a946",
            "value": " 3/3 [00:00&lt;00:00, 265.23it/s]"
          }
        },
        "22caa164d8c2473bbb999707093aa2af": {
          "model_module": "@jupyter-widgets/base",
          "model_name": "LayoutModel",
          "model_module_version": "1.2.0",
          "state": {
            "_model_module": "@jupyter-widgets/base",
            "_model_module_version": "1.2.0",
            "_model_name": "LayoutModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "LayoutView",
            "align_content": null,
            "align_items": null,
            "align_self": null,
            "border": null,
            "bottom": null,
            "display": null,
            "flex": null,
            "flex_flow": null,
            "grid_area": null,
            "grid_auto_columns": null,
            "grid_auto_flow": null,
            "grid_auto_rows": null,
            "grid_column": null,
            "grid_gap": null,
            "grid_row": null,
            "grid_template_areas": null,
            "grid_template_columns": null,
            "grid_template_rows": null,
            "height": null,
            "justify_content": null,
            "justify_items": null,
            "left": null,
            "margin": null,
            "max_height": null,
            "max_width": null,
            "min_height": null,
            "min_width": null,
            "object_fit": null,
            "object_position": null,
            "order": null,
            "overflow": null,
            "overflow_x": null,
            "overflow_y": null,
            "padding": null,
            "right": null,
            "top": null,
            "visibility": null,
            "width": null
          }
        },
        "db2b6b6aab394ab5a4aaf1cb74f129ca": {
          "model_module": "@jupyter-widgets/base",
          "model_name": "LayoutModel",
          "model_module_version": "1.2.0",
          "state": {
            "_model_module": "@jupyter-widgets/base",
            "_model_module_version": "1.2.0",
            "_model_name": "LayoutModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "LayoutView",
            "align_content": null,
            "align_items": null,
            "align_self": null,
            "border": null,
            "bottom": null,
            "display": null,
            "flex": null,
            "flex_flow": null,
            "grid_area": null,
            "grid_auto_columns": null,
            "grid_auto_flow": null,
            "grid_auto_rows": null,
            "grid_column": null,
            "grid_gap": null,
            "grid_row": null,
            "grid_template_areas": null,
            "grid_template_columns": null,
            "grid_template_rows": null,
            "height": null,
            "justify_content": null,
            "justify_items": null,
            "left": null,
            "margin": null,
            "max_height": null,
            "max_width": null,
            "min_height": null,
            "min_width": null,
            "object_fit": null,
            "object_position": null,
            "order": null,
            "overflow": null,
            "overflow_x": null,
            "overflow_y": null,
            "padding": null,
            "right": null,
            "top": null,
            "visibility": null,
            "width": null
          }
        },
        "fa782cb66f5d431fad49ac8ecae17c80": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "DescriptionStyleModel",
          "model_module_version": "1.5.0",
          "state": {
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "DescriptionStyleModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "StyleView",
            "description_width": ""
          }
        },
        "8e4e2d11cf814f8bb12331df508cca9e": {
          "model_module": "@jupyter-widgets/base",
          "model_name": "LayoutModel",
          "model_module_version": "1.2.0",
          "state": {
            "_model_module": "@jupyter-widgets/base",
            "_model_module_version": "1.2.0",
            "_model_name": "LayoutModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "LayoutView",
            "align_content": null,
            "align_items": null,
            "align_self": null,
            "border": null,
            "bottom": null,
            "display": null,
            "flex": null,
            "flex_flow": null,
            "grid_area": null,
            "grid_auto_columns": null,
            "grid_auto_flow": null,
            "grid_auto_rows": null,
            "grid_column": null,
            "grid_gap": null,
            "grid_row": null,
            "grid_template_areas": null,
            "grid_template_columns": null,
            "grid_template_rows": null,
            "height": null,
            "justify_content": null,
            "justify_items": null,
            "left": null,
            "margin": null,
            "max_height": null,
            "max_width": null,
            "min_height": null,
            "min_width": null,
            "object_fit": null,
            "object_position": null,
            "order": null,
            "overflow": null,
            "overflow_x": null,
            "overflow_y": null,
            "padding": null,
            "right": null,
            "top": null,
            "visibility": null,
            "width": null
          }
        },
        "54c0e98097654319a7fec8b752c0d717": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "ProgressStyleModel",
          "model_module_version": "1.5.0",
          "state": {
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "ProgressStyleModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "StyleView",
            "bar_color": null,
            "description_width": ""
          }
        },
        "076b12597eab4c59b33ad5b53803311e": {
          "model_module": "@jupyter-widgets/base",
          "model_name": "LayoutModel",
          "model_module_version": "1.2.0",
          "state": {
            "_model_module": "@jupyter-widgets/base",
            "_model_module_version": "1.2.0",
            "_model_name": "LayoutModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "LayoutView",
            "align_content": null,
            "align_items": null,
            "align_self": null,
            "border": null,
            "bottom": null,
            "display": null,
            "flex": null,
            "flex_flow": null,
            "grid_area": null,
            "grid_auto_columns": null,
            "grid_auto_flow": null,
            "grid_auto_rows": null,
            "grid_column": null,
            "grid_gap": null,
            "grid_row": null,
            "grid_template_areas": null,
            "grid_template_columns": null,
            "grid_template_rows": null,
            "height": null,
            "justify_content": null,
            "justify_items": null,
            "left": null,
            "margin": null,
            "max_height": null,
            "max_width": null,
            "min_height": null,
            "min_width": null,
            "object_fit": null,
            "object_position": null,
            "order": null,
            "overflow": null,
            "overflow_x": null,
            "overflow_y": null,
            "padding": null,
            "right": null,
            "top": null,
            "visibility": null,
            "width": null
          }
        },
        "dad79375e4944b2989b7791800a6a946": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "DescriptionStyleModel",
          "model_module_version": "1.5.0",
          "state": {
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "DescriptionStyleModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "StyleView",
            "description_width": ""
          }
        }
      }
    }
  },
  "cells": [
    {
      "cell_type": "markdown",
      "source": [],
      "metadata": {
        "id": "kyF2cbTdZ-Bn"
      }
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 87,
          "referenced_widgets": [
            "1a3ccd8dadf3402d896bf0c9dc04fa47",
            "39c9a3356b68455dba516ec32053b3bb",
            "f9d8fd50d9734aa6abc920af828b4c6d",
            "879b7ae4ec0f45879f90e3f769d67896",
            "22caa164d8c2473bbb999707093aa2af",
            "db2b6b6aab394ab5a4aaf1cb74f129ca",
            "fa782cb66f5d431fad49ac8ecae17c80",
            "8e4e2d11cf814f8bb12331df508cca9e",
            "54c0e98097654319a7fec8b752c0d717",
            "076b12597eab4c59b33ad5b53803311e",
            "dad79375e4944b2989b7791800a6a946"
          ]
        },
        "id": "Pv56f7UaYWAw",
        "outputId": "da0fc5c5-1f2e-428e-ed3e-326c8ab1ca21"
      },
      "outputs": [
        {
          "output_type": "stream",
          "name": "stderr",
          "text": [
            "Found cached dataset tiny_shakespeare (/Users/premtimsina/.cache/huggingface/datasets/tiny_shakespeare/default/1.0.0/b5b13969f09fe8707337f6cb296314fbe06960bd9a868dca39e713e163d27b5e)\n"
          ]
        },
        {
          "output_type": "display_data",
          "data": {
            "text/plain": [
              "  0%|          | 0/3 [00:00<?, ?it/s]"
            ],
            "application/vnd.jupyter.widget-view+json": {
              "version_major": 2,
              "version_minor": 0,
              "model_id": "1a3ccd8dadf3402d896bf0c9dc04fa47"
            }
          },
          "metadata": {}
        }
      ],
      "source": [
        "import torch\n",
        "from torch.utils.data import DataLoader, Dataset\n",
        "from datasets import load_dataset\n",
        "from transformers import AutoModelForCausalLM, AutoTokenizer\n",
        "\n",
        "# Use GPT-2 end to end: the fine-tuning cell below trains GPT2LMHeadModel,\n",
        "# and the recorded outputs contain GPT-2 token ids (eos = 50256), so the\n",
        "# tokenizer must be GPT-2's.  The original \"decapoda-research/llama-7b-hf\"\n",
        "# checkpoint has been removed from the Hugging Face Hub, and loading a 7B\n",
        "# model here was wasted work anyway - it was discarded two cells later.\n",
        "model_name = \"gpt2\"\n",
        "model = AutoModelForCausalLM.from_pretrained(model_name)\n",
        "tokenizer = AutoTokenizer.from_pretrained(model_name)\n",
        "\n",
        "# GPT-2 has no pad token; reuse EOS so padded batching works.\n",
        "tokenizer.pad_token = tokenizer.eos_token\n",
        "\n",
        "# Load the dataset: each split is a single long string of text.\n",
        "dataset = load_dataset(\"tiny_shakespeare\")\n",
        "'''\n",
        "DatasetDict({\n",
        "    train/validation/test: Dataset({features: ['text'], num_rows: 1})\n",
        "})\n",
        "'''\n",
        "\n",
        "# Split the continuous text into fixed-size character chunks.\n",
        "def split_text(text, max_length=100):\n",
        "    \"\"\"Return `text` cut into consecutive chunks of at most `max_length` characters.\"\"\"\n",
        "    return [text[i:i+max_length] for i in range(0, len(text), max_length)]\n",
        "\n",
        "split_texts = split_text(dataset[\"train\"][\"text\"][0])\n",
        "\n",
        "# Tokenize all chunks at once, padding to the longest chunk.\n",
        "tokenized_texts = tokenizer(split_texts, return_tensors=\"pt\", padding=True, truncation=True)\n",
        "\n",
        "class ShiftedDataset(Dataset):\n",
        "    \"\"\"Dataset yielding (input_ids, attention_mask, labels) for causal-LM fine-tuning.\n",
        "\n",
        "    Hugging Face causal-LM models shift the labels internally, so `labels`\n",
        "    must simply equal `input_ids` for next-token prediction.  The previous\n",
        "    implementation shifted labels by one position itself, which combined\n",
        "    with the model's internal shift into a skip-one-token objective.\n",
        "    Padding positions are set to -100 so the loss ignores them.\n",
        "    \"\"\"\n",
        "\n",
        "    def __init__(self, encodings):\n",
        "        self.encodings = encodings\n",
        "\n",
        "    def __getitem__(self, idx):\n",
        "        input_ids = self.encodings[\"input_ids\"][idx]\n",
        "        attention_mask = self.encodings[\"attention_mask\"][idx]\n",
        "        labels = input_ids.clone()\n",
        "        labels[attention_mask == 0] = -100  # ignored by the loss\n",
        "        return {\"input_ids\": input_ids, \"attention_mask\": attention_mask, \"labels\": labels}\n",
        "\n",
        "    def __len__(self):\n",
        "        return len(self.encodings[\"input_ids\"])\n",
        "\n",
        "# Create a DataLoader over the tokenized chunks.\n",
        "train_dataset = ShiftedDataset(tokenized_texts)\n",
        "train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=4)\n"
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "# Pull one batch to sanity-check tensor shapes, padding and labels.\n",
        "batch = next(iter(train_dataloader))\n",
        "for key in (\"input_ids\", \"attention_mask\", \"labels\"):\n",
        "    print(batch[key])"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "kLjkG6s2YjZd",
        "outputId": "8a607c9c-c8a9-4b3b-9b6e-a902365fc499"
      },
      "execution_count": null,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "tensor([[  268,   262,   198, 22680,   318,  6157,    11,   290,  3520,    11,\n",
            "           355,   339,  1139,    11,   534,   198,    79,  3832, 10597,   340,\n",
            "           307,  3181,   345,    13,   198,   198, 39371,  3535, 44816,  2937,\n",
            "            25,   198, 50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256,\n",
            "         50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256,\n",
            "         50256, 50256, 50256],\n",
            "        [  198,  3792,   477,   262,  2450,    11,  4202,   290,  9366,    11,\n",
            "           198,  2504, 10598,   460,   787,  1028,   606,    13,   198,   198,\n",
            "         49275,  1677,    40,  2937,    25,   198,  4342,  1282,   262,   220,\n",
            "         50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256,\n",
            "         50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256,\n",
            "         50256, 50256, 50256],\n",
            "        [  351,  1393,   198,  5189,  3478,  1661,  4274,  4461,   286, 12157,\n",
            "            13,   198,  5247,    11,   788,   616,  2802,    11,   284, 11906,\n",
            "          4957,   467,   198, 12050,  1489, 50256, 50256, 50256, 50256, 50256,\n",
            "         50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256,\n",
            "         50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256,\n",
            "         50256, 50256, 50256],\n",
            "        [   25,   329,   534, 10839,   314,   423,  8350,    26,   198, 10723,\n",
            "          1549,   329,   534, 10839,    26,   329,  3406, 10839,  6842,   198,\n",
            "          5189, 14129,   734,  8667,   267, 50256, 50256, 50256, 50256, 50256,\n",
            "         50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256,\n",
            "         50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256,\n",
            "         50256, 50256, 50256]])\n",
            "tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n",
            "         1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
            "         0, 0, 0, 0, 0],\n",
            "        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n",
            "         1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
            "         0, 0, 0, 0, 0],\n",
            "        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n",
            "         1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
            "         0, 0, 0, 0, 0],\n",
            "        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n",
            "         1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
            "         0, 0, 0, 0, 0]])\n",
            "tensor([[  262,   198, 22680,   318,  6157,    11,   290,  3520,    11,   355,\n",
            "           339,  1139,    11,   534,   198,    79,  3832, 10597,   340,   307,\n",
            "          3181,   345,    13,   198,   198, 39371,  3535, 44816,  2937,    25,\n",
            "           198, 50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256,\n",
            "         50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256,\n",
            "         50256, 50256, 50256],\n",
            "        [ 3792,   477,   262,  2450,    11,  4202,   290,  9366,    11,   198,\n",
            "          2504, 10598,   460,   787,  1028,   606,    13,   198,   198, 49275,\n",
            "          1677,    40,  2937,    25,   198,  4342,  1282,   262,   220, 50256,\n",
            "         50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256,\n",
            "         50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256,\n",
            "         50256, 50256, 50256],\n",
            "        [ 1393,   198,  5189,  3478,  1661,  4274,  4461,   286, 12157,    13,\n",
            "           198,  5247,    11,   788,   616,  2802,    11,   284, 11906,  4957,\n",
            "           467,   198, 12050,  1489, 50256, 50256, 50256, 50256, 50256, 50256,\n",
            "         50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256,\n",
            "         50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256,\n",
            "         50256, 50256, 50256],\n",
            "        [  329,   534, 10839,   314,   423,  8350,    26,   198, 10723,  1549,\n",
            "           329,   534, 10839,    26,   329,  3406, 10839,  6842,   198,  5189,\n",
            "         14129,   734,  8667,   267, 50256, 50256, 50256, 50256, 50256, 50256,\n",
            "         50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256,\n",
            "         50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256,\n",
            "         50256, 50256, 50256]])\n"
          ]
        }
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "from accelerate import Accelerator\n",
        "from transformers import GPT2LMHeadModel\n",
        "\n",
        "# Initialize the Accelerator\n",
        "# (handles device placement / mixed precision / distributed setup).\n",
        "accelerator = Accelerator()\n",
        "\n",
        "# Configure the training arguments\n",
        "# NOTE(review): num_epochs is overridden to 40 by a later cell before the\n",
        "# training loop runs - consolidate into one config cell.\n",
        "num_epochs = 20\n",
        "learning_rate = 5e-5\n",
        "\n",
        "# Initialize the GPT-2 model and optimizer\n",
        "# NOTE(review): this rebinds `model`, discarding the checkpoint loaded in\n",
        "# the first cell - that earlier load appears to be dead work.\n",
        "model = GPT2LMHeadModel.from_pretrained(\"gpt2\")\n",
        "optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\n",
        "\n",
        "# Prepare the model and optimizer for training with Accelerator\n",
        "# (also wraps the dataloader so batches land on the right device).\n",
        "model, optimizer, train_dataloader = accelerator.prepare(model, optimizer, train_dataloader)\n"
      ],
      "metadata": {
        "id": "FWMR85OQYtdS"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "# NOTE(review): leftover state-tweak cell.  `num_epochs` overrides the\n",
        "# value from the setup cell, and `epoch` is immediately overwritten by the\n",
        "# training loop's `for epoch in range(num_epochs)`, so it has no effect.\n",
        "num_epochs=40\n",
        "epoch=20"
      ],
      "metadata": {
        "id": "BvY6jfFIlv_E"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "from tqdm.auto import tqdm  # tqdm.auto renders as a notebook widget\n",
        "\n",
        "# Checkpoint location; a relative path keeps the notebook portable\n",
        "# (the previous absolute /Users/... path only worked on one machine).\n",
        "checkpoint_dir = \"model/tiny_shakespeare\"\n",
        "\n",
        "# Fine-tuning loop\n",
        "for epoch in range(num_epochs):\n",
        "    epoch_iterator = tqdm(train_dataloader, desc=f\"Epoch {epoch + 1}\")\n",
        "    for step, batch in enumerate(epoch_iterator):\n",
        "        optimizer.zero_grad()\n",
        "\n",
        "        # Forward pass; the model computes the LM loss from `labels`.\n",
        "        outputs = model(\n",
        "            input_ids=batch[\"input_ids\"],\n",
        "            attention_mask=batch[\"attention_mask\"],\n",
        "            labels=batch[\"labels\"],\n",
        "        )\n",
        "        loss = outputs.loss\n",
        "\n",
        "        # accelerator.backward handles gradient scaling / distribution.\n",
        "        accelerator.backward(loss)\n",
        "        optimizer.step()\n",
        "\n",
        "        # Update the progress-bar loss readout periodically.\n",
        "        if step % 500 == 0:\n",
        "            epoch_iterator.set_postfix({\"Loss\": loss.item()}, refresh=True)\n",
        "\n",
        "    # Save the model every 5 epochs; unwrap in case accelerate wrapped it.\n",
        "    if (epoch + 1) % 5 == 0:\n",
        "        model_save_path = f\"{checkpoint_dir}/model_checkpoint_epoch_{epoch + 1}\"\n",
        "        accelerator.unwrap_model(model).save_pretrained(model_save_path)\n",
        "        print(f\"Model saved at epoch {epoch + 1}\")\n"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "fKdAzfSRgMKu",
        "outputId": "63ce3a8d-d0db-4be4-e456-d72b86a09b25"
      },
      "execution_count": null,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stderr",
          "text": [
            "Epoch 1: 100%|██████████████████| 2510/2510 [06:11<00:00,  6.75it/s, Loss=0.201]\n",
            "Epoch 2: 100%|███████████████████| 2510/2510 [06:08<00:00,  6.80it/s, Loss=0.18]\n",
            "Epoch 3: 100%|██████████████████| 2510/2510 [06:10<00:00,  6.78it/s, Loss=0.103]\n",
            "Epoch 4: 100%|██████████████████| 2510/2510 [06:13<00:00,  6.73it/s, Loss=0.148]\n",
            "Epoch 5: 100%|██████████████████| 2510/2510 [06:31<00:00,  6.41it/s, Loss=0.167]\n"
          ]
        },
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "Model saved at epoch 5\n"
          ]
        },
        {
          "output_type": "stream",
          "name": "stderr",
          "text": [
            "Epoch 6: 100%|██████████████████| 2510/2510 [06:36<00:00,  6.33it/s, Loss=0.123]\n",
            "Epoch 7: 100%|██████████████████| 2510/2510 [06:34<00:00,  6.37it/s, Loss=0.122]\n",
            "Epoch 8: 100%|█████████████████| 2510/2510 [06:26<00:00,  6.49it/s, Loss=0.0685]\n",
            "Epoch 9: 100%|█████████████████| 2510/2510 [06:37<00:00,  6.32it/s, Loss=0.0491]\n",
            "Epoch 10: 100%|█████████████████| 2510/2510 [06:31<00:00,  6.41it/s, Loss=0.144]\n"
          ]
        },
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "Model saved at epoch 10\n"
          ]
        },
        {
          "output_type": "stream",
          "name": "stderr",
          "text": [
            "Epoch 11: 100%|█████████████████| 2510/2510 [06:35<00:00,  6.34it/s, Loss=0.108]\n",
            "Epoch 12: 100%|████████████████| 2510/2510 [06:30<00:00,  6.43it/s, Loss=0.0763]\n",
            "Epoch 13: 100%|█████████████████| 2510/2510 [06:31<00:00,  6.40it/s, Loss=0.145]\n",
            "Epoch 14: 100%|█████████████████| 2510/2510 [06:33<00:00,  6.38it/s, Loss=0.137]\n",
            "Epoch 15: 100%|████████████████| 2510/2510 [06:31<00:00,  6.42it/s, Loss=0.0757]\n"
          ]
        },
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "Model saved at epoch 15\n"
          ]
        },
        {
          "output_type": "stream",
          "name": "stderr",
          "text": [
            "Epoch 16: 100%|██████████████████| 2510/2510 [06:30<00:00,  6.42it/s, Loss=0.12]\n",
            "Epoch 17: 100%|█████████████████| 2510/2510 [06:30<00:00,  6.43it/s, Loss=0.113]\n",
            "Epoch 18: 100%|█████████████████| 2510/2510 [06:29<00:00,  6.44it/s, Loss=0.141]\n",
            "Epoch 19: 100%|████████████████| 2510/2510 [06:30<00:00,  6.42it/s, Loss=0.0813]\n",
            "Epoch 20: 100%|████████████████| 2510/2510 [06:38<00:00,  6.29it/s, Loss=0.0753]\n"
          ]
        },
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "Model saved at epoch 20\n"
          ]
        },
        {
          "output_type": "stream",
          "name": "stderr",
          "text": [
            "Epoch 21: 100%|████████████████| 2510/2510 [06:30<00:00,  6.42it/s, Loss=0.0855]\n",
            "Epoch 22: 100%|██████████████████| 2510/2510 [06:33<00:00,  6.39it/s, Loss=0.12]\n",
            "Epoch 23: 100%|█████████████████| 2510/2510 [06:29<00:00,  6.44it/s, Loss=0.121]\n",
            "Epoch 24: 100%|█████████████████| 2510/2510 [06:36<00:00,  6.32it/s, Loss=0.107]\n",
            "Epoch 25: 100%|█████████████████| 2510/2510 [06:31<00:00,  6.41it/s, Loss=0.105]\n"
          ]
        },
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "Model saved at epoch 25\n"
          ]
        },
        {
          "output_type": "stream",
          "name": "stderr",
          "text": [
            "Epoch 26: 100%|█████████████████| 2510/2510 [06:32<00:00,  6.40it/s, Loss=0.124]\n",
            "Epoch 27: 100%|████████████████| 2510/2510 [06:33<00:00,  6.37it/s, Loss=0.0576]\n",
            "Epoch 28: 100%|████████████████| 2510/2510 [06:36<00:00,  6.34it/s, Loss=0.0918]\n",
            "Epoch 29: 100%|█████████████████| 2510/2510 [06:33<00:00,  6.37it/s, Loss=0.116]\n",
            "Epoch 30: 100%|████████████████| 2510/2510 [06:35<00:00,  6.35it/s, Loss=0.0753]\n"
          ]
        },
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "Model saved at epoch 30\n"
          ]
        },
        {
          "output_type": "stream",
          "name": "stderr",
          "text": [
            "Epoch 31: 100%|█████████████████| 2510/2510 [06:35<00:00,  6.34it/s, Loss=0.117]\n",
            "Epoch 32: 100%|█████████████████| 2510/2510 [06:35<00:00,  6.35it/s, Loss=0.173]\n",
            "Epoch 33: 100%|█████████████████| 2510/2510 [06:32<00:00,  6.39it/s, Loss=0.062]\n",
            "Epoch 34: 100%|████████████████| 2510/2510 [06:32<00:00,  6.39it/s, Loss=0.0628]\n",
            "Epoch 35: 100%|█████████████████| 2510/2510 [06:27<00:00,  6.48it/s, Loss=0.119]\n"
          ]
        },
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "Model saved at epoch 35\n"
          ]
        },
        {
          "output_type": "stream",
          "name": "stderr",
          "text": [
            "Epoch 36: 100%|████████████████| 2510/2510 [06:31<00:00,  6.41it/s, Loss=0.0706]\n",
            "Epoch 37: 100%|█████████████████| 2510/2510 [06:32<00:00,  6.40it/s, Loss=0.119]\n",
            "Epoch 38: 100%|████████████████| 2510/2510 [06:38<00:00,  6.29it/s, Loss=0.0789]\n",
            "Epoch 39: 100%|██████████████████| 2510/2510 [06:31<00:00,  6.41it/s, Loss=0.11]\n",
            "Epoch 40: 100%|████████████████| 2510/2510 [06:34<00:00,  6.36it/s, Loss=0.0707]\n"
          ]
        },
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "Model saved at epoch 40\n"
          ]
        }
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "# NOTE(review): duplicate of the earlier num_epochs/epoch cell; both\n",
        "# assignments are redundant here (`epoch` is overwritten by the loop).\n",
        "num_epochs=40\n",
        "epoch=20"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "BsoGhgIN90dy",
        "outputId": "18583404-1a97-4d0d-e65d-29b614e2b7be"
      },
      "execution_count": null,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "20"
            ]
          },
          "metadata": {},
          "execution_count": 12
        }
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "accelerator.wait_for_everyone()\n",
        "unwrapped_model = accelerator.unwrap_model(model)\n",
        "\n",
        "unwrapped_model.save_pretrained(model_path)\n",
        "tokenizer.save_pretrained(tokenizer_path)\n"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "n99FX_rwhF_U",
        "outputId": "0f1bd541-5779-462a-df1f-ce99934e7519"
      },
      "execution_count": null,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "('/Users/premtimsina/Documents/bpbbook/chapter6/model/tiny_shakespeare/tiny_shakespeare_gpt2_tokenizer/tokenizer_config.json',\n",
              " '/Users/premtimsina/Documents/bpbbook/chapter6/model/tiny_shakespeare/tiny_shakespeare_gpt2_tokenizer/special_tokens_map.json',\n",
              " '/Users/premtimsina/Documents/bpbbook/chapter6/model/tiny_shakespeare/tiny_shakespeare_gpt2_tokenizer/vocab.json',\n",
              " '/Users/premtimsina/Documents/bpbbook/chapter6/model/tiny_shakespeare/tiny_shakespeare_gpt2_tokenizer/merges.txt',\n",
              " '/Users/premtimsina/Documents/bpbbook/chapter6/model/tiny_shakespeare/tiny_shakespeare_gpt2_tokenizer/added_tokens.json')"
            ]
          },
          "metadata": {},
          "execution_count": 31
        }
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "import torch\n",
        "\n",
        "def generate_poem(prompt, model_path, tokenizer_path, max_words=50, max_seq_len=100, temperature=1.0):\n",
        "    # Load the fine-tuned model and tokenizer\n",
        "    model = GPT2LMHeadModel.from_pretrained(model_path)\n",
        "    tokenizer = GPT2Tokenizer.from_pretrained(tokenizer_path)\n",
        "\n",
        "    # Set the padding token and padding side\n",
        "    tokenizer.pad_token = tokenizer.eos_token\n",
        "    tokenizer.padding_side = 'left'\n",
        "\n",
        "    poem = \"\"\n",
        "    remaining_words = max_words\n",
        "\n",
        "    while remaining_words > 0:\n",
        "        # Set the prompt and generate the text\n",
        "        input_ids = tokenizer.encode(prompt, return_tensors=\"pt\", padding=True, truncation=True, max_length=max_seq_len)\n",
        "        attention_mask = torch.ones_like(input_ids)\n",
        "\n",
        "        max_tokens = min(remaining_words * 5, max_seq_len)  # Assuming each word has an average of 5 tokens\n",
        "        output_ids = model.generate(\n",
        "            input_ids,\n",
        "            max_length=max_tokens,\n",
        "            num_return_sequences=1,\n",
        "            no_repeat_ngram_size=2,\n",
        "            attention_mask=attention_mask,\n",
        "            pad_token_id=tokenizer.pad_token_id,\n",
        "            temperature=temperature,\n",
        "        )\n",
        "\n",
        "        # Convert the token IDs to text\n",
        "        generated_text = tokenizer.decode(output_ids[0], skip_special_tokens=True)\n",
        "        poem += generated_text\n",
        "        remaining_words -= len(generated_text.split())\n",
        "\n",
        "        # Update the prompt with the last part of the generated text\n",
        "        prompt = generated_text.split()[-max_seq_len:]\n",
        "\n",
        "    return poem\n",
        "\n",
        "import re\n",
        "\n",
        "def post_process_poem(poem):\n",
        "    # Remove any extra spaces\n",
        "    poem = re.sub(r'\\s+', ' ', poem).strip()\n",
        "\n",
        "    # Capitalize the first letter of each sentence\n",
        "    sentences = re.split(r'(?<=[\\.\\?!])\\s', poem)\n",
        "    formatted_sentences = [sentence.capitalize() for sentence in sentences]\n",
        "    formatted_poem = ' '.join(formatted_sentences)\n",
        "\n",
        "    # Add line breaks for readability\n",
        "    line_breaks = re.compile(r'(?<=[,;:?!])\\s')\n",
        "    formatted_poem = line_breaks.sub('\\n', formatted_poem)\n",
        "\n",
        "    return formatted_poem\n",
        "\n",
        "\n",
        "\n"
      ],
      "metadata": {
        "id": "VYFJjJzthHb4"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "# Example usage\n",
        "model_path = '/Users/premtimsina/Documents/bpbbook/chapter6/model/tiny_shakespeare/model_checkpoint_epoch_40'\n",
        "tokenizer_path = 'gpt2'\n",
        "prompt = \"love\"\n",
        "max_words = 50\n",
        "temperature = 0.9  # You can adjust this value for more or less randomness\n",
        "generated_poem = generate_poem(prompt, model_path, tokenizer_path, max_words=max_words, temperature=temperature)\n",
        "formatted_poem = post_process_poem(generated_poem)\n",
        "print(formatted_poem)\n"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "ciSZ8cUfmU4g",
        "outputId": "3d4099e0-e31a-4c64-b768-8c1b0dd0bd0c"
      },
      "execution_count": null,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stderr",
          "text": [
            "A decoder-only architecture is being used, but right-padding was detected! For correct generation results, please set `padding_side='left'` when initializing the tokenizer.\n",
            "A decoder-only architecture is being used, but right-padding was detected! For correct generation results, please set `padding_side='left'` when initializing the tokenizer.\n",
            "A decoder-only architecture is being used, but right-padding was detected! For correct generation results, please set `padding_side='left'` when initializing the tokenizer.\n"
          ]
        },
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "Love ill,,\n",
            "have let go the;,\n",
            "as remy top flowers by. Hast since have a parting himsh think longelovehaveletgoastopsincehaveathinka of more when was man again look than,ofmorewhenwasmanagainlooka upon err;\n",
            "more at. Ly,,\n",
            "you made work v,\n",
            "virtue moreour.uponmoreyoumadeworkmeni:p you,\n",
            "lord ,\n",
            "am not mile with nor from worship colder i\n"
          ]
        }
      ]
    },
    {
      "cell_type": "code",
      "source": [],
      "metadata": {
        "id": "OTS7__h9-qWE"
      },
      "execution_count": null,
      "outputs": []
    }
  ]
}