{ "cells": [ { "cell_type": "markdown", "metadata": { "id": "28e4c4d1-a73f-437b-a1bd-c2cc3874924a" }, "source": [ "# 강의 11주차: midm-food-order-understanding\n", "\n", "1. KT-AI/midm-bitext-S-7B-inst-v1 를 주문 문장 이해에 미세 튜닝\n", "\n", "- food-order-understanding-small-3200.json (학습)\n", "- food-order-understanding-small-800.json (검증)\n", "\n", "\n", "종속적인 필요 내용\n", "- huggingface 계정 설정 및 llama-2 사용 승인\n", "- 로깅을 위한 wandb\n", "\n", "\n", "history\n", "\n", "v1.2\n", "- KT-AI/midm-bitext-S-7B-inst-v1 에 safetensors 포맷이 올라왔기에, 해당 리포에서 받도록 설정 변경\n", "- 전체 과정 재검증" ], "id": "28e4c4d1-a73f-437b-a1bd-c2cc3874924a" }, { "cell_type": "code", "execution_count": 1, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "nDZe_wqKU6J3", "outputId": "f5eafd9b-f24e-4d79-a260-de4f8cf0071a" }, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "Requirement already satisfied: transformers in /usr/local/lib/python3.10/dist-packages (4.35.2)\n", "Collecting peft\n", " Downloading peft-0.7.0-py3-none-any.whl (168 kB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m168.3/168.3 kB\u001b[0m \u001b[31m4.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hCollecting accelerate\n", " Downloading accelerate-0.25.0-py3-none-any.whl (265 kB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m265.7/265.7 kB\u001b[0m \u001b[31m22.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hCollecting optimum\n", " Downloading optimum-1.15.0-py3-none-any.whl (400 kB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m400.9/400.9 kB\u001b[0m \u001b[31m46.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hCollecting bitsandbytes\n", " Downloading bitsandbytes-0.41.3.post1-py3-none-any.whl (92.6 MB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m92.6/92.6 MB\u001b[0m \u001b[31m10.7 MB/s\u001b[0m eta 
\u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hCollecting trl\n", " Downloading trl-0.7.4-py3-none-any.whl (133 kB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m133.9/133.9 kB\u001b[0m \u001b[31m20.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hCollecting wandb\n", " Downloading wandb-0.16.1-py3-none-any.whl (2.1 MB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m2.1/2.1 MB\u001b[0m \u001b[31m98.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hCollecting einops\n", " Downloading einops-0.7.0-py3-none-any.whl (44 kB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m44.6/44.6 kB\u001b[0m \u001b[31m7.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hRequirement already satisfied: filelock in /usr/local/lib/python3.10/dist-packages (from transformers) (3.13.1)\n", "Requirement already satisfied: huggingface-hub<1.0,>=0.16.4 in /usr/local/lib/python3.10/dist-packages (from transformers) (0.19.4)\n", "Requirement already satisfied: numpy>=1.17 in /usr/local/lib/python3.10/dist-packages (from transformers) (1.23.5)\n", "Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.10/dist-packages (from transformers) (23.2)\n", "Requirement already satisfied: pyyaml>=5.1 in /usr/local/lib/python3.10/dist-packages (from transformers) (6.0.1)\n", "Requirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.10/dist-packages (from transformers) (2023.6.3)\n", "Requirement already satisfied: requests in /usr/local/lib/python3.10/dist-packages (from transformers) (2.31.0)\n", "Requirement already satisfied: tokenizers<0.19,>=0.14 in /usr/local/lib/python3.10/dist-packages (from transformers) (0.15.0)\n", "Requirement already satisfied: safetensors>=0.3.1 in /usr/local/lib/python3.10/dist-packages (from transformers) (0.4.1)\n", "Requirement already satisfied: tqdm>=4.27 in 
/usr/local/lib/python3.10/dist-packages (from transformers) (4.66.1)\n", "Requirement already satisfied: psutil in /usr/local/lib/python3.10/dist-packages (from peft) (5.9.5)\n", "Requirement already satisfied: torch>=1.13.0 in /usr/local/lib/python3.10/dist-packages (from peft) (2.1.0+cu118)\n", "Collecting coloredlogs (from optimum)\n", " Downloading coloredlogs-15.0.1-py2.py3-none-any.whl (46 kB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m46.0/46.0 kB\u001b[0m \u001b[31m6.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hRequirement already satisfied: sympy in /usr/local/lib/python3.10/dist-packages (from optimum) (1.12)\n", "Collecting datasets (from optimum)\n", " Downloading datasets-2.15.0-py3-none-any.whl (521 kB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m521.2/521.2 kB\u001b[0m \u001b[31m43.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hCollecting tyro>=0.5.11 (from trl)\n", " Downloading tyro-0.6.0-py3-none-any.whl (100 kB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m100.9/100.9 kB\u001b[0m \u001b[31m14.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hRequirement already satisfied: Click!=8.0.0,>=7.1 in /usr/local/lib/python3.10/dist-packages (from wandb) (8.1.7)\n", "Collecting GitPython!=3.1.29,>=1.0.0 (from wandb)\n", " Downloading GitPython-3.1.40-py3-none-any.whl (190 kB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m190.6/190.6 kB\u001b[0m \u001b[31m23.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hCollecting sentry-sdk>=1.0.0 (from wandb)\n", " Downloading sentry_sdk-1.38.0-py2.py3-none-any.whl (252 kB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m252.8/252.8 kB\u001b[0m \u001b[31m25.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hCollecting docker-pycreds>=0.4.0 (from 
wandb)\n", " Downloading docker_pycreds-0.4.0-py2.py3-none-any.whl (9.0 kB)\n", "Collecting setproctitle (from wandb)\n", " Downloading setproctitle-1.3.3-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl (30 kB)\n", "Requirement already satisfied: setuptools in /usr/local/lib/python3.10/dist-packages (from wandb) (67.7.2)\n", "Requirement already satisfied: appdirs>=1.4.3 in /usr/local/lib/python3.10/dist-packages (from wandb) (1.4.4)\n", "Requirement already satisfied: protobuf!=4.21.0,<5,>=3.19.0 in /usr/local/lib/python3.10/dist-packages (from wandb) (3.20.3)\n", "Requirement already satisfied: six>=1.4.0 in /usr/local/lib/python3.10/dist-packages (from docker-pycreds>=0.4.0->wandb) (1.16.0)\n", "Collecting gitdb<5,>=4.0.1 (from GitPython!=3.1.29,>=1.0.0->wandb)\n", " Downloading gitdb-4.0.11-py3-none-any.whl (62 kB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m62.7/62.7 kB\u001b[0m \u001b[31m5.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hRequirement already satisfied: fsspec>=2023.5.0 in /usr/local/lib/python3.10/dist-packages (from huggingface-hub<1.0,>=0.16.4->transformers) (2023.6.0)\n", "Requirement already satisfied: typing-extensions>=3.7.4.3 in /usr/local/lib/python3.10/dist-packages (from huggingface-hub<1.0,>=0.16.4->transformers) (4.5.0)\n", "Requirement already satisfied: charset-normalizer<4,>=2 in /usr/local/lib/python3.10/dist-packages (from requests->transformers) (3.3.2)\n", "Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.10/dist-packages (from requests->transformers) (3.6)\n", "Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests->transformers) (2.0.7)\n", "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.10/dist-packages (from requests->transformers) (2023.11.17)\n", "Requirement already satisfied: networkx in 
/usr/local/lib/python3.10/dist-packages (from torch>=1.13.0->peft) (3.2.1)\n", "Requirement already satisfied: jinja2 in /usr/local/lib/python3.10/dist-packages (from torch>=1.13.0->peft) (3.1.2)\n", "Requirement already satisfied: triton==2.1.0 in /usr/local/lib/python3.10/dist-packages (from torch>=1.13.0->peft) (2.1.0)\n", "Collecting sentencepiece!=0.1.92,>=0.1.91 (from transformers)\n", " Downloading sentencepiece-0.1.99-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (1.3 MB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.3/1.3 MB\u001b[0m \u001b[31m60.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hCollecting docstring-parser>=0.14.1 (from tyro>=0.5.11->trl)\n", " Downloading docstring_parser-0.15-py3-none-any.whl (36 kB)\n", "Requirement already satisfied: rich>=11.1.0 in /usr/local/lib/python3.10/dist-packages (from tyro>=0.5.11->trl) (13.7.0)\n", "Collecting shtab>=1.5.6 (from tyro>=0.5.11->trl)\n", " Downloading shtab-1.6.5-py3-none-any.whl (13 kB)\n", "Collecting humanfriendly>=9.1 (from coloredlogs->optimum)\n", " Downloading humanfriendly-10.0-py2.py3-none-any.whl (86 kB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m86.8/86.8 kB\u001b[0m \u001b[31m12.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hRequirement already satisfied: pyarrow>=8.0.0 in /usr/local/lib/python3.10/dist-packages (from datasets->optimum) (9.0.0)\n", "Collecting pyarrow-hotfix (from datasets->optimum)\n", " Downloading pyarrow_hotfix-0.6-py3-none-any.whl (7.9 kB)\n", "Collecting dill<0.3.8,>=0.3.0 (from datasets->optimum)\n", " Downloading dill-0.3.7-py3-none-any.whl (115 kB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m115.3/115.3 kB\u001b[0m \u001b[31m17.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hRequirement already satisfied: pandas in /usr/local/lib/python3.10/dist-packages (from datasets->optimum) 
(1.5.3)\n", "Requirement already satisfied: xxhash in /usr/local/lib/python3.10/dist-packages (from datasets->optimum) (3.4.1)\n", "Collecting multiprocess (from datasets->optimum)\n", " Downloading multiprocess-0.70.15-py310-none-any.whl (134 kB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m134.8/134.8 kB\u001b[0m \u001b[31m18.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hRequirement already satisfied: aiohttp in /usr/local/lib/python3.10/dist-packages (from datasets->optimum) (3.9.1)\n", "Requirement already satisfied: mpmath>=0.19 in /usr/local/lib/python3.10/dist-packages (from sympy->optimum) (1.3.0)\n", "Requirement already satisfied: attrs>=17.3.0 in /usr/local/lib/python3.10/dist-packages (from aiohttp->datasets->optimum) (23.1.0)\n", "Requirement already satisfied: multidict<7.0,>=4.5 in /usr/local/lib/python3.10/dist-packages (from aiohttp->datasets->optimum) (6.0.4)\n", "Requirement already satisfied: yarl<2.0,>=1.0 in /usr/local/lib/python3.10/dist-packages (from aiohttp->datasets->optimum) (1.9.3)\n", "Requirement already satisfied: frozenlist>=1.1.1 in /usr/local/lib/python3.10/dist-packages (from aiohttp->datasets->optimum) (1.4.0)\n", "Requirement already satisfied: aiosignal>=1.1.2 in /usr/local/lib/python3.10/dist-packages (from aiohttp->datasets->optimum) (1.3.1)\n", "Requirement already satisfied: async-timeout<5.0,>=4.0 in /usr/local/lib/python3.10/dist-packages (from aiohttp->datasets->optimum) (4.0.3)\n", "Collecting smmap<6,>=3.0.1 (from gitdb<5,>=4.0.1->GitPython!=3.1.29,>=1.0.0->wandb)\n", " Downloading smmap-5.0.1-py3-none-any.whl (24 kB)\n", "Requirement already satisfied: markdown-it-py>=2.2.0 in /usr/local/lib/python3.10/dist-packages (from rich>=11.1.0->tyro>=0.5.11->trl) (3.0.0)\n", "Requirement already satisfied: pygments<3.0.0,>=2.13.0 in /usr/local/lib/python3.10/dist-packages (from rich>=11.1.0->tyro>=0.5.11->trl) (2.16.1)\n", "Requirement already satisfied: MarkupSafe>=2.0 
in /usr/local/lib/python3.10/dist-packages (from jinja2->torch>=1.13.0->peft) (2.1.3)\n", "Requirement already satisfied: python-dateutil>=2.8.1 in /usr/local/lib/python3.10/dist-packages (from pandas->datasets->optimum) (2.8.2)\n", "Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.10/dist-packages (from pandas->datasets->optimum) (2023.3.post1)\n", "Requirement already satisfied: mdurl~=0.1 in /usr/local/lib/python3.10/dist-packages (from markdown-it-py>=2.2.0->rich>=11.1.0->tyro>=0.5.11->trl) (0.1.2)\n", "Installing collected packages: sentencepiece, bitsandbytes, smmap, shtab, setproctitle, sentry-sdk, pyarrow-hotfix, humanfriendly, einops, docstring-parser, docker-pycreds, dill, multiprocess, gitdb, coloredlogs, tyro, GitPython, accelerate, wandb, datasets, trl, peft, optimum\n", "Successfully installed GitPython-3.1.40 accelerate-0.25.0 bitsandbytes-0.41.3.post1 coloredlogs-15.0.1 datasets-2.15.0 dill-0.3.7 docker-pycreds-0.4.0 docstring-parser-0.15 einops-0.7.0 gitdb-4.0.11 humanfriendly-10.0 multiprocess-0.70.15 optimum-1.15.0 peft-0.7.0 pyarrow-hotfix-0.6 sentencepiece-0.1.99 sentry-sdk-1.38.0 setproctitle-1.3.3 shtab-1.6.5 smmap-5.0.1 trl-0.7.4 tyro-0.6.0 wandb-0.16.1\n" ] } ], "source": [ "pip install transformers peft accelerate optimum bitsandbytes trl wandb einops" ], "id": "nDZe_wqKU6J3" }, { "cell_type": "code", "execution_count": 2, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "51eb00d7-2928-41ad-9ae9-7f0da7d64d6d", "outputId": "5e0dc56b-95ed-4008-ee80-cc7c131a30a9" }, "outputs": [ { "output_type": "stream", "name": "stderr", "text": [ "/usr/local/lib/python3.10/dist-packages/trl/trainer/ppo_config.py:141: UserWarning: The `optimize_cuda_cache` arguement will be deprecated soon, please use `optimize_device_cache` instead.\n", " warnings.warn(\n" ] } ], "source": [ "import os\n", "from dataclasses import dataclass, field\n", "from typing import Optional\n", "import re\n", "\n", "import torch\n", "import 
tyro\n", "from accelerate import Accelerator\n", "from datasets import load_dataset, Dataset\n", "from peft import AutoPeftModelForCausalLM, LoraConfig\n", "from tqdm import tqdm\n", "from transformers import (\n", " AutoModelForCausalLM,\n", " AutoTokenizer,\n", " BitsAndBytesConfig,\n", " TrainingArguments,\n", ")\n", "\n", "from trl import SFTTrainer\n", "\n", "from trl.trainer import ConstantLengthDataset" ], "id": "51eb00d7-2928-41ad-9ae9-7f0da7d64d6d" }, { "cell_type": "code", "execution_count": 3, "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 145, "referenced_widgets": [ "19ab06a9c77949deb5d895f3960c7936", "d49698f512a94ddcb3e95d555b32815e", "c8cc0bb42794455b950d8002dca66831", "12e4579291b14280a57d507e8d683258", "ea5f11e553144f0ea85c562dd00114a5", "cc37abdcfbfc4963b6a432b6a17e27d5", "b774019fa1344b5dab82b05200f2e842", "6543b74164434bcf969d80aeb6e20b0e", "5c6cd6a747344866868ad6734d921bcb", "af65e983558e489aa3bac902931473d1", "cee02bd8ea0c4c90b77ac1819f1a6566", "0d883aa7040f4290abf85c3e33f9ce4f", "10136afec3ba4a9692dba5b244ff3338", "931d0fada0a14aab81b0f2b2466df5fd", "f4ca565564774bf7930e7e65e0d042ae", "f3d74e756dfb4a2ea51134216879810b", "f9e56abdf7d84ceda732df869862afc2", "2c6c6be9c8ce47ac8d360d1010a9239d", "bbb5aa17e2b14da3b670ec89deeec57c", "0ddc6bfb294843e5941ae7075d21c98b", "846c79b8fe604cdbb600a452b0b28890", "437522ee9a05480f963c02533b70c155", "96c40a82d6904a56b276a8bff5191bce", "802d6b5803c140b2a97ed4ab95320858", "dd5081c5bb0041418ff07c40bf0a4226", "6e290d8144924ba8826f18254975ebd7", "55333d6acaf94557a41857a286ee2ae3", "7ef70cedb6c64f508ba3ed1391875899", "38f6d70290a14dfdaec394fae7f0e026", "8e46fc712d574626af5f92fad3a1f719", "4a3b4746a8234656b08a19abb6e79879", "fd4cf399c58c4c61be021ff7b9859eb8" ] }, "id": "tX7gYxZaVhYL", "outputId": "89626642-cd90-40c0-8a2c-c54505dc3faa" }, "outputs": [ { "output_type": "display_data", "data": { "text/plain": [ "VBox(children=(HTML(value='
/content/wandb/run-20231125_070528-q7nwfg3b
"
]
},
"metadata": {}
},
{
"output_type": "display_data",
"data": {
"text/plain": [
"Step | \n", "Training Loss | \n", "
---|---|
50 | \n", "1.042000 | \n", "
100 | \n", "0.549300 | \n", "
150 | \n", "0.504900 | \n", "
200 | \n", "0.496100 | \n", "
250 | \n", "0.518200 | \n", "
300 | \n", "0.497700 | \n", "
"
]
},
"metadata": {}
},
{
"output_type": "execute_result",
"data": {
"text/plain": [
"TrainOutput(global_step=300, training_loss=0.6013818422953288, metrics={'train_runtime': 937.6794, 'train_samples_per_second': 0.64, 'train_steps_per_second': 0.32, 'total_flos': 9315508499251200.0, 'train_loss': 0.6013818422953288, 'epoch': 0.19})"
]
},
"metadata": {},
"execution_count": 31
}
],
"source": [
"trainer.train()"
],
"id": "14019fa9-0c6f-4729-ac99-0d407af375b8"
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 36
},
"id": "3Y4FQSyRghQt",
"outputId": "a97204d2-e42f-46ef-c954-09f6f0cda6ca"
},
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/plain": [
"'/gdrive/MyDrive/Lectures/2023/nlp/lora-midm-7b-food-order-understanding'"
],
"application/vnd.google.colaboratory.intrinsic+json": {
"type": "string"
}
},
"metadata": {},
"execution_count": 32
}
],
"source": [
"script_args.training_args.output_dir"
],
"id": "3Y4FQSyRghQt"
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "49f05450-da2a-4edd-9db2-63836a0ec73a"
},
"outputs": [],
"source": [
"trainer.save_model(script_args.training_args.output_dir)"
],
"id": "49f05450-da2a-4edd-9db2-63836a0ec73a"
},
{
"cell_type": "markdown",
"metadata": {
"id": "652f307e-e1d7-43ae-b083-dba2d94c2296"
},
"source": [
"# 추론 테스트"
],
"id": "652f307e-e1d7-43ae-b083-dba2d94c2296"
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "ea8a1fea-7499-4386-9dea-0509110f61af"
},
"outputs": [],
"source": [
"from transformers import pipeline, TextStreamer"
],
"id": "ea8a1fea-7499-4386-9dea-0509110f61af"
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "52626888-1f6e-46b6-a8dd-836622149ff5"
},
"outputs": [],
"source": [
"instruction_prompt_template = \"\"\"###System;다음은 매장에서 고객이 음식을 주문하는 주문 문장이다. 이를 분석하여 음식명, 옵션명, 수량을 추출하여 고객의 의도를 이해하고자 한다.\n",
"분석 결과를 완성해주기 바란다.\n",
"\n",
"### 주문 문장: {0} ### 분석 결과:\n",
"\"\"\"\n",
"\n",
"prompt_template = \"\"\"###System;{System}\n",
"###User;{User}\n",
"###Midm;\"\"\"\n",
"\n",
"default_system_msg = (\n",
" \"너는 먼저 사용자가 입력한 주문 문장을 분석하는 에이전트이다. 이로부터 주문을 구성하는 음식명, 옵션명, 수량을 차례대로 추출해야 한다.\"\n",
")"
],
"id": "52626888-1f6e-46b6-a8dd-836622149ff5"
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "46e844fa-8f63-4359-a4fb-df66e8171796"
},
"outputs": [],
"source": [
"evaluation_queries = [\n",
" \"오늘은 비가오니깐 이거 먹자. 삼선짬뽕 곱배기 하나하구요, 사천 탕수육 중짜 한그릇 주세요.\",\n",
" \"아이스아메리카노 톨사이즈 한잔 하고요. 딸기스무디 한잔 주세요. 또, 콜드브루라떼 하나요.\",\n",
" \"참이슬 한병, 코카콜라 1.5리터 한병, 테슬라 한병이요.\",\n",
" \"꼬막무침 1인분하고요, 닭도리탕 중자 주세요. 그리고 소주도 한병 주세요.\",\n",
" \"김치찌개 3인분하고요, 계란말이 주세요.\",\n",
" \"불고기버거세트 1개하고요 감자튀김 추가해주세요.\",\n",
" \"불닭볶음면 1개랑 사리곰탕면 2개 주세요.\",\n",
" \"카페라떼 아이스 샷추가 한잔하구요. 스콘 하나 주세요\",\n",
" \"여기요 춘천닭갈비 4인분하고요. 라면사리 추가하겠습니다. 콜라 300ml 두캔주세요.\",\n",
" \"있잖아요 조랭이떡국 3인분하고요. 떡만두 한세트 주세요.\",\n",
" \"깐풍탕수 2인분 하고요 콜라 1.5리터 한병이요.\",\n",
"]"
],
"id": "46e844fa-8f63-4359-a4fb-df66e8171796"
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "1919cf1f-482e-4185-9d06-e3cea1918416"
},
"outputs": [],
"source": [
"def wrapper_generate(model, input_prompt, do_stream=False):\n",
"    \"\"\"Run greedy generation on `input_prompt` and return only the newly generated text.\n",
"\n",
"    model: a causal LM (base or PEFT-wrapped) already placed on GPU.\n",
"    input_prompt: fully formatted prompt string.\n",
"    do_stream: when True, tokens are echoed incrementally via TextStreamer.\n",
"    \"\"\"\n",
"    data = tokenizer(input_prompt, return_tensors=\"pt\")\n",
"    streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)\n",
"    # Drop the final token the tokenizer appends (presumably EOS — TODO confirm for this\n",
"    # tokenizer) so generation continues the prompt instead of stopping immediately.\n",
"    input_ids = data.input_ids[..., :-1]\n",
"    with torch.no_grad():\n",
"        pred = model.generate(\n",
"            input_ids=input_ids.cuda(),\n",
"            streamer=streamer if do_stream else None,\n",
"            use_cache=True,\n",
"            # `max_new_tokens` must be an int; float('inf') breaks length validation in\n",
"            # recent transformers releases and greedy decoding could otherwise run to the\n",
"            # context limit. 1024 is ample for these short order-analysis completions.\n",
"            max_new_tokens=1024,\n",
"            do_sample=False\n",
"        )\n",
"    decoded_text = tokenizer.batch_decode(pred, skip_special_tokens=True)\n",
"    # The model emits a literal '<[!newline]>' marker in place of newlines; restore them.\n",
"    decoded_text = decoded_text[0].replace(\"<[!newline]>\", \"\\n\")\n",
"    # Strip the echoed prompt so only the model's analysis is returned.\n",
"    return decoded_text[len(input_prompt):]"
],
"id": "1919cf1f-482e-4185-9d06-e3cea1918416"
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "eaac1f6f-c823-4488-8edb-2f931ddf0daa",
"outputId": "930fa82a-0858-44d3-ef83-f688ccf80c1b"
},
"outputs": [
{
"output_type": "stream",
"name": "stderr",
"text": [
"/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py:1473: UserWarning: You have modified the pretrained model configuration to control generation. This is a deprecated strategy to control generation and will be removed soon, in a future version. Please use and modify the model generation configuration (see https://huggingface.co/docs/transformers/generation_strategies#default-text-generation-configuration )\n",
" warnings.warn(\n"
]
}
],
"source": [
"# Build {index: parsed-order-text} for every evaluation query using the base model.\n",
"eval_dic = {\n",
"    idx: wrapper_generate(\n",
"        model=base_model,\n",
"        input_prompt=prompt_template.format(System=default_system_msg, User=query),\n",
"    )\n",
"    for idx, query in enumerate(evaluation_queries)\n",
"}"
],
"id": "eaac1f6f-c823-4488-8edb-2f931ddf0daa"
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "fefd04ba-2ed8-4f84-bdd0-86d52b3f39f6",
"outputId": "7411d306-e523-4a41-865b-d02a53608245"
},
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"- 분석 결과 0: 음식명:삼선짬뽕, 옵션:곱배기, 수량:하나\n",
"- 분석 결과 1: 음식명:사천 탕수육, 옵션:중짜, 수량:한그릇\n"
]
}
],
"source": [
"print(eval_dic[0])"
],
"id": "fefd04ba-2ed8-4f84-bdd0-86d52b3f39f6"
},
{
"cell_type": "markdown",
"metadata": {
"id": "3f471e3a-723b-4df5-aa72-46f571f6bab6"
},
"source": [
"# 미세튜닝된 모델 로딩 후 테스트"
],
"id": "3f471e3a-723b-4df5-aa72-46f571f6bab6"
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "a43bdd07-7555-42b2-9888-a614afec892f"
},
"outputs": [],
"source": [
"bnb_config = BitsAndBytesConfig(\n",
" load_in_4bit=True,\n",
" bnb_4bit_quant_type=\"nf4\",\n",
" bnb_4bit_compute_dtype=torch.bfloat16,\n",
")"
],
"id": "a43bdd07-7555-42b2-9888-a614afec892f"
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 49,
"referenced_widgets": [
"7b86e0db29d64b44bb2e7feb52958679",
"dbe8587bc57d49f7bd31e1e764a3c1d9",
"b65474856f654107986b92c2781676d2",
"f85e191cdff3450496c6c6aacd08ed7d",
"04ba6c7451784c9bb49e8ec16cc16440",
"5ef9c8dc73614ffa9d14118ed64c85c7",
"da1e45c3cc1c49758b7c24c35c31751b",
"2afbbc0d3f0949c8bbb56e666d2cfa82",
"4a88a1ed8ed8405b978477b2d11ba3b4",
"ee5b8d93923b4875bf2f4863ca2f5628",
"3f215b1ef9d24b45a6b161e019403325"
]
},
"id": "39db2ee4-23c8-471f-89b2-bca34964bf81",
"outputId": "ad6582fd-8378-4170-f099-2dcf4f9fa441"
},
"outputs": [
{
"output_type": "display_data",
"data": {
"text/plain": [
"Loading checkpoint shards: 0%| | 0/2 [00:00, ?it/s]"
],
"application/vnd.jupyter.widget-view+json": {
"version_major": 2,
"version_minor": 0,
"model_id": "7b86e0db29d64b44bb2e7feb52958679"
}
},
"metadata": {}
}
],
"source": [
"trained_model = AutoPeftModelForCausalLM.from_pretrained(\n",
" script_args.training_args.output_dir,\n",
" quantization_config=bnb_config,\n",
" device_map=\"auto\",\n",
" cache_dir=script_args.cache_dir,\n",
" trust_remote_code=True,\n",
")"
],
"id": "39db2ee4-23c8-471f-89b2-bca34964bf81"
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "b0b75ca4-730d-4bde-88bb-a86462a76d52"
},
"outputs": [],
"source": [
"tokenizer = AutoTokenizer.from_pretrained(\n",
"    script_args.model_name,\n",
"    trust_remote_code=True,\n",
"    cache_dir=script_args.cache_dir,\n",
")\n",
"\n",
"# Reuse EOS as the padding token when the tokenizer ships without one.\n",
"if getattr(tokenizer, \"pad_token\", None) is None:\n",
"    tokenizer.pad_token = tokenizer.eos_token\n",
"tokenizer.padding_side = \"right\" # Fix weird overflow issue with fp16 training\n",
"\n",
"# Register '</s>' as the BOS token per the midm model-card usage example.\n",
"# NOTE(review): this line previously read bos_token='' — almost certainly an artifact of\n",
"# HTML tags being stripped from the notebook; an empty-string BOS would be a bug.\n",
"tokenizer.add_special_tokens(dict(bos_token='</s>'))\n",
"\n",
"# Propagate the (possibly updated) special-token ids into the loaded model's config.\n",
"trained_model.config.pad_token_id = tokenizer.pad_token_id\n",
"trained_model.config.bos_token_id = tokenizer.bos_token_id"
],
"id": "b0b75ca4-730d-4bde-88bb-a86462a76d52"
},
{
"cell_type": "markdown",
"metadata": {
"id": "X1tRCa4EiYXp"
},
"source": [
"추론 과정에서는 GPU 메모리를 약 5.5 GB 활용"
],
"id": "X1tRCa4EiYXp"
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"background_save": true,
"base_uri": "https://localhost:8080/"
},
"id": "e374555b-9f8a-4617-8ea7-c1e6ee1b2999",
"outputId": "526d2827-6422-4399-d7ed-107b822b2bb2"
},
"outputs": [
{
"output_type": "stream",
"name": "stderr",
"text": [
"/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py:1473: UserWarning: You have modified the pretrained model configuration to control generation. This is a deprecated strategy to control generation and will be removed soon, in a future version. Please use and modify the model generation configuration (see https://huggingface.co/docs/transformers/generation_strategies#default-text-generation-configuration )\n",
" warnings.warn(\n"
]
},
{
"output_type": "stream",
"name": "stdout",
"text": [
"- 분석 결과 0: 음식명:삼선짬뽕, 옵션:곱배기, 수량:하나<[!newline]>- 분석 결과 1: 음식명:사천 탕수육, 옵션:중짜, 수량:한그릇\n",
"- 분석 결과 0: 음식명:아이스아메리카노,옵션:톨사이즈,수량:한잔<[!newline]>- 분석 결과 1: 음식명:딸기스무디,수량:한잔<[!newline]>- 분석 결과 2: 음식명:콜드브루라떼,수량:하나\n",
"- 분석 결과 0: 음식명:참이슬,수량:한병<[!newline]>- 분석 결과 1: 음식명:코카콜라,옵션:1.5리터,수량:한병<[!newline]>- 분석 결과 2: 음식명:테슬라,수량:한병\n",
"- 분석 결과 0: 음식명:꼬막무침, 수량:1인분<[!newline]>- 분석 결과 1: 음식명:닭도리탕, 옵션:중자<[!newline]>- 분석 결과 2: 음식명:소주, 수량:한병\n",
"- 분석 결과 0: 음식명:김치찌개, 수량:3인분<[!newline]>- 분석 결과 1: 음식명:계란말이\n",
"- 분석 결과 0: 음식명:불고기버거세트, 수량:1개<[!newline]>- 분석 결과 1: 음식명:감자튀김, 수량:추가\n",
"- 분석 결과 0: "
]
}
],
"source": [
"# Re-run all evaluation queries through the fine-tuned model, streaming tokens as they arrive.\n",
"eval_dic = {\n",
"    idx: wrapper_generate(\n",
"        model=trained_model,\n",
"        do_stream=True,\n",
"        input_prompt=prompt_template.format(System=default_system_msg, User=query),\n",
"    )\n",
"    for idx, query in enumerate(evaluation_queries)\n",
"}"
],
"id": "e374555b-9f8a-4617-8ea7-c1e6ee1b2999"
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "5d055bb0-5e5f-4221-a634-45d903c0f3b5"
},
"outputs": [],
"source": [
"print(eval_dic[0])"
],
"id": "5d055bb0-5e5f-4221-a634-45d903c0f3b5"
}
],
"metadata": {
"accelerator": "GPU",
"colab": {
"provenance": [],
"toc_visible": true
},
"kernelspec": {
"display_name": "Python 3",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.13"
},
"widgets": {
"application/vnd.jupyter.widget-state+json": {
"7b86e0db29d64b44bb2e7feb52958679": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"model_module_version": "1.5.0",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HBoxModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HBoxView",
"box_style": "",
"children": [
"IPY_MODEL_dbe8587bc57d49f7bd31e1e764a3c1d9",
"IPY_MODEL_b65474856f654107986b92c2781676d2",
"IPY_MODEL_f85e191cdff3450496c6c6aacd08ed7d"
],
"layout": "IPY_MODEL_04ba6c7451784c9bb49e8ec16cc16440"
}
},
"dbe8587bc57d49f7bd31e1e764a3c1d9": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"model_module_version": "1.5.0",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_5ef9c8dc73614ffa9d14118ed64c85c7",
"placeholder": "",
"style": "IPY_MODEL_da1e45c3cc1c49758b7c24c35c31751b",
"value": "Loading checkpoint shards: 100%"
}
},
"b65474856f654107986b92c2781676d2": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"model_module_version": "1.5.0",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "FloatProgressModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "ProgressView",
"bar_style": "success",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_2afbbc0d3f0949c8bbb56e666d2cfa82",
"max": 2,
"min": 0,
"orientation": "horizontal",
"style": "IPY_MODEL_4a88a1ed8ed8405b978477b2d11ba3b4",
"value": 2
}
},
"f85e191cdff3450496c6c6aacd08ed7d": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"model_module_version": "1.5.0",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_ee5b8d93923b4875bf2f4863ca2f5628",
"placeholder": "",
"style": "IPY_MODEL_3f215b1ef9d24b45a6b161e019403325",
"value": " 2/2 [01:11<00:00, 34.34s/it]"
}
},
"04ba6c7451784c9bb49e8ec16cc16440": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"model_module_version": "1.2.0",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"5ef9c8dc73614ffa9d14118ed64c85c7": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"model_module_version": "1.2.0",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"da1e45c3cc1c49758b7c24c35c31751b": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"model_module_version": "1.5.0",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"2afbbc0d3f0949c8bbb56e666d2cfa82": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"model_module_version": "1.2.0",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"4a88a1ed8ed8405b978477b2d11ba3b4": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"model_module_version": "1.5.0",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "ProgressStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"bar_color": null,
"description_width": ""
}
},
"ee5b8d93923b4875bf2f4863ca2f5628": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"model_module_version": "1.2.0",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"3f215b1ef9d24b45a6b161e019403325": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"model_module_version": "1.5.0",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"19ab06a9c77949deb5d895f3960c7936": {
"model_module": "@jupyter-widgets/controls",
"model_name": "VBoxModel",
"model_module_version": "1.5.0",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "VBoxModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "VBoxView",
"box_style": "",
"children": [
"IPY_MODEL_846c79b8fe604cdbb600a452b0b28890",
"IPY_MODEL_437522ee9a05480f963c02533b70c155",
"IPY_MODEL_96c40a82d6904a56b276a8bff5191bce",
"IPY_MODEL_802d6b5803c140b2a97ed4ab95320858"
],
"layout": "IPY_MODEL_b774019fa1344b5dab82b05200f2e842"
}
},
"d49698f512a94ddcb3e95d555b32815e": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"model_module_version": "1.5.0",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_6543b74164434bcf969d80aeb6e20b0e",
"placeholder": "",
"style": "IPY_MODEL_5c6cd6a747344866868ad6734d921bcb",
"value": "
Copy a token from your Hugging Face\ntokens page and paste it below.
Immediately click login after copying\nyour token or it might be stored in plain text in this notebook file.