{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "cb66e125",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2023-02-20T12:19:11.618309Z",
     "start_time": "2023-02-20T12:19:08.788876Z"
    }
   },
   "source": [
    "from transformers import AutoTokenizer"
   ],
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "f03cc2a2",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2023-02-20T12:23:05.278941Z",
     "start_time": "2023-02-20T12:23:00.707172Z"
    }
   },
   "source": [
     "# Load the fast GPT-2 (distilgpt2) tokenizer and sanity-check it on two sample sentences.\n",
     "tokenizer = AutoTokenizer.from_pretrained('distilgpt2', use_fast=True)\n",
     "tokenizer.batch_encode_plus([\n",
     "    'hide new secretions from the parental units',\n",
     "    'this moive is great'\n",
     "])"
   ],
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "8109bb61",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2023-02-20T12:23:22.932249Z",
     "start_time": "2023-02-20T12:23:22.927262Z"
    }
   },
   "source": [
    "print(tokenizer)"
   ],
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "13c47369",
   "metadata": {},
   "source": [
    "# 预测最后一个词 实际上是一个多分类问题"
   ],
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "41b987bc",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2023-02-20T12:26:03.204373Z",
     "start_time": "2023-02-20T12:25:56.868322Z"
    },
    "scrolled": true
   },
   "source": [
     "# Load the GLUE SST-2 dataset; only its raw sentences are used here.\n",
     "from datasets import load_dataset\n",
     "\n",
     "dataset = load_dataset(path='glue', name='sst2')"
   ],
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "8285bed7",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2023-02-20T12:28:33.881303Z",
     "start_time": "2023-02-20T12:28:33.865261Z"
    },
    "collapsed": true,
    "jupyter": {
     "outputs_hidden": true
    }
   },
   "source": [
    "dataset"
   ],
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "75a55f28",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2023-02-20T12:34:52.793883Z",
     "start_time": "2023-02-20T12:34:29.838150Z"
    }
   },
   "source": [
     "# Tokenize the corpus in parallel batches; drop the original columns so only\n",
     "# the tokenizer outputs (input_ids / attention_mask) remain.\n",
     "def f(data, tokenizer):\n",
     "    return tokenizer.batch_encode_plus(data['sentence'])\n",
     "\n",
     "\n",
     "dataset = dataset.map(f, batched=True, batch_size=1000, num_proc=12, \n",
     "                     remove_columns=['sentence', 'idx', 'label'], \n",
     "                     fn_kwargs={'tokenizer': tokenizer})"
   ],
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "40e144f0",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2023-02-20T12:35:10.563978Z",
     "start_time": "2023-02-20T12:35:10.547938Z"
    },
    "collapsed": true,
    "jupyter": {
     "outputs_hidden": true
    }
   },
   "source": [
    "dataset"
   ],
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "e7a36355",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2023-02-20T12:37:35.116544Z",
     "start_time": "2023-02-20T12:37:26.622802Z"
    }
   },
   "source": [
     "# Require each sentence to have at least 8 tokens;\n",
     "# filter out sentences that are too short.\n",
     "def f(data):\n",
     "    return [len(i) >= 8 for i in data['input_ids']]\n",
     "\n",
     "dataset = dataset.filter(f, batched=True, batch_size=1000, num_proc=12)"
   ],
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "66c8c461",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2023-02-20T12:37:38.542380Z",
     "start_time": "2023-02-20T12:37:38.525425Z"
    }
   },
   "source": [
    "dataset"
   ],
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "7fbde696",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2023-02-20T12:42:53.581009Z",
     "start_time": "2023-02-20T12:42:44.710040Z"
    }
   },
   "source": [
     "# Truncate every sentence to exactly 8 tokens.\n",
     "def f(data):\n",
     "    data['input_ids'] = [i[:8] for i in data['input_ids']]\n",
     "    data['attention_mask'] = [[1] * 8] * len(data['attention_mask'])\n",
     "    # The model handles the one-position target shift internally, so the\n",
     "    # labels can simply be a copy of the inputs.\n",
     "    data['labels'] = data['input_ids']\n",
     "    return data\n",
     "\n",
     "dataset = dataset.map(f, batched=True, batch_size=1000, num_proc=12)"
   ],
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "65f3b2b2",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2023-02-20T12:43:04.096926Z",
     "start_time": "2023-02-20T12:43:04.085955Z"
    },
    "collapsed": true,
    "jupyter": {
     "outputs_hidden": true
    }
   },
   "source": [
    "dataset"
   ],
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "ebbde5a7",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2023-02-20T12:43:14.847694Z",
     "start_time": "2023-02-20T12:43:14.834729Z"
    },
    "scrolled": true
   },
   "source": [
    "dataset['train'][0]"
   ],
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "43808a26",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2023-02-20T12:46:34.086991Z",
     "start_time": "2023-02-20T12:46:32.641816Z"
    }
   },
   "source": [
     "# Build the training data loader; default_data_collator stacks the\n",
     "# fixed-length examples into tensors.\n",
     "import torch\n",
     "from transformers.data.data_collator import default_data_collator\n",
     "\n",
     "loader = torch.utils.data.DataLoader(\n",
     "    dataset=dataset['train'],\n",
     "    batch_size=16,\n",
     "    collate_fn=default_data_collator,\n",
     "    shuffle=True,\n",
     "    drop_last=True\n",
     ")\n",
     "\n",
     "# Grab a single batch for inspection.\n",
     "for data in loader:\n",
     "    break\n",
     "    \n",
     "len(loader), data"
   ],
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "de61f1c7",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2023-02-20T12:47:44.241605Z",
     "start_time": "2023-02-20T12:47:44.171790Z"
    }
   },
   "source": [
    "from transformers import AutoModelForCausalLM, GPT2Model"
   ],
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "5eaf56d2",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2023-02-20T12:50:34.252699Z",
     "start_time": "2023-02-20T12:50:34.238737Z"
    }
   },
   "source": [
    "tokenizer.vocab_size * 768"
   ],
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "id": "866dbf0a",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2023-02-20T13:04:28.849025Z",
     "start_time": "2023-02-20T13:04:23.701287Z"
    }
   },
   "source": [
     "class Model(torch.nn.Module):\n",
     "    # GPT-2 backbone plus a linear head that predicts the next token.\n",
     "    def __init__(self):\n",
     "        super().__init__()\n",
     "        # Simpler alternative: model = AutoModelForCausalLM.from_pretrained('distilgpt2')\n",
     "        self.pretrained = GPT2Model.from_pretrained('distilgpt2')\n",
     "        self.fc = torch.nn.Linear(768, tokenizer.vocab_size, bias=False)\n",
     "        \n",
     "        # Initialise the fc layer from the pretrained language-model head.\n",
     "        parameters = AutoModelForCausalLM.from_pretrained('distilgpt2')\n",
     "        self.fc.load_state_dict(parameters.lm_head.state_dict())\n",
     "        \n",
     "        self.criterion = torch.nn.CrossEntropyLoss()\n",
     "        \n",
     "    def forward(self, input_ids, attention_mask, labels=None):\n",
     "        # Returns {'loss': loss-or-None, 'logits': (batch, seq, vocab)} like HF models.\n",
     "        logits = self.pretrained(input_ids=input_ids, attention_mask=attention_mask)\n",
     "        logits = logits.last_hidden_state\n",
     "        logits = self.fc(logits)\n",
     "        \n",
     "        loss = None\n",
     "        if labels is not None:\n",
     "            # Shift so position t predicts token t+1 (standard causal-LM loss).\n",
     "            shift_logits = logits[:, :-1].reshape(-1, tokenizer.vocab_size)\n",
     "            shift_labels = labels[:, 1:].reshape(-1)\n",
     "            loss = self.criterion(shift_logits, shift_labels)\n",
     "        return {'loss': loss, 'logits': logits}\n",
     "    \n",
     "model = Model()\n",
     "# Parameter count, in units of 10k.\n",
     "print(sum(i.numel() for i in model.parameters()) / 10000)"
   ],
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "id": "e1af0346",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2023-02-20T13:04:30.628309Z",
     "start_time": "2023-02-20T13:04:30.458719Z"
    },
    "scrolled": true
   },
   "source": [
     "# In Python, ** unpacks a dict into keyword arguments: {k: v} -> k=v.\n",
     "out = model(**data)\n",
     "print(out['loss'], out['logits'].shape)"
   ],
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 37,
   "id": "b4f301a3",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2023-02-20T13:43:23.610475Z",
     "start_time": "2023-02-20T13:43:23.602496Z"
    }
   },
   "source": [
     "# Evaluation: blank out the last token of each sentence and measure how\n",
     "# often the model predicts it correctly.\n",
     "def test(model):\n",
     "    model.eval()\n",
     "    \n",
     "    # Build the test data loader.\n",
     "    loader_test = torch.utils.data.DataLoader(\n",
     "        dataset=dataset['test'],\n",
     "        batch_size=16,\n",
     "        collate_fn=default_data_collator,\n",
     "        shuffle=True,\n",
     "        drop_last=True\n",
     "    )\n",
     "    \n",
     "    correct = 0\n",
     "    total = 0\n",
     "    for i, data in enumerate(loader_test):\n",
     "        # Only the accuracy on the final token is measured.\n",
     "        label = data['input_ids'][:, -1].clone()\n",
     "        # Blank out the last token so the model cannot cheat by seeing it.\n",
     "        data['input_ids'][:, -1] = 0\n",
     "        # Labels are not needed during evaluation.\n",
     "        data['labels'][:, :]  = 0\n",
     "        \n",
     "        # Forward pass without gradients.\n",
     "        with torch.no_grad():\n",
     "            out = model(**data)\n",
     "            \n",
     "        # Because of the one-position shift, the prediction for the last token\n",
     "        # lives at the second-to-last output position.\n",
     "        out = out['logits'].argmax(dim=2)[:, -2]\n",
     "        correct += (label==out).sum().item()\n",
     "        total += 16\n",
     "        \n",
     "        if i % 10 == 0:\n",
     "            print(i)\n",
     "            print(label)\n",
     "            print(out)\n",
     "            \n",
     "        # Evaluate on at most 51 batches to keep this quick.\n",
     "        if i == 50:\n",
     "            break\n",
     "            \n",
     "    print('accuracy: ', correct / total)\n",
     "    \n",
     "    # Show a few examples: context, true last word, predicted last word.\n",
     "    for i in range(8):\n",
     "        print(tokenizer.decode(data['input_ids'][i, :-1]))\n",
     "        print(tokenizer.decode(label[i]), tokenizer.decode(out[i]))\n",
     "        print()"
   ],
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "id": "1b0dfa37",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2023-02-20T13:17:38.003317Z",
     "start_time": "2023-02-20T13:17:29.157309Z"
    },
    "collapsed": true,
    "jupyter": {
     "outputs_hidden": true
    }
   },
   "source": [
     "# With only the pretrained weights (no fine-tuning on our data),\n",
     "# the model already reaches ~20% accuracy.\n",
     "test(model)"
   ],
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "id": "7be8612e",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2023-02-20T13:20:08.060957Z",
     "start_time": "2023-02-20T13:20:08.010093Z"
    }
   },
   "source": [
     "# Use the first GPU when available, otherwise fall back to CPU.\n",
     "device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n",
     "device"
   ],
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "id": "170af58c",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2023-02-20T13:21:24.514193Z",
     "start_time": "2023-02-20T13:21:24.490256Z"
    }
   },
   "source": [
    "from transformers import AdamW\n",
    "from transformers.optimization import get_scheduler\n"
   ],
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "id": "20ffcb73",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2023-02-20T13:33:42.651893Z",
     "start_time": "2023-02-20T13:33:42.643914Z"
    }
   },
   "source": [
     "# Fine-tune the model for one pass over the training loader.\n",
     "def train():\n",
     "    optimizer = AdamW(model.parameters(), lr=2e-5)\n",
     "    # Linearly decay the learning rate to zero over the epoch.\n",
     "    scheduler = get_scheduler(name='linear',\n",
     "                              num_warmup_steps=0,\n",
     "                              num_training_steps=len(loader),\n",
     "                              optimizer=optimizer)\n",
     "    \n",
     "    model.to(device)\n",
     "    model.train()\n",
     "    for i, data in enumerate(loader):\n",
     "        input_ids, attention_mask, labels = data['input_ids'], data['attention_mask'], data['labels']\n",
     "        input_ids, attention_mask, labels = input_ids.to(device), attention_mask.to(device), labels.to(device)\n",
     "        out = model(input_ids=input_ids, attention_mask=attention_mask, labels=labels)\n",
     "        loss = out['loss']\n",
     "        \n",
     "        loss.backward()\n",
     "        # Clip gradients for training stability.\n",
     "        torch.nn.utils.clip_grad_norm_(model.parameters(), 1)\n",
     "        \n",
     "        optimizer.step()\n",
     "        scheduler.step()\n",
     "        \n",
     "        # NOTE(review): optimizer.zero_grad() already clears these gradients;\n",
     "        # the extra model.zero_grad() is redundant but harmless.\n",
     "        optimizer.zero_grad()\n",
     "        model.zero_grad()\n",
     "        \n",
     "        if i % 50 == 0:\n",
     "            # Token-level accuracy over the shifted positions\n",
     "            # (batch size 16 x 7 predicted tokens per sample).\n",
     "            labels = labels[:, 1:]\n",
     "            out = out['logits'].argmax(dim=2)[:, :-1]\n",
     "            correct = (labels == out).sum().item()\n",
     "            accuracy = correct / (16 * 7)\n",
     "            \n",
     "            lr = optimizer.state_dict()['param_groups'][0]['lr']\n",
     "            \n",
     "            print(i, loss.item(), accuracy, lr)\n",
     "            "
   ],
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "id": "d9310cb0",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2023-02-20T13:36:28.445416Z",
     "start_time": "2023-02-20T13:33:51.416445Z"
    },
    "collapsed": true,
    "jupyter": {
     "outputs_hidden": true
    }
   },
   "source": [
    "train()"
   ],
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "id": "e3bd8cae",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2023-02-20T13:42:02.833209Z",
     "start_time": "2023-02-20T13:42:01.790996Z"
    }
   },
   "source": [
    "torch.save(model, '../data/预测最后一个词.model')"
   ],
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 36,
   "id": "6298b373",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2023-02-20T13:43:06.400958Z",
     "start_time": "2023-02-20T13:43:06.208472Z"
    }
   },
   "source": [
    "model2 = torch.load('../data/预测最后一个词.model', map_location='cpu')"
   ],
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 38,
   "id": "646fb648",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2023-02-20T13:43:42.027299Z",
     "start_time": "2023-02-20T13:43:33.001352Z"
    }
   },
   "source": [
    "test(model2)"
   ],
   "outputs": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.4"
  },
  "toc": {
   "base_numbering": 1,
   "nav_menu": {},
   "number_sections": true,
   "sideBar": true,
   "skip_h1_title": false,
   "title_cell": "Table of Contents",
   "title_sidebar": "Contents",
   "toc_cell": false,
   "toc_position": {},
   "toc_section_display": true,
   "toc_window_display": false
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
