{
 "cells": [
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": [
    "# 要点\n",
    "1. 不同的head起到不同的作用\n",
    "2. model的输入是数组列表 不是原始字符\n",
    "3. 结合type和类型 查看参数信息\n",
    "4. 分类任务注意替换 `id2label`\n",
    "5. word_ids的用法"
   ],
   "id": "269a8a01b76e0820"
  },
  {
   "cell_type": "code",
   "id": "initial_id",
   "metadata": {
    "collapsed": true,
    "ExecuteTime": {
     "end_time": "2025-05-20T13:32:34.087002Z",
     "start_time": "2025-05-20T13:32:31.138169Z"
    }
   },
   "source": [
    "import torch\n",
    "import transformers\n",
    "\n",
    "# NOTE(review): star import presumably supplies Config (model paths) -- confirm against MyHelper;\n",
    "# torch is imported explicitly above instead of relying on the star import leaking it.\n",
    "from MyHelper import *"
   ],
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\51165\\.conda\\envs\\e12\\Lib\\site-packages\\tqdm\\auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
      "  from .autonotebook import tqdm as notebook_tqdm\n"
     ]
    }
   ],
   "execution_count": 1
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-20T13:32:36.314225Z",
     "start_time": "2025-05-20T13:32:34.201254Z"
    }
   },
   "cell_type": "code",
   "source": "model = transformers.AutoModel.from_pretrained(Config.hfl_rbt3, trust_remote_code=True)",
   "id": "52e1d709a1e6366f",
   "outputs": [],
   "execution_count": 2
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-20T13:32:36.326124Z",
     "start_time": "2025-05-20T13:32:36.322141Z"
    }
   },
   "cell_type": "code",
   "source": [
    "config = model.config\n",
    "config, type(config)"
   ],
   "id": "c6f1fefd907715c",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(BertConfig {\n",
       "   \"_attn_implementation_autoset\": true,\n",
       "   \"architectures\": [\n",
       "     \"BertForMaskedLM\"\n",
       "   ],\n",
       "   \"attention_probs_dropout_prob\": 0.1,\n",
       "   \"classifier_dropout\": null,\n",
       "   \"directionality\": \"bidi\",\n",
       "   \"hidden_act\": \"gelu\",\n",
       "   \"hidden_dropout_prob\": 0.1,\n",
       "   \"hidden_size\": 768,\n",
       "   \"initializer_range\": 0.02,\n",
       "   \"intermediate_size\": 3072,\n",
       "   \"layer_norm_eps\": 1e-12,\n",
       "   \"max_position_embeddings\": 512,\n",
       "   \"model_type\": \"bert\",\n",
       "   \"num_attention_heads\": 12,\n",
       "   \"num_hidden_layers\": 3,\n",
       "   \"output_past\": true,\n",
       "   \"pad_token_id\": 0,\n",
       "   \"pooler_fc_size\": 768,\n",
       "   \"pooler_num_attention_heads\": 12,\n",
       "   \"pooler_num_fc_layers\": 3,\n",
       "   \"pooler_size_per_head\": 128,\n",
       "   \"pooler_type\": \"first_token_transform\",\n",
       "   \"position_embedding_type\": \"absolute\",\n",
       "   \"torch_dtype\": \"float32\",\n",
       "   \"transformers_version\": \"4.51.3\",\n",
       "   \"type_vocab_size\": 2,\n",
       "   \"use_cache\": true,\n",
       "   \"vocab_size\": 21128\n",
       " },\n",
       " transformers.models.bert.configuration_bert.BertConfig)"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 3
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-20T13:32:36.666936Z",
     "start_time": "2025-05-20T13:32:36.340641Z"
    }
   },
   "cell_type": "code",
   "source": [
    "sen = \"弱小的我也有大Dreaming!\"\n",
    "tokenizer = transformers.AutoTokenizer.from_pretrained(Config.hfl_rbt3)\n",
    "# Despite the name, this holds the full encoding dict (input_ids / token_type_ids / attention_mask).\n",
    "input_ids = tokenizer(sen, return_tensors=\"pt\")\n",
    "outputs = model(**input_ids)\n",
    "for key, value in outputs.items():\n",
    "    print(key, value.shape)\n",
    "input_ids"
   ],
   "id": "6ad6eda4e7940d52",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "last_hidden_state torch.Size([1, 12, 768])\n",
      "pooler_output torch.Size([1, 768])\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "{'input_ids': tensor([[  101,  2483,  2207,  4638,  2769,   738,  3300,  1920, 10252,  8221,\n",
       "           106,   102]]), 'token_type_ids': tensor([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]), 'attention_mask': tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])}"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 4
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "返回attentions",
   "id": "35d481fa4bf8037c"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-20T13:32:37.437382Z",
     "start_time": "2025-05-20T13:32:36.716406Z"
    }
   },
   "cell_type": "code",
   "source": [
    "model = transformers.AutoModel.from_pretrained(Config.hfl_rbt3, trust_remote_code=True, output_attentions=True)\n",
    "outputs = model(**input_ids)\n",
    "outputs.keys()"
   ],
   "id": "5ed96c060a2c379c",
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "BertSdpaSelfAttention is used but `torch.nn.functional.scaled_dot_product_attention` does not support non-absolute `position_embedding_type` or `output_attentions=True` or `head_mask`. Falling back to the manual attention implementation, but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation=\"eager\"` when loading the model.\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "odict_keys(['last_hidden_state', 'pooler_output', 'attentions'])"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 5
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "带head的模型调用",
   "id": "1c47705e6c24ea36"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-20T13:32:44.379259Z",
     "start_time": "2025-05-20T13:32:43.619813Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Load a sequence-classification head on top of the encoder; attentions are returned when requested.\n",
    "output_attn = True\n",
    "clz_model = transformers.AutoModelForSequenceClassification.from_pretrained(Config.hfl_rbt3, output_attentions=output_attn)\n",
    "out = clz_model(**input_ids)\n",
    "print(type(clz_model))\n",
    "if not output_attn:\n",
    "    print(out)\n",
    "else:\n",
    "    print(out.keys(), out.logits.shape, out.attentions[0].shape, out.attentions[1].shape, out.attentions[2].shape)"
   ],
   "id": "856f515f973d5c94",
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Some weights of BertForSequenceClassification were not initialized from the model checkpoint at hfl/rbt3 and are newly initialized: ['classifier.bias', 'classifier.weight']\n",
      "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<class 'transformers.models.bert.modeling_bert.BertForSequenceClassification'>\n",
      "odict_keys(['logits', 'attentions']) torch.Size([1, 2]) torch.Size([1, 12, 12, 12]) torch.Size([1, 12, 12, 12]) torch.Size([1, 12, 12, 12])\n"
     ]
    }
   ],
   "execution_count": 6
  },
  {
   "metadata": {},
   "cell_type": "code",
   "source": [
    "# The bare expression was previously discarded (only a cell's last statement displays);\n",
    "# print it explicitly so the label count shows alongside the docstring.\n",
    "print(clz_model.config.num_labels)\n",
    "# print(transformers.models.bert.configuration_bert.BertConfig.__doc__)\n",
    "print(transformers.models.bert.modeling_bert.BertForSequenceClassification.__doc__)"
   ],
   "id": "eac21caa3a3c4e8a",
   "outputs": [],
   "execution_count": null
  },
  {
   "metadata": {},
   "cell_type": "code",
   "source": [
    "from torch.utils.data import Dataset\n",
    "import pandas as pd\n",
    "\n",
    "class MyDataset(Dataset):\n",
    "    \"\"\"Hotel-review sentiment dataset; yields (review_text, label) pairs.\"\"\"\n",
    "\n",
    "    def __init__(self) -> None:\n",
    "        super().__init__()\n",
    "        self.data = pd.read_csv(\"data/ChnSentiCorp_htl_all.csv\")\n",
    "        self.data = self.data.dropna()\n",
    "\n",
    "    def __getitem__(self, index):\n",
    "        return self.data.iloc[index][\"review\"], self.data.iloc[index][\"label\"]\n",
    "\n",
    "    def __len__(self):\n",
    "        return len(self.data)\n",
    "\n",
    "def collate_func(batch):\n",
    "    \"\"\"Tokenize a batch of (text, label) pairs into padded, truncated model inputs with a labels tensor.\"\"\"\n",
    "    texts = [item[0] for item in batch]\n",
    "    labels = [item[1] for item in batch]\n",
    "    inputs = tokenizer(texts, max_length=128, padding=\"max_length\", truncation=True, return_tensors=\"pt\")\n",
    "    inputs[\"labels\"] = torch.tensor(labels)\n",
    "    return inputs\n",
    "\n",
    "# Seed the split so train/valid membership is reproducible across Restart-&-Run-All.\n",
    "split_gen = torch.Generator().manual_seed(42)\n",
    "train_ds, valid_ds = torch.utils.data.random_split(MyDataset(), lengths=[0.9, 0.1], generator=split_gen)\n",
    "print(f'train dataset size: {len(train_ds)}, valid dataset size: {len(valid_ds)}')\n",
    "\n",
    "train_dataloader = torch.utils.data.DataLoader(train_ds, batch_size=32, shuffle=True, collate_fn=collate_func)\n",
    "valid_dataloader = torch.utils.data.DataLoader(valid_ds, batch_size=64, shuffle=False, collate_fn=collate_func)\n",
    "print(f'train dataloader size: {len(train_dataloader)}, valid dataloader size: {len(valid_dataloader)}')"
   ],
   "id": "2057d8826e01fa06",
   "outputs": [],
   "execution_count": null
  },
  {
   "metadata": {},
   "cell_type": "code",
   "source": [
    "# next(enumerate(valid_dataloader))[1]\n",
    "# next(iter(valid_dataloader))"
   ],
   "id": "d4508cf7d1bcf9e5",
   "outputs": [],
   "execution_count": null
  },
  {
   "metadata": {},
   "cell_type": "code",
   "source": [
    "model = transformers.AutoModelForSequenceClassification.from_pretrained(Config.hfl_rbt3, trust_remote_code=True)\n",
    "if torch.cuda.is_available():\n",
    "    model = model.cuda()\n",
    "\n",
    "optimizer = torch.optim.Adam(model.parameters(), lr=1e-5)\n",
    "\n",
    "def evaluate():\n",
    "    \"\"\"Return validation accuracy. Renamed from `eval` to avoid shadowing the Python builtin.\"\"\"\n",
    "    model.eval()\n",
    "    acc_num = 0\n",
    "    with torch.inference_mode():\n",
    "        for batch in valid_dataloader:\n",
    "            # Use model.device so CPU/GPU transfer matches the training loop.\n",
    "            batch = {k: v.to(model.device) for k, v in batch.items()}\n",
    "            output = model(**batch)\n",
    "            pred = torch.argmax(output.logits, dim=-1)\n",
    "            acc_num += (pred.long() == batch[\"labels\"].long()).float().sum()\n",
    "    return acc_num / len(valid_ds)\n",
    "\n",
    "def train(epoch=3, log_step=30):\n",
    "    \"\"\"Fine-tune for `epoch` epochs, logging loss every `log_step` optimizer steps.\"\"\"\n",
    "    global_step = 0\n",
    "    for ep in range(epoch):\n",
    "        model.train()\n",
    "        for batch in train_dataloader:\n",
    "            batch = {k: v.to(model.device) for k, v in batch.items()}\n",
    "            optimizer.zero_grad()\n",
    "            outputs = model(**batch)\n",
    "            outputs[\"loss\"].backward()\n",
    "            optimizer.step()\n",
    "            if global_step % log_step == 0:\n",
    "                print(f\"epoch: {ep}, global_step: {global_step}, loss: {outputs['loss'].item()}\")\n",
    "            global_step += 1\n",
    "        acc = evaluate()\n",
    "        print(f\"epoch: {ep}, acc: {acc}\")\n",
    "\n",
    "train()"
   ],
   "id": "ec5ef703f57c26d1",
   "outputs": [],
   "execution_count": null
  },
  {
   "metadata": {},
   "cell_type": "code",
   "source": [
    "from transformers import pipeline\n",
    "sen = \"我觉得这家酒店不错，饭很好吃！\"\n",
    "id2_label = {0: \"差评！\", 1: \"好评！\"}\n",
    "# Replace the generic LABEL_0/LABEL_1 mapping so the pipeline reports human-readable labels.\n",
    "model.config.id2label = id2_label\n",
    "# device=0 requires CUDA; fall back to CPU (-1) so this cell also runs on machines without a GPU.\n",
    "device = 0 if torch.cuda.is_available() else -1\n",
    "pipe = pipeline(\"text-classification\", model=model, tokenizer=tokenizer, device=device)\n",
    "pipe(sen)"
   ],
   "id": "873e1e1c1bc3c9c6",
   "outputs": [],
   "execution_count": null
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
