{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "f11db778",
   "metadata": {},
   "source": [
    "\n",
    "这里以中文BERT为例，实现提及聚类："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "84ab4605-9be5-47c5-bed3-e6e9cfd449f7",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Found existing installation: transformers 4.51.3\n",
      "Uninstalling transformers-4.51.3:\n",
      "  Successfully uninstalled transformers-4.51.3\n",
      "Note: you may need to restart the kernel to use updated packages.\n"
     ]
    }
   ],
   "source": [
    "%pip uninstall transformers -y"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "bd2fd0d4-9731-4b3b-ba1f-b819542f00fe",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Files removed: 2272 (2610.5 MB)\n",
      "Note: you may need to restart the kernel to use updated packages.\n"
     ]
    }
   ],
   "source": [
    "%pip cache purge"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "8fe27c76-d07b-4d02-bc5c-27598172ec8d",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Defaulting to user installation because normal site-packages is not writeable\n",
      "Collecting transformers==4.42.4\n",
      "  Downloading transformers-4.42.4-py3-none-any.whl.metadata (43 kB)\n",
      "Requirement already satisfied: filelock in c:\\programdata\\anaconda3\\lib\\site-packages (from transformers==4.42.4) (3.13.1)\n",
      "Requirement already satisfied: huggingface-hub<1.0,>=0.23.2 in e:\\translations\\python\\python311\\site-packages (from transformers==4.42.4) (0.30.2)\n",
      "Requirement already satisfied: numpy<2.0,>=1.17 in e:\\translations\\python\\python311\\site-packages (from transformers==4.42.4) (1.26.4)\n",
      "Requirement already satisfied: packaging>=20.0 in c:\\programdata\\anaconda3\\lib\\site-packages (from transformers==4.42.4) (23.1)\n",
      "Requirement already satisfied: pyyaml>=5.1 in c:\\programdata\\anaconda3\\lib\\site-packages (from transformers==4.42.4) (6.0.1)\n",
      "Requirement already satisfied: regex!=2019.12.17 in c:\\programdata\\anaconda3\\lib\\site-packages (from transformers==4.42.4) (2023.10.3)\n",
      "Requirement already satisfied: requests in c:\\programdata\\anaconda3\\lib\\site-packages (from transformers==4.42.4) (2.31.0)\n",
      "Requirement already satisfied: safetensors>=0.4.1 in e:\\translations\\python\\python311\\site-packages (from transformers==4.42.4) (0.5.3)\n",
      "Collecting tokenizers<0.20,>=0.19 (from transformers==4.42.4)\n",
      "  Downloading tokenizers-0.19.1-cp311-none-win_amd64.whl.metadata (6.9 kB)\n",
      "Requirement already satisfied: tqdm>=4.27 in c:\\programdata\\anaconda3\\lib\\site-packages (from transformers==4.42.4) (4.65.0)\n",
      "Requirement already satisfied: fsspec>=2023.5.0 in c:\\programdata\\anaconda3\\lib\\site-packages (from huggingface-hub<1.0,>=0.23.2->transformers==4.42.4) (2023.10.0)\n",
      "Requirement already satisfied: typing-extensions>=3.7.4.3 in e:\\translations\\python\\python311\\site-packages (from huggingface-hub<1.0,>=0.23.2->transformers==4.42.4) (4.12.2)\n",
      "Requirement already satisfied: colorama in c:\\programdata\\anaconda3\\lib\\site-packages (from tqdm>=4.27->transformers==4.42.4) (0.4.6)\n",
      "Requirement already satisfied: charset-normalizer<4,>=2 in c:\\programdata\\anaconda3\\lib\\site-packages (from requests->transformers==4.42.4) (2.0.4)\n",
      "Requirement already satisfied: idna<4,>=2.5 in c:\\programdata\\anaconda3\\lib\\site-packages (from requests->transformers==4.42.4) (3.4)\n",
      "Requirement already satisfied: urllib3<3,>=1.21.1 in c:\\programdata\\anaconda3\\lib\\site-packages (from requests->transformers==4.42.4) (1.26.20)\n",
      "Requirement already satisfied: certifi>=2017.4.17 in c:\\programdata\\anaconda3\\lib\\site-packages (from requests->transformers==4.42.4) (2024.12.14)\n",
      "Downloading transformers-4.42.4-py3-none-any.whl (9.3 MB)\n",
      "   ---------------------------------------- 0.0/9.3 MB ? eta -:--:--\n",
      "   - -------------------------------------- 0.3/9.3 MB ? eta -:--:--\n",
      "   ------- -------------------------------- 1.8/9.3 MB 7.7 MB/s eta 0:00:01\n",
      "   --------------- ------------------------ 3.7/9.3 MB 8.1 MB/s eta 0:00:01\n",
      "   ---------------------------- ----------- 6.6/9.3 MB 9.8 MB/s eta 0:00:01\n",
      "   ----------------------------------- ---- 8.4/9.3 MB 10.6 MB/s eta 0:00:01\n",
      "   ---------------------------------------- 9.3/9.3 MB 9.4 MB/s eta 0:00:00\n",
      "Downloading tokenizers-0.19.1-cp311-none-win_amd64.whl (2.2 MB)\n",
      "   ---------------------------------------- 0.0/2.2 MB ? eta -:--:--\n",
      "   ---------------------------------------- 2.2/2.2 MB 15.7 MB/s eta 0:00:00\n",
      "Installing collected packages: tokenizers, transformers\n",
      "  Attempting uninstall: tokenizers\n",
      "    Found existing installation: tokenizers 0.21.1\n",
      "    Uninstalling tokenizers-0.21.1:\n",
      "      Successfully uninstalled tokenizers-0.21.1\n",
      "Successfully installed tokenizers-0.19.1 transformers-4.42.4\n",
      "Note: you may need to restart the kernel to use updated packages.\n"
     ]
    }
   ],
   "source": [
    "%pip install transformers==4.42.4"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "0c9861b9",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "The cache for model files in Transformers v4.22.0 has been updated. Migrating your old cache. This is a one-time only operation. You can interrupt this and resume the migration later on by calling `transformers.utils.move_cache()`.\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "77c256cec97d44d0b66c9cccf201e970",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "0it [00:00, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "['[CLS]', '小', '明', '给', '小', '红', '一', '束', '花', '，', '她', '很', '高', '兴', '。', '[SEP]']\n",
      "[101, 2207, 3209, 5314, 2207, 5273, 671, 3338, 5709, 8024, 1961, 2523, 7770, 1069, 511, 102]\n",
      "torch.Size([1, 16, 768])\n"
     ]
    }
   ],
   "source": [
    "import torch\n",
    "from transformers import AutoTokenizer, AutoModel\n",
    "\n",
    "# Load the Chinese BERT tokenizer and encoder.\n",
    "tokenizer = AutoTokenizer.from_pretrained(\"bert-base-chinese\")\n",
    "model = AutoModel.from_pretrained(\"bert-base-chinese\")\n",
    "\n",
    "# Tokenize the sentence and wrap it with the special tokens.\n",
    "# Use the public cls_token/sep_token attributes rather than the\n",
    "# private _cls_token/_sep_token implementation details.\n",
    "sentence = \"小明给小红一束花，她很高兴。\"\n",
    "subtokenized_sentence = tokenizer.tokenize(sentence)\n",
    "subtokenized_sentence = [tokenizer.cls_token] + \\\n",
    "    subtokenized_sentence + [tokenizer.sep_token]\n",
    "subtoken_ids_sentence = tokenizer.convert_tokens_to_ids(\n",
    "    subtokenized_sentence)\n",
    "print(subtokenized_sentence)\n",
    "print(subtoken_ids_sentence)\n",
    "\n",
    "# Compute the contextual features. Build the id tensor directly as\n",
    "# int64 instead of creating a float tensor and casting with .long(),\n",
    "# which would round-trip the token ids through float32.\n",
    "outputs = model(\n",
    "    torch.tensor(subtoken_ids_sentence, dtype=torch.long).unsqueeze(0))\n",
    "hidden_states = outputs[0]\n",
    "print(hidden_states.shape)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "49d7247c",
   "metadata": {},
   "source": [
    "假设已经通过提及检测模型找到了句子中的提及，这里用每个提及的第一个子词（在中文中也就是第一个字）作为词特征："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "60d1eedf",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "torch.Size([4, 768])\n"
     ]
    }
   ],
   "source": [
    "# Mention spans; (-1,0) is the span of [CLS], used here to stand in\n",
    "# for the empty mention [NA]. In real training a dedicated\n",
    "# empty-mention symbol could be defined instead.\n",
    "mention_spans = [(-1,0),(0,2),(3,5),(10,11)]\n",
    "# Use the first subtoken of each mention as its feature (+1 skips\n",
    "# the [CLS] position). torch.stack is the idiomatic replacement for\n",
    "# the unsqueeze/concat pattern; the span end is unused, hence '_'.\n",
    "word_features = torch.stack([hidden_states[0, start + 1]\n",
    "    for (start, _) in mention_spans], 0)\n",
    "print(word_features.shape)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "fd5d787f",
   "metadata": {},
   "source": [
    "首先，通过双仿射函数计算打分。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "ca991cb8",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([[[    -inf,     -inf,     -inf,     -inf],\n",
      "         [206.4280,     -inf,     -inf,     -inf],\n",
      "         [114.6614, 368.4140,     -inf,     -inf],\n",
      "         [389.9779,  74.8093, 238.3196,     -inf]]], grad_fn=<AddBackward0>)\n",
      "tensor([[0, 1, 0]])\n"
     ]
    }
   ],
   "source": [
    "import sys\n",
    "# NOTE(review): hardcoded absolute local path — the notebook will not\n",
    "# run on other machines; prefer a configurable/relative location.\n",
    "sys.path.append('E:\\孙大圣\\@Hands-on-NLP-main\\@Hands-on-NLP-main\\code')\n",
    "from my_utils import Biaffine\n",
    "biaffine = Biaffine(word_features.shape[1])\n",
    "\n",
    "# Score every pair of mentions with the biaffine function.\n",
    "scores = biaffine(word_features.unsqueeze(0),\\\n",
    "    word_features.unsqueeze(0))\n",
    "# We only care whether a mention corefers with a PRECEDING mention,\n",
    "# so keep the strictly lower-triangular part of the score matrix and\n",
    "# set the upper triangle (including the diagonal) to minus infinity:\n",
    "scores = scores.tril(diagonal=-1)\n",
    "inf_mask = torch.zeros_like(scores)-torch.inf\n",
    "inf_mask = inf_mask.triu()\n",
    "scores += inf_mask\n",
    "print(scores)\n",
    "# Row 0 ([NA]) is all -inf, so only rows 1: are meaningful.\n",
    "print(scores.argmax(-1)[:,1:])"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "de88f8d7",
   "metadata": {},
   "source": [
    "由于模型未经过训练，因此仅通过初始化的双仿射函数获得的结构显然是错误的。我们可以训练模型，损失函数的计算方式如下："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "9f2abedd",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor(189.6404, grad_fn=<NllLossBackward0>)\n"
     ]
    }
   ],
   "source": [
    "# Compute the loss for all mentions except [NA].\n",
    "target = torch.tensor([0, 0, 1], dtype=torch.long)\n",
    "# cross_entropy fuses log_softmax + NLLLoss into one numerically\n",
    "# equivalent call, so the two-step construction is unnecessary.\n",
    "loss = torch.nn.functional.cross_entropy(\n",
    "    scores[:,1:].squeeze(0), target)\n",
    "print(loss)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "880e61fb",
   "metadata": {},
   "source": [
    "接下来通过点积计算打分。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "f6a04704",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([[    -inf,     -inf,     -inf,     -inf],\n",
      "        [235.2012,     -inf,     -inf,     -inf],\n",
      "        [188.3145, 267.1165,     -inf,     -inf],\n",
      "        [221.3709, 101.3911, 292.7801,     -inf]], grad_fn=<AddBackward0>)\n",
      "tensor([0, 1, 2])\n"
     ]
    }
   ],
   "source": [
    "# Score mention pairs with a plain dot product.\n",
    "scores2 = word_features @ word_features.T\n",
    "# Only antecedents (earlier mentions) are valid, so keep the strictly\n",
    "# lower-triangular part and fill the rest with negative infinity.\n",
    "neg_inf = torch.full_like(scores2, -torch.inf).triu()\n",
    "scores2 = scores2.tril(diagonal=-1) + neg_inf\n",
    "print(scores2)\n",
    "# Skip row 0 ([NA]), which is entirely -inf.\n",
    "print(scores2.argmax(-1)[1:])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2e5a10ed-59c3-41d7-ab07-e95269b3e420",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
