{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "35f5da24",
   "metadata": {},
   "source": [
    "下面的代码展示了Eisner算法："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "29443580",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 代码来源于GitHub项目yzhangcs/crfpar \n",
    "# (Copyright (c) 2020 Yu Zhang, MIT License（见附录）)\n",
    "import torch\n",
    "import sys\n",
    "sys.path.append('./code')\n",
    "from my_utils import stripe, pad\n",
    "\n",
    "\n",
    "def eisner(scores, mask):\n",
    "    '''\n",
    "    scores：大小为批大小 * 序列长度 * 序列长度，\n",
    "    每个位置表示依存关系的打分，\n",
    "    例如scores[0,1,2]就表示第0个输入样例上，\n",
    "    边2->1的打分，2为中心词，1为依存词。\n",
    "    \n",
    "    mask：批大小 * 序列长度，掩码长度与句子长度相同。\n",
    "    '''\n",
    "    # 获取输入的基本信息\n",
    "    lens = mask.sum(1)-1\n",
    "    batch_size, seq_len, _ = scores.shape\n",
    "    # 将scores矩阵从(batch,dep,head)形式转成(head,dep,batch)形式，\n",
    "    # 方便并行计算\n",
    "    scores = scores.permute(2, 1, 0)\n",
    "    # 初始化不完整跨度情况下的打分\n",
    "    s_i = torch.full_like(scores, float('-inf'))\n",
    "    # 初始化完整跨度情况下的打分\n",
    "    s_c = torch.full_like(scores, float('-inf'))\n",
    "    # 保存两种情况下的max j的位置\n",
    "    p_i = scores.new_zeros(seq_len, seq_len, batch_size).long()\n",
    "    p_c = scores.new_zeros(seq_len, seq_len, batch_size).long()\n",
    "    # 初始化完整跨度下长度为0的打分\n",
    "    s_c.diagonal().fill_(0)\n",
    "\n",
    "    for w in range(1, seq_len):\n",
     "        # 通过seq_len - w可以计算出当前有多少个长度为w的跨度\n",
    "        n = seq_len - w\n",
    "        # 根据n生成0到n的列表\n",
    "        starts = p_i.new_tensor(range(n)).unsqueeze(0)\n",
    "        \n",
    "        # ---计算不完整跨度s(i,k,R,I)和s(i,k,L,I)的得分与最大值---\n",
    "        \n",
    "        # 计算s(i,j,R,C)+s(j+1,k,L,C)的值，\n",
    "        # 对于s(i,k,R,I)和s(i,k,L,I)的计算过程中，这部分相同\n",
    "        ilr = stripe(s_c, n, w) + stripe(s_c, n, w, (w, 1))\n",
    "        # n * w * batch_size -> batch_size * n * w\n",
    "        il = ir = ilr.permute(2, 0, 1)\n",
    "        # 在s(i,k,L,I)中，计算max(s(i,j,R,C)+s(j+1,k,L,C))的值\n",
    "        # 以及相应的位置\n",
    "        il_span, il_path = il.max(-1)\n",
     "        # 在求s_{ki}的过程中，我们的计算过程与第10章成分句法分析\n",
    "        # 中的基于跨度的方法类似。\n",
    "        # 不同的是由于k>i，因此在diagonal命令时需要用-w，让对角线下移\n",
    "        # 具体细节请查看PyTorch文档\n",
    "        s_i.diagonal(-w).copy_(il_span + scores.diagonal(-w))\n",
    "        # 保留最大的j值\n",
    "        p_i.diagonal(-w).copy_(il_path + starts)\n",
    "        \n",
    "        # 在s(i,k,R,I)中，计算max(s(i,j,R,C)+s(j+1,k,L,C))的值\n",
    "        # 以及相应的位置\n",
    "        ir_span, ir_path = ir.max(-1)\n",
    "        # 求s_{ik}，此时对角线上移\n",
    "        # 与此同时，这种方式可以保证s_i保存的方向为L的值与\n",
    "        # 方向为R的值互相不冲突，下同\n",
    "        s_i.diagonal(w).copy_(ir_span + scores.diagonal(w))\n",
    "        # 保留最大的j值\n",
    "        p_i.diagonal(w).copy_(ir_path + starts)\n",
    "        \n",
    "        \n",
     "        # ---计算完整跨度s(i,k,L,C)和s(i,k,R,C)的得分与最大值---\n",
    "        \n",
    "        # 计算 s(i,j,L,C)+s(j,k,L,I) \n",
    "        cl = stripe(s_c, n, w, (0, 0), 0) + stripe(s_i, n, w, (w, 0))\n",
    "        cl_span, cl_path = cl.permute(2, 0, 1).max(-1)\n",
    "        # 将最大的得分进行保存\n",
    "        s_c.diagonal(-w).copy_(cl_span)\n",
    "        # 将最大的得分的位置进行保存\n",
    "        p_c.diagonal(-w).copy_(cl_path + starts)\n",
    "        \n",
    "        # 计算 s(i,j,R,I)+s(j,k,R,C)\n",
    "        cr = stripe(s_i, n, w, (0, 1)) + stripe(s_c, n, w, (1, w), 0)\n",
    "        cr_span, cr_path = cr.permute(2, 0, 1).max(-1)\n",
    "        # 将最大的得分进行保存\n",
    "        s_c.diagonal(w).copy_(cr_span)\n",
    "        # 将句子长度不等于w的(0,w)得分置为负无穷，\n",
    "        # 因为其在结构上不可能存在\n",
    "        s_c[0, w][lens.ne(w)] = float('-inf')\n",
    "        # 将最大的得分的位置进行保存\n",
    "        p_c.diagonal(w).copy_(cr_path + starts + 1)\n",
    "\n",
    "    def backtrack(p_i, p_c, heads, i, k, complete):\n",
    "        # 通过分治法找到当前跨度的最优分割\n",
    "        if i == k:\n",
    "            return\n",
    "        if complete:\n",
    "            # 如果当前跨度是完整跨度，取出得分最大的位置\n",
    "            j = p_c[i, k]\n",
    "            # 分别追溯s(i,j,I)和s(j,k,C)的最大值\n",
    "            backtrack(p_i, p_c, heads, i, j, False)\n",
    "            backtrack(p_i, p_c, heads, j, k, True)\n",
    "        else:\n",
    "            # 由于当前跨度是不完整跨度，因此根据定义，k的父节点一定是i\n",
    "            j, heads[k] = p_i[i, k], i\n",
    "            i, k = sorted((i, k))\n",
    "            # 追溯s(i,j,C)和s(j+1,k,C)的最大值\n",
    "            backtrack(p_i, p_c, heads, i, j, True)\n",
    "            backtrack(p_i, p_c, heads, k, j + 1, True)\n",
    "\n",
    "    preds = []\n",
    "    p_c = p_c.permute(2, 0, 1).cpu()\n",
    "    p_i = p_i.permute(2, 0, 1).cpu()\n",
    "    # 追溯最终生成的每个词的父节点\n",
    "    for i, length in enumerate(lens.tolist()):\n",
    "        heads = p_c.new_zeros(length + 1, dtype=torch.long)\n",
    "        backtrack(p_i[i], p_c[i], heads, 0, length, True)\n",
    "        preds.append(heads.to(mask.device))\n",
    "\n",
    "    return pad(preds, total_length=seq_len).to(mask.device)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "b9b06fc0",
   "metadata": {},
   "source": [
    "给定输入句子“she learns the book hands-on-NLP”，<!--我们的依存分析模型为它的每个部分的打分，-->让我们来看最终输出结果："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "5f82f589",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "依存关系预测结果： tensor([[0, 0, 1, 2, 3, 4]])\n"
     ]
    }
   ],
   "source": [
    "# 创建分数矩阵\n",
    "score = torch.Tensor([\n",
    "    [ -1,  -1,  -1,  -1,  -1, -1],\n",
    "    [ -1,  -1,  1,  -1,  -1, -1],\n",
    "    [ 1, -1, -1, -1, -1, -1],\n",
    "    [ -1, -1, -1, -1, -1, 1],\n",
    "    [ -1, -1, -1, -1, -1, 1],\n",
    "    [ -1, -1, 1, -1, -1, -1]]).unsqueeze(0)\n",
    "\n",
    "mask = torch.ones_like(score[:,:,0]).long()\n",
    "\n",
    "deps=eisner(score,mask)\n",
    "# deps 中第0位为根节点\n",
    "print(\"依存关系预测结果：\", deps)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "90c41ccf",
   "metadata": {},
   "source": [
    "现在，我们来画一下这个依存句法树。这里使用HanLP代码包来画这个依存句法树。<!--hanlp是一个优秀的面向中文以及其他多语言的自然语言处理工具包。-->由于没有为标签进行打分，因此这里只给根节点打上ROOT标签，其余依存边无标签。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "51d3bd4a",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div style=\"display: table; padding-bottom: 1rem;\"><pre style=\"display: table-cell; font-family: SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,monospace; white-space: nowrap; line-height: 128%; padding: 0;\">Dep&nbsp;Tree&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<br>────────────&nbsp;<br>┌───────────&nbsp;<br>└─►┌────────&nbsp;<br>&nbsp;&nbsp;&nbsp;└─►┌─────&nbsp;<br>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;└─►┌──&nbsp;<br>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;└─►&nbsp;</pre><pre style=\"display: table-cell; font-family: SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,monospace; white-space: nowrap; line-height: 128%; padding: 0;\">Token&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<br>────────────&nbsp;<br>she&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<br>learns&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<br>the&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<br>book&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<br>hands-on-NLP&nbsp;</pre><pre style=\"display: table-cell; font-family: SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,monospace; white-space: nowrap; line-height: 128%; padding: 0;\">Rela<br>────<br>ROOT<br>&nbsp;&nbsp;&nbsp;&nbsp;<br>&nbsp;&nbsp;&nbsp;&nbsp;<br>&nbsp;&nbsp;&nbsp;&nbsp;<br>&nbsp;&nbsp;&nbsp;&nbsp;</pre></div>"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "# !pip install -e hanlp_common\n",
    "from hanlp_common.document import Document\n",
    "# from document import Document\n",
    "\n",
    "tokens = [\"she\",\"learns\",\"the\",\"book\",\"hands-on-NLP\"]\n",
    "dependencies = [[x.item(), '' if x.item()!=0 else \"ROOT\"]\\\n",
    "    for x in deps[0,1:]]\n",
    "doc = Document(tok=tokens,dep=dependencies)\n",
    "doc.pretty_print()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "8957599c",
   "metadata": {},
   "source": [
    "以下是MST的代码实现。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "aeff5fb7",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 代码来源于GitHub项目tdozat/Parser-v1 \n",
    "# (Copyright (c) 2016 Timothy Dozat, Apache-2.0 License（见附录）)\n",
    "import numpy as np\n",
    "\n",
    "sys.path.append('./code')\n",
    "from tarjan import Tarjan\n",
    "\n",
    "def MST_inference(parse_probs, length, mask, ensure_tree = True):\n",
    "    # parse_probs：模型预测的每个词的父节点的概率分布，\n",
    "    # 大小为 length * length，顺序为(孩子节点,父节点)\n",
    "    # length：当前句子长度\n",
    "    # mask：与parse_probs大小一致，表示这句话的掩码\n",
    "    if ensure_tree:\n",
    "        # 根据mask大小，生成单位矩阵\n",
    "        I = np.eye(len(mask))\n",
    "        # 去除不合理元素，其中，通过(1-I)将对角线上的元素去除，\n",
    "        # 因为句法树不可能存在自环\n",
    "        parse_probs = parse_probs * mask * (1-I)\n",
    "        # 求出每个位置上概率最高的父节点\n",
    "        parse_preds = np.argmax(parse_probs, axis=1)\n",
    "        tokens = np.arange(1, length)\n",
    "        # 确认目前的根节点\n",
    "        roots = np.where(parse_preds[tokens] == 0)[0]+1\n",
    "        # 当没有根节点时，保证至少有一个根节点\n",
    "        if len(roots) < 1:\n",
    "            # 当前每个位置对根节点的概率\n",
    "            root_probs = parse_probs[tokens,0]\n",
    "            # 当前每个位置对概率最高的节点的概率\n",
    "            old_head_probs = parse_probs[tokens, parse_preds[tokens]]\n",
    "            # 计算根节点与概率最高节点的比值，作为选取根节点的相对概率\n",
    "            new_root_probs = root_probs / old_head_probs\n",
    "            # 选择最可能的根节点\n",
    "            new_root = tokens[np.argmax(new_root_probs)]\n",
    "            # 更新预测结果\n",
    "            parse_preds[new_root] = 0\n",
    "        # 当根节点数量超过1时，让根节点数量变为1\n",
    "        elif len(roots) > 1:\n",
    "            # 当前父节点的概率\n",
    "            root_probs = parse_probs[roots,0]\n",
    "            # 让当前所有的依存于根节点的位置（roots）归零\n",
    "            parse_probs[roots,0] = 0\n",
    "            # 获得新的潜在的父节点及其概率\n",
    "            new_heads = np.argmax(parse_probs[roots][:,\\\n",
    "                tokens], axis=1)+1\n",
    "            new_head_probs = parse_probs[roots,\\\n",
    "                new_heads] / root_probs\n",
    "            # 选择roots的潜在的新的父节点中，概率最小的位置，\n",
    "            # 将其父节点作为根节点\n",
    "            new_root = roots[np.argmin(new_head_probs)]\n",
    "            # 更新预测结果\n",
    "            parse_preds[roots] = new_heads\n",
    "            parse_preds[new_root] = 0\n",
    "        # 在通过贪心的方式获得所有位置的父节点后，\n",
     "        # 使用Tarjan算法找到当前图中的强连通分量，\n",
     "        # 使用MST算法将其中的环解除并且重新进行连接\n",
    "        tarjan = Tarjan(parse_preds, tokens)\n",
     "        # 当前的强连通分量（环）\n",
    "        cycles = tarjan.SCCs\n",
    "        for SCC in tarjan.SCCs:\n",
     "            # 当强连通分量里的节点数量超过1个，说明其中有环\n",
    "            if len(SCC) > 1:\n",
    "                dependents = set()\n",
    "                to_visit = set(SCC)\n",
    "                # 将环内所有的节点以及它们所连接的外部节点\n",
    "                # 都加入孩子节点中\n",
    "                while len(to_visit) > 0:\n",
    "                    node = to_visit.pop()\n",
    "                    if not node in dependents:\n",
    "                        dependents.add(node)\n",
    "                        # 将当前节点指向的节点（孩子节点）\n",
    "                        # 加入要访问的队列中\n",
    "                        to_visit.update(tarjan.edges[node])\n",
    "                # 参与循环的节点的位置\n",
    "                cycle = np.array(list(SCC))\n",
    "                # 当前父节点的概率\n",
    "                old_heads = parse_preds[cycle]\n",
    "                old_head_probs = parse_probs[cycle, old_heads]\n",
    "                # 为了计算环里每个节点的新的父节点，\n",
     "                # 由于这些节点的孩子节点显然不可能成为它们的父节点，\n",
    "                # 因此需要将它们的概率置为0\n",
    "                non_heads = np.array(list(dependents))\n",
    "                parse_probs[np.repeat(cycle, len(non_heads)),\\\n",
    "                    np.repeat([non_heads], len(cycle),\\\n",
    "                    axis=0).flatten()] = 0\n",
    "                # 新的概率分布下，求得环内所有节点新的\n",
    "                # 潜在父节点及其概率\n",
    "                new_heads = np.argmax(parse_probs[cycle][:,\\\n",
    "                    tokens], axis=1)+1\n",
    "                # 与旧的父节点计算比例\n",
    "                new_head_probs = parse_probs[cycle,\\\n",
    "                    new_heads] / old_head_probs\n",
    "                # 选择最有可能的变化，这样对于树的整体概率\n",
    "                # 影响最小，同时能将当前的环解除\n",
    "                change = np.argmax(new_head_probs)\n",
    "                changed_cycle = cycle[change]\n",
    "                old_head = old_heads[change]\n",
    "                new_head = new_heads[change]\n",
    "                # 更新预测结果\n",
    "                parse_preds[changed_cycle] = new_head\n",
    "                tarjan.edges[new_head].add(changed_cycle)\n",
    "                tarjan.edges[old_head].remove(changed_cycle)\n",
    "        return parse_preds\n",
    "    else:\n",
    "        # 当不强制要求树结构时，直接将预测结果返回\n",
    "        parse_probs = parse_probs * mask\n",
    "        parse_preds = np.argmax(parse_probs, axis=1)\n",
    "        return parse_preds"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "201b4abb",
   "metadata": {},
   "source": [
     "下面，我们设计一个在使用11.2.2节所介绍的中心词选择方式解码时会导致有环的情况，来看看MST算法的运行结果："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "0b87c935",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "不使用MST算法得到的依存关系为： [0 2 0 5 5 4]\n",
      "使用MST算法得到的依存关系为： [0 2 0 5 5 2]\n"
     ]
    }
   ],
   "source": [
    "\n",
    "# 第5个词的分数最高的中心词为第4个词，形成4->5 5->4的环\n",
    "score = np.array([\n",
    "    [ -1,  -1,  -1,  -1,  -1, -1],\n",
    "    [ -1,  -1,  1,  -1,  -1, -1],\n",
    "    [ 1, -1, -1, -1, -1, -1],\n",
    "    [ -1, -1, -1, -1, -1, 1],\n",
    "    [ -1, -1, -1, -1, -1, 1],\n",
    "    [ -1, -1, 1, -1, 1.1, -1]]) \n",
    "\n",
    "mask = np.ones_like(score)\n",
    "# 可以看出直接预测最大值会有环形成\n",
    "print('不使用MST算法得到的依存关系为：',np.argmax(score,1))\n",
    "deps=MST_inference(score,len(mask),mask)\n",
    "print('使用MST算法得到的依存关系为：',deps)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "81fb5488",
   "metadata": {},
   "source": [
    "这里我们来简单求一下边的交叉熵损失：\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "2c9eed6d",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor(0.6081)\n"
     ]
    }
   ],
   "source": [
    "score = torch.Tensor([\n",
    "    [ -1,  -1,  -1,  -1,  -1, -1],\n",
    "    [ -1,  -1,  1,  -1,  -1, -1],\n",
    "    [ 1, -1, -1, -1, -1, -1],\n",
    "    [ -1, -1, -1, -1, -1, 1],\n",
    "    [ -1, -1, -1, -1, -1, 1],\n",
    "    [ -1, -1, 1, -1, 1.1, -1]])\n",
    "# 假设我们的目标\n",
    "target = torch.Tensor([2,0,5,5,2]).long()\n",
    "# 计算交叉熵损失\n",
    "loss_func = torch.nn.NLLLoss()\n",
    "loss = loss_func(torch.nn.functional.log_softmax(score[1:],-1),target)\n",
    "print(loss)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "995f769b",
   "metadata": {},
   "source": [
    "<!--#### 推理代码实现-->\n",
    "\n",
    "下面提供一套代码来展示在解码过程中如何根据转移动作的打分去对栈和缓存进行操作。如果读者想要运行该代码，需自行定义model。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "10e02c70",
   "metadata": {},
   "outputs": [],
   "source": [
    "SHIFT_ID=0\n",
    "# 假设left_arc有两个label，nsubj和dobj\n",
    "LEFT_ARC_ID = {1: 'nsubj',2: 'dobj'}\n",
    "# 假设right_arc有3个label，nsubj、dobj和root\n",
    "RIGHT_ARC_ID = {3:'nsubj',4:'dobj',5:'root'}\n",
    "\n",
    "\n",
    "def decode(words,model):\n",
    "    # words：每个元素为(word_idx, word_text)的元组，\n",
    "    # word_idx为句子中的位置，word_text则为文本\n",
    "    # model：这里不具体构建模型，仅作为一个示例\n",
    "    # 缓存buffer初始化，将words翻转，能够保证pop()操作\n",
    "    # 能够从前往后进行\n",
    "    buffer = words[::-1]\n",
    "    # 栈stack初始化，0表示root节点\n",
    "    stack = [(0,'ROOT')]\n",
     "    # 保存生成的边\n",
     "    deps = []\n",
     "    # 保存历史转移动作，作为模型打分的输入\n",
     "    history = []\n",
     "    # 循环转移迭代\n",
     "    while 1:\n",
     "        # 模型通过buffer、stack和history计算下一步操作的打分\n",
     "        log_probs = model(buffer,stack,history)\n",
     "        # 得到得分最高的操作id，范围为[0,5]\n",
     "        action_id = torch.max(log_probs, -1)[1]\n",
     "        # 记录历史转移动作\n",
     "        history.append(action_id)\n",
     "        # 当action_id为0时执行SHIFT操作，当其在LEFT_ARC_ID中时\n",
     "        # 执行LEFT_ARC操作，否则执行RIGHT_ARC操作\n",
    "        if action_id == SHIFT_ID:\n",
    "            buffer,stack = shift(buffer,stack)\n",
    "        elif action_id in LEFT_ARC_ID:\n",
    "            stack,deps = left_arc(stack,deps,action_id)\n",
    "        else:\n",
    "            stack,deps = right_arc(stack,deps,action_id)\n",
    "        \n",
    "        # 当缓存为空，栈只有一个子树时则退出\n",
    "        if len(buffer) == 0 and len(stack) == 1:\n",
    "            break\n",
    "    # 返回生成的树\n",
    "    return deps\n",
    "\n",
    "def shift(buffer,stack):\n",
    "    # 将buffer中的词移动到栈顶\n",
    "    word=buffer.pop()\n",
     "    # 这里将(word_idx, word_text)元组整体入栈\n",
    "    stack.append(word)\n",
    "    return buffer, stack \n",
    "\n",
    "def left_arc(stack,deps,action_id):\n",
    "    # 因为是向左的弧，所以取出stack最后的两个词，倒数第一个词为中心词\n",
    "    head_word = stack.pop()\n",
    "    dep_word = stack.pop()\n",
    "    # 保存head，dep位置以及它们所对应的边，只需要保存word_idx\n",
    "    deps.append((head_word[0],dep_word[0],LEFT_ARC_ID[action_id]))\n",
    "    # 将中心词放回stack中\n",
    "    stack.append(head_word)\n",
    "    return stack, deps\n",
    "\n",
    "def right_arc(stack,deps,action_id):\n",
    "    # 因为是向右的弧，所以取出stack最后的两个词，倒数第二个词为中心词\n",
    "    dep_word = stack.pop()\n",
    "    head_word = stack.pop()\n",
    "    # 保存head，dep位置以及它们所对应的边\n",
     "    deps.append((head_word[0],dep_word[0],RIGHT_ARC_ID[action_id]))\n",
    "    # 将中心词放回stack中\n",
    "    stack.append(head_word)\n",
    "    return stack, deps"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "bdab937a-aa17-4435-b8db-2fcf292ca3d0",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "sun",
   "language": "python",
   "name": "sun"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
