{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "02967e40",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "\n",
    "\n",
    "class LSTMSequenceTagger(nn.Module):\n",
    "    \"\"\"Bidirectional-LSTM sequence tagging model with additive attention.\n",
    "\n",
    "    Args:\n",
    "        vocab_size: vocabulary size for the embedding table.\n",
    "        embed_dim: word-embedding dimension.\n",
    "        hidden_dim: LSTM hidden-state dimension (per direction).\n",
    "        num_tags: number of output tag classes (e.g. entity tags + O).\n",
    "        num_layers: number of stacked LSTM layers.\n",
    "        bidirectional: whether to run the LSTM in both directions.\n",
    "        dropout: dropout probability.\n",
    "    \"\"\"\n",
    "    def __init__(self,\n",
    "                 vocab_size,\n",
    "                 embed_dim=128,\n",
    "                 hidden_dim=256,\n",
    "                 num_tags=5,\n",
    "                 num_layers=2,\n",
    "                 bidirectional=True,\n",
    "                 dropout=0.1):\n",
    "        super().__init__()\n",
    "\n",
    "        # 1. Word embeddings; index 0 is assumed to be the padding token.\n",
    "        self.embedding = nn.Embedding(\n",
    "            num_embeddings=vocab_size,\n",
    "            embedding_dim=embed_dim,\n",
    "            padding_idx=0\n",
    "        )\n",
    "\n",
    "        # 2. LSTM encoder; inter-layer dropout only applies when num_layers > 1.\n",
    "        self.lstm = nn.LSTM(\n",
    "            input_size=embed_dim,\n",
    "            hidden_size=hidden_dim,\n",
    "            num_layers=num_layers,\n",
    "            bidirectional=bidirectional,\n",
    "            batch_first=True,  # inputs are [batch_size, seq_len, feature]\n",
    "            dropout=dropout if num_layers > 1 else 0\n",
    "        )\n",
    "\n",
    "        # Output width doubles when the LSTM is bidirectional.\n",
    "        lstm_out_dim = hidden_dim * (2 if bidirectional else 1)\n",
    "\n",
    "        # 3. Attention layer (re-weights per-position features).\n",
    "        self.attention = AttentionLayer(hidden_dim=lstm_out_dim)\n",
    "\n",
    "        # 4. Classification head: per-position tag logits.\n",
    "        self.classifier = nn.Sequential(\n",
    "            nn.Linear(\n",
    "                in_features=lstm_out_dim,\n",
    "                out_features=hidden_dim\n",
    "            ),\n",
    "            nn.ReLU(),\n",
    "            nn.Dropout(dropout),\n",
    "            nn.Linear(hidden_dim, num_tags)\n",
    "        )\n",
    "\n",
    "        self.dropout = nn.Dropout(dropout)\n",
    "\n",
    "    def forward(self, x, lengths=None, mask=None):\n",
    "        \"\"\"Compute per-position tag logits.\n",
    "\n",
    "        Args:\n",
    "            x: token-index tensor, shape [batch_size, seq_len].\n",
    "            lengths: optional true sequence lengths, shape [batch_size];\n",
    "                when given, sequences are packed for the LSTM for efficiency.\n",
    "            mask: optional padding mask [batch_size, seq_len], 1 = valid.\n",
    "        Returns:\n",
    "            logits: tag scores, shape [batch_size, seq_len, num_tags].\n",
    "        \"\"\"\n",
    "        embed = self.embedding(x)  # [B, T, embed_dim]\n",
    "        embed = self.dropout(embed)\n",
    "\n",
    "        if lengths is not None:\n",
    "            # Pack variable-length sequences. pack_padded_sequence requires\n",
    "            # lengths on the CPU (even for CUDA inputs), and this also\n",
    "            # accepts a plain Python list.\n",
    "            packed_embed = nn.utils.rnn.pack_padded_sequence(\n",
    "                embed, torch.as_tensor(lengths).cpu(),\n",
    "                batch_first=True, enforce_sorted=False\n",
    "            )\n",
    "            packed_lstm_out, _ = self.lstm(packed_embed)\n",
    "            # BUG FIX: without total_length, the unpacked output is only as\n",
    "            # long as max(lengths), which can be shorter than seq_len and\n",
    "            # silently breaks the documented [B, seq_len, num_tags] shape.\n",
    "            lstm_out, _ = nn.utils.rnn.pad_packed_sequence(\n",
    "                packed_lstm_out, batch_first=True, total_length=x.size(1)\n",
    "            )\n",
    "        else:\n",
    "            # Fixed-length sequences go straight through.\n",
    "            lstm_out, _ = self.lstm(embed)  # [B, T, lstm_out_dim]\n",
    "\n",
    "        # Attention re-weighting, then per-position classification.\n",
    "        attn_out = self.attention(lstm_out, mask)  # [B, T, lstm_out_dim]\n",
    "        logits = self.classifier(attn_out)  # [B, T, num_tags]\n",
    "\n",
    "        return logits\n",
    "\n",
    "\n",
    "class AttentionLayer(nn.Module):\n",
    "    \"\"\"加性注意力机制层（对序列位置加权）\"\"\"\n",
    "    def __init__(self, hidden_dim):\n",
    "        super().__init__()\n",
    "        self.w1 = nn.Linear(hidden_dim, hidden_dim // 2)\n",
    "        self.w2 = nn.Linear(hidden_dim // 2, 1)  # 输出每个位置的注意力权重\n",
    "\n",
    "    def forward(self, x, mask=None):\n",
    "        \"\"\"\n",
    "        Args:\n",
    "            x: LSTM输出特征，形状 [batch_size, seq_len, hidden_dim]\n",
    "            mask: 掩码，形状 [batch_size, seq_len]\n",
    "        Returns:\n",
    "            weighted_x: 注意力加权后的特征，形状 [batch_size, seq_len, hidden_dim]\n",
    "        \"\"\"\n",
    "        # 计算注意力分数\n",
    "        scores = self.w2(torch.tanh(self.w1(x)))  # [batch_size, seq_len, 1]\n",
    "        scores = scores.squeeze(-1)  # [batch_size, seq_len]\n",
    "        \n",
    "        # 对填充位置的注意力分数进行mask（设为负无穷，softmax后权重为0）\n",
    "        if mask is not None:\n",
    "            scores = scores.masked_fill(mask == 0, -1e9)\n",
    "        \n",
    "        # 计算注意力权重\n",
    "        attn_weights = F.softmax(scores, dim=1)  # [batch_size, seq_len]\n",
    "        \n",
    "        # 加权求和（广播机制）\n",
    "        weighted_x = x * attn_weights.unsqueeze(-1)  # [batch_size, seq_len, hidden_dim]\n",
    "        return weighted_x\n",
    "\n",
    "\n",
    "# Smoke test\n",
    "if __name__ == \"__main__\":\n",
    "    # Hyperparameters\n",
    "    vocab_size = 5000\n",
    "    batch_size = 16\n",
    "    seq_len = 20\n",
    "    num_tags = 5  # e.g. B-LOC, I-LOC, B-PER, I-PER, O\n",
    "\n",
    "    model = LSTMSequenceTagger(\n",
    "        vocab_size=vocab_size,\n",
    "        embed_dim=128,\n",
    "        hidden_dim=256,\n",
    "        num_tags=num_tags,\n",
    "        num_layers=2,\n",
    "        bidirectional=True\n",
    "    )\n",
    "\n",
    "    # Random token indices, random valid lengths in [5, seq_len].\n",
    "    x = torch.randint(0, vocab_size, (batch_size, seq_len))\n",
    "    lengths = torch.randint(5, seq_len + 1, (batch_size,))\n",
    "    # BUG FIX: force at least one full-length sequence; otherwise the packed\n",
    "    # LSTM output may span only max(lengths) < seq_len steps and the shape\n",
    "    # claim below would not hold.\n",
    "    lengths[0] = seq_len\n",
    "    mask = torch.zeros(batch_size, seq_len)\n",
    "    for i in range(batch_size):\n",
    "        mask[i, :lengths[i]] = 1  # mark valid positions\n",
    "\n",
    "    # Forward pass\n",
    "    logits = model(x, lengths, mask)\n",
    "    print(f\"输入形状: {x.shape}\")\n",
    "    print(f\"输出形状: {logits.shape}\")  # expect [16, 20, 5]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "35cecf7c",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    " \n",
    "# input_size输入特征的维度\n",
    "# hidden_size隐藏层的维度，即每个LSTM单元的隐藏状态向量的维度。\n",
    "# output_size：输出的维度。\n",
    "# num_layers：LSTM层的数量，默认为1。\n",
    " \n",
    " \n",
    "class LSTMModel(nn.Module):\n",
    "    def __init__(self, input_size, hidden_size, output_size, num_layers=1):\n",
    "        super(LSTMModel, self).__init__()\n",
    "        self.hidden_size = hidden_size\n",
    "        self.num_layers = num_layers\n",
    " \n",
    "        # 定义lsmt层\n",
    "        # batch_first=True表示输入数据的形状是(batch_size, sequence_length, input_size)\n",
    "        # 而不是默认的(sequence_length, batch_size, input_size)。\n",
    "        # batch_size是指每个训练批次中包含的样本数量\n",
    "        # sequence_length是指输入序列的长度\n",
    "        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)\n",
    " \n",
    "        # 定义全连接层，将LSTM层的输出映射到最终的输出空间。\n",
    "        self.fc = nn.Linear(hidden_size, output_size)\n",
    " \n",
    "    def forward(self, x):\n",
    "        # 初始化了隐藏状态h0和细胞状态c0，并将其设为零向量。\n",
    "        h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(x.device)\n",
    "        c0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(x.device)\n",
    " \n",
    "        # LSTM层前向传播\n",
    "        # 将输入数据x以及初始化的隐藏状态和细胞状态传入LSTM层\n",
    "        # 得到输出out和更新后的状态。\n",
    "        # out的形状为(batch_size, sequence_length, hidden_size)。\n",
    "        out, _ = self.lstm(x, (h0, c0))\n",
    " \n",
    "        # 全连接层前向传播\n",
    "        # 使用LSTM层的最后一个时间步的输出out[:, -1, :]（形状为(batch_size, hidden_size)）作为全连接层的输入，得到最终的输出。\n",
    "        out = self.fc(out[:, -1, :])\n",
    " \n",
    "        return out"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "583f9f84",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "from torch import nn\n",
    "\n",
    "class lstmcell(nn.Module):\n",
    "    def __init__(self,in_dim,hidden_dim):\n",
    "        self.ix=nn.Linear(in_dim.hidden_dim)\n",
    "        self.ih=nn.Linear(in_dim,hidden_dim)\n",
    "        self.fx=nn.Linear(in_dim.hidden_dim)\n",
    "        self.fh=nn.Linear(in_dim,hidden_dim)\n",
    "        self.ox=nn.Linear(in_dim.hidden_dim)\n",
    "        self.oh=nn.Linear(in_dim,hidden_dim)\n",
    "        self.cx=nn.Linear(in_dim.hidden_dim)\n",
    "        self.ch=nn.Linear(in_dim,hidden_dim)\n",
    "    def forward(self,x,h_1,c_1):\n",
    "        i=torch.sigmoid(self.ix(x)+self.ih(h_1))\n",
    "        f=torch.sigmoid(self.fx(x)+self.fh(h_1))\n",
    "        o=torch.sigmoid(self.ox(x)+self.oh(h_1))\n",
    "        c_=torch.tanh(self.cx(x)+self.ch(h_1))\n",
    "        c=f*c_1+i*c_\n",
    "        h=o*torch.tanh(c)\n",
    "        return h,c\n",
    "class LSTM(nn.Module):\n",
    "    \"\"\"Minimal LSTM layer that unrolls ``lstmcell`` over time.\n",
    "\n",
    "    Expects input of shape (seq_len, batch, in_dim) and returns the\n",
    "    stacked per-step hidden states plus the final (h, c) pair.\n",
    "    \"\"\"\n",
    "    def __init__(self, in_dim, hidden_dim):\n",
    "        super(LSTM, self).__init__()\n",
    "        self.hidden_dim = hidden_dim\n",
    "        self.in_dim = in_dim\n",
    "        self.lstmcell = lstmcell(in_dim, hidden_dim)\n",
    "\n",
    "    def forward(self, x):\n",
    "        \"\"\"Run the cell over every time step of ``x``.\n",
    "\n",
    "        Args:\n",
    "            x: tensor of shape (seq_len, batch, in_dim).\n",
    "        Returns:\n",
    "            outs: hidden states per step, shape (seq_len, batch, hidden_dim).\n",
    "            h: final hidden state, shape (batch, hidden_dim).\n",
    "            c: final cell state, shape (batch, hidden_dim).\n",
    "        \"\"\"\n",
    "        # BUG FIX: h and c were read before ever being assigned (NameError).\n",
    "        # Initialise them once, to zeros (the conventional choice), before\n",
    "        # the time loop.\n",
    "        h = torch.zeros(x.shape[1], self.hidden_dim)\n",
    "        c = torch.zeros(x.shape[1], self.hidden_dim)\n",
    "        outs = []\n",
    "        for seq_x in x:\n",
    "            h, c = self.lstmcell(seq_x, h, c)\n",
    "            outs.append(torch.unsqueeze(h, 0))\n",
    "        # BUG FIX: the concatenation and return were inside the loop, so\n",
    "        # only the first time step was ever processed.\n",
    "        return torch.cat(outs), h, c\n",
    "\n",
    "\n",
    "        \n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "cv",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "name": "python",
   "version": "3.12.11"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
