{
 "cells": [
  {
   "cell_type": "code",
   "id": "initial_id",
   "metadata": {
    "collapsed": true,
    "ExecuteTime": {
     "end_time": "2025-11-03T10:26:41.379440Z",
     "start_time": "2025-11-03T10:26:40.232820Z"
    }
   },
   "source": [
    "import torch\n",
    "from torch import nn\n",
    "import torch.nn.functional as F\n"
   ],
   "outputs": [],
   "execution_count": 1
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-11-03T10:26:41.520408Z",
     "start_time": "2025-11-03T10:26:41.509864Z"
    }
   },
   "cell_type": "code",
   "source": [
    "from transformer_source.my.Transformer import MultiHeadAttention, LayerNorm, PositionwiseFeedForward\n",
    "class DecoderLayer(nn.Module):\n",
    "    def __init__(self, d_model, ffn_hidden, n_head, drop_prob):\n",
    "        super(DecoderLayer, self).__init__()\n",
    "        self.attention = MultiHeadAttention(d_model, n_head)\n",
    "        self.norm1 = LayerNorm(d_model)\n",
    "        self.dropout1 = nn.Dropout(drop_prob)\n",
    "        self.cross_attention = MultiHeadAttention(d_model, n_head)\n",
    "        self.norm2 = LayerNorm(d_model)\n",
    "        self.dropout2 = nn.Dropout(drop_prob)\n",
    "        self.ffn = PositionwiseFeedForward(d_model, ffn_hidden, drop_prob)\n",
    "        self.norm3 = LayerNorm(d_model)\n",
    "        self.dropout3 = nn.Dropout(drop_prob)\n",
    "    def forward(self, dec, enc, tgt_mask, src_mask):\n",
    "        _x = dec\n",
    "        x = self.attention(dec, dec, dec, tgt_mask)\n",
    "        x = self.dropout1(x)\n",
    "        x = self.norm1(x + _x)\n",
    "        _x = x\n",
    "        x = self.cross_attention(x, enc, enc, src_mask)\n",
    "        x = self.dropout2(x)\n",
    "        x = self.norm2(x + _x)\n",
    "        x = self.ffn(x)\n",
    "        x = self.dropout3(x)\n",
    "        x = self.norm3(x + _x)\n",
    "        return x"
   ],
   "id": "136ff263ac8cf59f",
   "outputs": [],
   "execution_count": 2
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-11-03T10:26:41.528624Z",
     "start_time": "2025-11-03T10:26:41.524885Z"
    }
   },
   "cell_type": "code",
   "source": [
    "from transformer_source.my.Transformer import TransformerEmbedding\n",
    "\n",
    "class Decoder(nn.Module):\n",
    "    def __init__(self, dec_voc_size, max_len, d_model, ffn_hidden, n_head, n_layer, drop_prob, device):\n",
    "        super(Decoder, self).__init__()\n",
    "        self.embedding = TransformerEmbedding(dec_voc_size, d_model, max_len, drop_prob, device)\n",
    "        self.layers = nn.ModuleList(\n",
    "            [\n",
    "                DecoderLayer(d_model, ffn_hidden, n_head, drop_prob) for _ in range(n_layer)\n",
    "            ]\n",
    "        )\n",
    "        self.fc = nn.Linear(d_model, dec_voc_size)\n",
    "    def forward(self, dec, enc, tgt_mask, src_mask):\n",
    "        dec = self.embedding(dec)\n",
    "        for layer in self.layers:\n",
    "            dec = layer(dec, enc, tgt_mask, src_mask)\n",
    "        dec = self.fc(dec)\n",
    "        return dec"
   ],
   "id": "5982019c846fcec8",
   "outputs": [],
   "execution_count": 3
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-11-03T10:26:41.803246Z",
     "start_time": "2025-11-03T10:26:41.537444Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 设置参数\n",
    "dec_voc_size = 8000    # 目标词汇表大小\n",
    "enc_voc_size = 10000   # 源词汇表大小\n",
    "max_len = 512          # 最大序列长度\n",
    "d_model = 512          # 模型维度\n",
    "ffn_hidden = 2048      # 前馈网络隐藏层维度\n",
    "n_head = 8             # 注意力头数\n",
    "n_layer = 6            # 解码器层数\n",
    "batch_size = 2         # 批次大小\n",
    "src_seq_len = 10       # 源序列长度\n",
    "tgt_seq_len = 8        # 目标序列长度\n",
    "\n",
    "# 创建 Decoder 实例\n",
    "decoder = Decoder(\n",
    "    dec_voc_size=dec_voc_size,\n",
    "    max_len=max_len,\n",
    "    d_model=d_model,\n",
    "    ffn_hidden=ffn_hidden,\n",
    "    n_head=n_head,\n",
    "    n_layer=n_layer,\n",
    "    drop_prob=0.1,\n",
    "    device='cpu'\n",
    ")\n",
    "\n",
    "# 创建模拟输入数据\n",
    "dec_input = torch.randint(0, dec_voc_size, (batch_size, tgt_seq_len)).long()  # 确保是 long 类型\n",
    "enc_output = torch.randn(batch_size, src_seq_len, d_model)  # 模拟编码器输出\n",
    "tgt_mask = torch.ones(batch_size, 1, tgt_seq_len, tgt_seq_len)  # 目标序列掩码\n",
    "src_mask = torch.ones(batch_size, 1, tgt_seq_len, src_seq_len)  # 源序列掩码\n",
    "\n",
    "# 运行解码器\n",
    "output = decoder(dec_input, enc_output, tgt_mask, src_mask)\n",
    "\n",
    "# 打印结果\n",
    "print(f\"Decoder input shape: {dec_input.shape}\")      # [batch_size, tgt_seq_len]\n",
    "print(f\"Encoder output shape: {enc_output.shape}\")    # [batch_size, src_seq_len, d_model]\n",
    "print(f\"Decoder output shape: {output.shape}\")        # [batch_size, tgt_seq_len, dec_voc_size]\n",
    "print(f\"Sample output[0, :2, :5]:\\n{output[0, :2, :5]}\")\n"
   ],
   "id": "35ce9c6e0a6889d8",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Decoder input shape: torch.Size([2, 8])\n",
      "Encoder output shape: torch.Size([2, 10, 512])\n",
      "Decoder output shape: torch.Size([2, 8, 8000])\n",
      "Sample output[0, :2, :5]:\n",
      "tensor([[ 0.7477,  0.2286,  0.6451,  0.0256,  0.2148],\n",
      "        [ 1.3445, -0.2677,  0.4492,  0.4069,  1.0289]],\n",
      "       grad_fn=<SliceBackward0>)\n"
     ]
    }
   ],
   "execution_count": 4
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
