{
 "cells": [
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "<img src=\"Transformer.png\" alt=\"Transformer architecture diagram\" style=\"margin-left: auto; margin-right: auto; width:20%; height:auto; border-radius:10px;\">",
   "id": "d552f9ea846a557c"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-11-04T03:09:48.266662Z",
     "start_time": "2025-11-04T03:09:48.251084Z"
    }
   },
   "cell_type": "code",
   "source": [
    "import torch\n",
    "from torch import nn\n",
    "import torch.nn.functional as F\n",
    "import math"
   ],
   "id": "92f7234e88439594",
   "outputs": [],
   "execution_count": 4
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-11-04T03:09:48.287599Z",
     "start_time": "2025-11-04T03:09:48.275945Z"
    }
   },
   "cell_type": "code",
   "source": [
    "from transformer_source.my.Transformer import Encoder, Decoder\n",
    "\n",
    "class Transformer(nn.Module):\n",
    "    \"\"\"Encoder-decoder Transformer wrapper that also builds the attention masks.\n",
    "\n",
    "    Args:\n",
    "        src_pad_ix: padding-token index in the source vocabulary.\n",
    "        trg_pad_ix: padding-token index in the target vocabulary.\n",
    "        enc_voc_size: source vocabulary size.\n",
    "        dec_voc_size: target vocabulary size.\n",
    "        d_model: model (embedding) dimension.\n",
    "        max_len: maximum supported sequence length.\n",
    "        n_heads: number of attention heads.\n",
    "        ffn_hidden: hidden size of the position-wise feed-forward network.\n",
    "        n_layers: number of encoder and decoder layers.\n",
    "        drop_prod: dropout probability.\n",
    "        device: device on which the masks (and model) are created.\n",
    "    \"\"\"\n",
    "    def __init__(self,\n",
    "                 src_pad_ix,\n",
    "                 trg_pad_ix,\n",
    "                 enc_voc_size,\n",
    "                 dec_voc_size,\n",
    "                 d_model,\n",
    "                 max_len,\n",
    "                 n_heads,\n",
    "                 ffn_hidden,\n",
    "                 n_layers,\n",
    "                 drop_prod,\n",
    "                 device):\n",
    "        super(Transformer, self).__init__()\n",
    "        self.encoder = Encoder(enc_voc_size, max_len, d_model, ffn_hidden, n_heads, n_layers, drop_prod, device)\n",
    "        self.decoder = Decoder(dec_voc_size, max_len, d_model, ffn_hidden, n_heads, n_layers, drop_prod, device)\n",
    "        self.src_pad_ix = src_pad_ix\n",
    "        self.trg_pad_ix = trg_pad_ix\n",
    "        self.device = device\n",
    "\n",
    "    def make_pad_mask(self, q, k, pad_idx_q, pad_idx_k):\n",
    "        \"\"\"Boolean padding mask [batch, 1, len_q, len_k]; True where both query and key tokens are non-pad.\"\"\"\n",
    "        len_q, len_k = q.size(1), k.size(1)\n",
    "        # (batch, 1, len_q, 1) repeated across key positions\n",
    "        q = q.ne(pad_idx_q).unsqueeze(1).unsqueeze(3)\n",
    "        q = q.repeat(1, 1, 1, len_k)\n",
    "        # (batch, 1, 1, len_k) repeated across query positions\n",
    "        k = k.ne(pad_idx_k).unsqueeze(1).unsqueeze(2)\n",
    "        k = k.repeat(1, 1, len_q, 1)\n",
    "        mask = q & k\n",
    "        return mask\n",
    "\n",
    "    def make_casual_mask(self, q, k):\n",
    "        \"\"\"Lower-triangular (causal) mask of shape [len_q, len_k]. (Name keeps the historical 'casual' typo.)\"\"\"\n",
    "        # Build the mask as bool directly on the target device; the previous\n",
    "        # float-ones -> .type(torch.BoolTensor) -> .to(device) chain forced an\n",
    "        # extra CPU allocation, a dtype cast and a device transfer.\n",
    "        mask = torch.tril(torch.ones(q.size(1), k.size(1), dtype=torch.bool, device=self.device))\n",
    "        return mask\n",
    "\n",
    "    def make_src_mask(self, src):\n",
    "        \"\"\"Source padding mask [batch, 1, 1, len_src] used by encoder-decoder cross-attention.\"\"\"\n",
    "        src_mask = src.ne(self.src_pad_ix).unsqueeze(1).unsqueeze(2)\n",
    "        return src_mask\n",
    "\n",
    "    def forward(self, src, trg):\n",
    "        \"\"\"Full encoder-decoder pass; returns the decoder output of shape [batch, len_trg, dec_voc_size].\"\"\"\n",
    "        src_mask = self.make_pad_mask(src, src, self.src_pad_ix, self.src_pad_ix)\n",
    "        # Combine padding and causal constraints with logical AND; `&` is the\n",
    "        # explicit boolean operator (the old `*` relied on bool multiplication).\n",
    "        trg_mask = self.make_pad_mask(trg, trg, self.trg_pad_ix, self.trg_pad_ix) & self.make_casual_mask(trg, trg)\n",
    "        src_cross_mask = self.make_src_mask(src)  # mask for cross-attention\n",
    "        enc = self.encoder(src, src_mask)\n",
    "        out = self.decoder(trg, enc, trg_mask, src_cross_mask)\n",
    "        return out"
   ],
   "id": "5df7a64dcfb46943",
   "outputs": [],
   "execution_count": 5
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-11-04T03:09:48.894051Z",
     "start_time": "2025-11-04T03:09:48.358072Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Model hyperparameters\n",
    "src_pad_ix = 0        # padding index in the source vocabulary\n",
    "trg_pad_ix = 0        # padding index in the target vocabulary\n",
    "enc_voc_size = 10000  # source vocabulary size\n",
    "dec_voc_size = 8000   # target vocabulary size\n",
    "d_model = 512         # model dimension\n",
    "max_len = 512         # maximum sequence length\n",
    "n_heads = 8           # number of attention heads\n",
    "ffn_hidden = 2048     # feed-forward hidden size\n",
    "n_layers = 6          # number of encoder/decoder layers\n",
    "drop_prod = 0.1       # dropout probability\n",
    "batch_size = 2        # batch size\n",
    "src_seq_len = 10      # source sequence length\n",
    "trg_seq_len = 8       # target sequence length\n",
    "\n",
    "# Seed the RNG so this demo is reproducible under Restart & Run All.\n",
    "torch.manual_seed(42)\n",
    "\n",
    "# Instantiate the Transformer\n",
    "transformer = Transformer(\n",
    "    src_pad_ix=src_pad_ix,\n",
    "    trg_pad_ix=trg_pad_ix,\n",
    "    enc_voc_size=enc_voc_size,\n",
    "    dec_voc_size=dec_voc_size,\n",
    "    d_model=d_model,\n",
    "    max_len=max_len,\n",
    "    n_heads=n_heads,\n",
    "    ffn_hidden=ffn_hidden,\n",
    "    n_layers=n_layers,\n",
    "    drop_prod=drop_prod,\n",
    "    device='cpu'\n",
    ")\n",
    "\n",
    "# Mock inputs; torch.randint already yields int64, so the .long() cast was redundant\n",
    "src = torch.randint(1, enc_voc_size, (batch_size, src_seq_len))  # source token ids\n",
    "trg = torch.randint(1, dec_voc_size, (batch_size, trg_seq_len))  # target token ids\n",
    "\n",
    "# Force a padding token into each sequence so the pad masks are exercised\n",
    "src[:, 0] = src_pad_ix\n",
    "trg[:, 0] = trg_pad_ix\n",
    "\n",
    "# Forward pass\n",
    "output = transformer(src, trg)\n",
    "\n",
    "# Sanity-check the output contract before printing\n",
    "assert output.shape == (batch_size, trg_seq_len, dec_voc_size)\n",
    "\n",
    "print(f\"Source input shape: {src.shape}\")           # [batch_size, src_seq_len]\n",
    "print(f\"Target input shape: {trg.shape}\")           # [batch_size, trg_seq_len]\n",
    "print(f\"Transformer output shape: {output.shape}\")   # [batch_size, trg_seq_len, dec_voc_size]\n",
    "print(f\"Sample output[0, :2, :5]:\\n{output[0, :2, :5]}\")\n"
   ],
   "id": "25a36a5a47d7e68e",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Source input shape: torch.Size([2, 10])\n",
      "Target input shape: torch.Size([2, 8])\n",
      "Transformer output shape: torch.Size([2, 8, 8000])\n",
      "Sample output[0, :2, :5]:\n",
      "tensor([[-0.0587, -1.0064,  0.0592,  0.5845, -0.2412],\n",
      "        [ 0.0375, -0.7753,  0.3415,  0.5092,  0.4178]],\n",
      "       grad_fn=<SliceBackward0>)\n"
     ]
    }
   ],
   "execution_count": 6
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
