{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch  \n",
    "import torch.nn as nn"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class TokenEmbedding(nn.Embedding):\n",
    "    \"\"\"\n",
    "    Token Embedding using torch.nn\n",
    "    they will dense representation of word using weighted matrix\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, vocab_size, d_model):\n",
    "        \"\"\"\n",
    "        class for token embedding that included positional information\n",
    "\n",
    "        :param vocab_size: 字典中词的个数\n",
    "        :param d_model: 嵌入维度\n",
    "        \"\"\"\n",
    "        super(TokenEmbedding, self).__init__(vocab_size, d_model, padding_idx=1)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "若batch_size是bs，输入词的个数是seq_len，词嵌入维度是d_model。\n",
    "\n",
    "因此输入的维度x：tensor(bs,seq_len)\n",
    "\n",
    "TokenEmbedding(x)后的维度：tensor(bs,seq_len,d_model)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Hypothetical vocabulary size and embedding dimension\n",
    "vocab_size = 1000\n",
    "d_model = 512\n",
    "\n",
    "# Instantiate the TokenEmbedding layer\n",
    "token_embedding = TokenEmbedding(vocab_size, d_model)\n",
    "\n",
    "# A handful of made-up token indices\n",
    "sample_ids = torch.tensor([2, 5, 8, 12, 20, 33, 50, 100, 200, 500])\n",
    "\n",
    "# Look up the embedding vectors for these indices\n",
    "embeddings = token_embedding(sample_ids)\n",
    "\n",
    "# The result should have shape (batch_size, d_model), where batch_size\n",
    "# is the number of elements in sample_ids\n",
    "assert embeddings.shape == (sample_ids.numel(), d_model), \"Embeddings shape is incorrect\"\n",
    "\n",
    "# Print the shape as a visual check\n",
    "print(f\"Embeddings shape: {embeddings.shape}\")\n",
    "\n",
    "# padding_idx was set to 1, so index 1 must map to the zero vector\n",
    "pad_embedding = token_embedding(torch.tensor([1]))\n",
    "assert torch.allclose(pad_embedding, torch.zeros(1, d_model)), \"Padding index embedding is not a zero vector\"\n",
    "\n",
    "print(\"All tests passed!\")"
   ]
  }
 ],
 "metadata": {
  "language_info": {
   "name": "python"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
