{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "f60096f5f506ffe0",
   "metadata": {},
   "source": [
    "### CBOW (Continuous Bag-of-Words)"
   ]
  },
  {
   "cell_type": "code",
   "id": "83b67e1f4dddeb5f",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-02T08:50:55.179572Z",
     "start_time": "2025-07-02T08:50:55.176337Z"
    }
   },
   "source": [
    "# Toy corpus.\n",
    "texts = [\n",
    "    \"I love natural language processing.\",\n",
    "    \"I love machine learning.\",\n",
    "    \"I love coding in Python and Java.\",\n",
    "    \"I love Java.\",\n",
    "    \"I don't love Java.\"\n",
    "]\n",
    "\n",
    "# Whitespace tokenization.\n",
    "# NOTE(review): str.split keeps punctuation attached ('Java.' here is a\n",
    "# different token than a bare 'Java' would be) and is case-sensitive;\n",
    "# fine for this demo, but real preprocessing would normalize first.\n",
    "sentences = [text.split() for text in texts]\n",
    "sentences"
   ],
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[['I', 'love', 'natural', 'language', 'processing.'],\n",
       " ['I', 'love', 'machine', 'learning.'],\n",
       " ['I', 'love', 'coding', 'in', 'Python', 'and', 'Java.'],\n",
       " ['I', 'love', 'Java.'],\n",
       " ['I', \"don't\", 'love', 'Java.']]"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 3
  },
  {
   "cell_type": "code",
   "id": "637eb0ea94d500c5",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-02T08:50:56.607352Z",
     "start_time": "2025-07-02T08:50:56.604245Z"
    }
   },
   "source": [
    "# Build the vocabulary: word -> integer index, assigned in first-seen order.\n",
    "vocabulary = {}\n",
    "for sentence in sentences:\n",
    "    for word in sentence:\n",
    "        if word not in vocabulary:\n",
    "            vocabulary[word] = len(vocabulary)\n",
    "\n",
    "vocab_size = len(vocabulary)  # number of distinct tokens\n",
    "\n",
    "vocabulary"
   ],
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'I': 0,\n",
       " 'love': 1,\n",
       " 'natural': 2,\n",
       " 'language': 3,\n",
       " 'processing.': 4,\n",
       " 'machine': 5,\n",
       " 'learning.': 6,\n",
       " 'coding': 7,\n",
       " 'in': 8,\n",
       " 'Python': 9,\n",
       " 'and': 10,\n",
       " 'Java.': 11,\n",
       " \"don't\": 12}"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 4
  },
  {
   "cell_type": "code",
   "id": "90aff1d12366680d",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-02T08:50:58.333377Z",
     "start_time": "2025-07-02T08:50:58.327879Z"
    }
   },
   "source": [
    "import torch\n",
    "\n",
    "\n",
    "# 生成训练数据\n",
    "def generate_training_data(sentences, window_size):\n",
    "    context_words = []  #周围词\n",
    "    target_words = []  #中心词\n",
    "\n",
    "    for sentence in sentences:\n",
    "        indices = [vocabulary[word] for word in sentence]\n",
    "        for center_pos in range(len(indices)):\n",
    "            # 确定上下文窗口范围\n",
    "            start = max(0, center_pos - window_size)\n",
    "            end = min(len(indices), center_pos + window_size + 1)\n",
    "\n",
    "            # 收集上下文词（排除中心词本身）\n",
    "            context = []\n",
    "            for context_pos in range(start, end):\n",
    "                if context_pos != center_pos:\n",
    "                    context.append(indices[context_pos])\n",
    "\n",
    "            # 至少需要1个上下文词\n",
    "            if len(context) >= 1:\n",
    "                context_words.append(context)\n",
    "                target_words.append(indices[center_pos])\n",
    "\n",
    "    return context_words, target_words\n",
    "\n",
    "\n",
    "WINDOW_SIZE = 2\n",
    "context_words, target_words = generate_training_data(sentences, WINDOW_SIZE)\n",
    "\n",
    "context_words, target_words"
   ],
   "outputs": [
    {
     "data": {
      "text/plain": [
       "([[1, 2],\n",
       "  [0, 2, 3],\n",
       "  [0, 1, 3, 4],\n",
       "  [1, 2, 4],\n",
       "  [2, 3],\n",
       "  [1, 5],\n",
       "  [0, 5, 6],\n",
       "  [0, 1, 6],\n",
       "  [1, 5],\n",
       "  [1, 7],\n",
       "  [0, 7, 8],\n",
       "  [0, 1, 8, 9],\n",
       "  [1, 7, 9, 10],\n",
       "  [7, 8, 10, 11],\n",
       "  [8, 9, 11],\n",
       "  [9, 10],\n",
       "  [1, 11],\n",
       "  [0, 11],\n",
       "  [0, 1],\n",
       "  [12, 1],\n",
       "  [0, 1, 11],\n",
       "  [0, 12, 11],\n",
       "  [12, 1]],\n",
       " [0, 1, 2, 3, 4, 0, 1, 5, 6, 0, 1, 7, 8, 9, 10, 11, 0, 1, 11, 0, 12, 1, 11])"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 5
  },
  {
   "cell_type": "code",
   "id": "8f7dcbdc26b7567d",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-02T08:52:23.427095Z",
     "start_time": "2025-07-02T08:52:23.403166Z"
    }
   },
   "source": [
    "# 因为输入的是周围词，长度不一样，所以要对齐一下长度\n",
    "max_context_len = max(len(ctx) for ctx in context_words)\n",
    "padded_context_words = []\n",
    "for ctx in context_words:\n",
    "    padded = ctx + [0] * (max_context_len - len(ctx))  # 用0填充\n",
    "    padded_context_words.append(padded)\n",
    "\n",
    "padded_context_words"
   ],
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[[1, 2, 0, 0],\n",
       " [0, 2, 3, 0],\n",
       " [0, 1, 3, 4],\n",
       " [1, 2, 4, 0],\n",
       " [2, 3, 0, 0],\n",
       " [1, 5, 0, 0],\n",
       " [0, 5, 6, 0],\n",
       " [0, 1, 6, 0],\n",
       " [1, 5, 0, 0],\n",
       " [1, 7, 0, 0],\n",
       " [0, 7, 8, 0],\n",
       " [0, 1, 8, 9],\n",
       " [1, 7, 9, 10],\n",
       " [7, 8, 10, 11],\n",
       " [8, 9, 11, 0],\n",
       " [9, 10, 0, 0],\n",
       " [1, 11, 0, 0],\n",
       " [0, 11, 0, 0],\n",
       " [0, 1, 0, 0],\n",
       " [12, 1, 0, 0],\n",
       " [0, 1, 11, 0],\n",
       " [0, 12, 11, 0],\n",
       " [12, 1, 0, 0]]"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 6
  },
  {
   "cell_type": "code",
   "id": "7b2b90c260785171",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-02T08:55:49.033278Z",
     "start_time": "2025-07-02T08:55:49.029673Z"
    }
   },
   "source": [
    "from torch.utils.data import TensorDataset, DataLoader\n",
    "\n",
    "# Wrap (context, target) pairs in a DataLoader.\n",
    "# batch_size=1 keeps this demo simple (one sample per step); a larger batch\n",
    "# would train faster on real data.\n",
    "dataset = TensorDataset(torch.LongTensor(padded_context_words), torch.LongTensor(target_words))\n",
    "dataloader = DataLoader(dataset, batch_size=1, shuffle=True)"
   ],
   "outputs": [],
   "execution_count": 11
  },
  {
   "cell_type": "code",
   "id": "55f2ad69a2db5ba3",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-02T08:55:50.012223Z",
     "start_time": "2025-07-02T08:55:50.009063Z"
    }
   },
   "source": [
    "from torch import nn\n",
    "from torch import optim\n",
    "\n",
    "\n",
    "# CBOW model: predict the center word from the average of its context embeddings.\n",
    "class CBOW(nn.Module):\n",
    "    def __init__(self, vocab_size, embedding_dim):\n",
    "        super().__init__()\n",
    "        self.embedding = nn.Embedding(vocab_size, embedding_dim)  # word index -> dense vector\n",
    "        self.linear = nn.Linear(embedding_dim, vocab_size)  # averaged vector -> vocabulary logits\n",
    "\n",
    "    def forward(self, context_words):\n",
    "        # context_words: LongTensor of context-word indices, shape (batch, context_len).\n",
    "        hidden = self.embedding(context_words)  # (batch, context_len, embedding_dim)\n",
    "        # Average the context vectors over dim=1 -> (batch, embedding_dim).\n",
    "        # NOTE(review): all positions are weighted equally, padding included;\n",
    "        # a masked mean would ignore padded slots.\n",
    "        avg = torch.mean(hidden, dim=1)\n",
    "        # Raw logits over the vocabulary, shape (batch, vocab_size).\n",
    "        return self.linear(avg)\n"
   ],
   "outputs": [],
   "execution_count": 12
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-02T08:55:51.829811Z",
     "start_time": "2025-07-02T08:55:51.826430Z"
    }
   },
   "cell_type": "code",
   "source": [
    "EMBEDDING_DIM = 100\n",
    "\n",
    "# Model, loss, and optimizer.\n",
    "# CrossEntropyLoss expects raw logits, which is why the model applies no softmax.\n",
    "model = CBOW(vocab_size, EMBEDDING_DIM)\n",
    "criterion = nn.CrossEntropyLoss()\n",
    "optimizer = optim.SGD(model.parameters(), lr=0.01)"
   ],
   "id": "2d045509b135dbd1",
   "outputs": [],
   "execution_count": 13
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-02T08:55:54.080531Z",
     "start_time": "2025-07-02T08:55:53.990668Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 开始训练\n",
    "EPOCHS = 10\n",
    "\n",
    "for epoch in range(EPOCHS):\n",
    "    for context_batch, target_batch in dataloader:\n",
    "        # 前向传播\n",
    "        predict = model(context_batch)\n",
    "\n",
    "        # 计算损失\n",
    "        loss = criterion(predict, target_batch)\n",
    "\n",
    "        # 反向传播\n",
    "        optimizer.zero_grad()\n",
    "        loss.backward()\n",
    "        optimizer.step()\n",
    "\n",
    "        print(f\"Epoch {epoch + 1}/{EPOCHS}, Loss: {loss:.4f}\")"
   ],
   "id": "9a5db4684fc85d36",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1/10, Loss: 1.2978\n",
      "Epoch 2/10, Loss: 2.7502\n",
      "Epoch 3/10, Loss: 1.6033\n",
      "Epoch 4/10, Loss: 2.5369\n",
      "Epoch 5/10, Loss: 1.2744\n",
      "Epoch 6/10, Loss: 2.3671\n",
      "Epoch 7/10, Loss: 1.0126\n",
      "Epoch 8/10, Loss: 0.6802\n",
      "Epoch 9/10, Loss: 1.3691\n",
      "Epoch 10/10, Loss: 1.9332\n"
     ]
    }
   ],
   "execution_count": 14
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-02T08:56:31.655395Z",
     "start_time": "2025-07-02T08:56:31.637437Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# The learned word vectors are the rows of the embedding weight matrix,\n",
    "# shape (vocab_size, EMBEDDING_DIM). Row 0 corresponds to index 0 ('I').\n",
    "word_vectors = model.embedding.weight.data\n",
    "word_vectors[0]"
   ],
   "id": "8c8e1b0ba6ea1925",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([-0.2967, -0.5988,  0.2280,  1.0853,  0.7590,  1.7825,  0.2758, -0.7288,\n",
       "         0.0369,  1.2717, -0.4428, -0.7248,  0.6584, -1.5620,  2.0081,  0.4657,\n",
       "         1.0939, -0.0747,  0.0314,  0.9205, -2.1069, -0.2434, -1.1275,  0.9822,\n",
       "        -0.2861,  0.3473, -0.6703, -0.7796,  1.1333, -0.0749, -1.1759,  0.5953,\n",
       "        -0.3965,  1.2833,  0.6164, -0.5214, -0.5624, -0.0947,  0.0883, -1.0216,\n",
       "         0.2802, -1.1298, -0.7405,  1.5705, -2.2763,  0.9589, -0.0679, -0.3765,\n",
       "         0.6009,  1.0066, -1.3075, -0.2264, -0.6970, -1.2350, -1.4696,  0.4623,\n",
       "         0.8900,  1.0318,  0.7152,  1.9189, -0.1426,  0.4389, -2.8959,  0.3906,\n",
       "        -0.8208,  0.0444,  1.5230, -0.8521,  0.5000,  0.1148,  0.5001, -0.9286,\n",
       "        -0.8682, -1.0222,  1.1724, -1.7080, -0.1134, -0.2399, -0.1095, -0.3548,\n",
       "        -1.2254, -0.3880, -0.0826, -0.2760, -0.0954,  1.6074,  0.2779,  0.9762,\n",
       "        -2.0818,  1.1898, -0.7999, -0.5842,  0.5650,  0.9860, -1.3714,  0.5248,\n",
       "        -1.6643, -0.6619, -0.0199, -1.2219])"
      ]
     },
     "execution_count": 16,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 16
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-06-28T00:12:48.084601Z",
     "start_time": "2025-06-28T00:12:48.076863Z"
    }
   },
   "cell_type": "code",
   "source": [
    "for i, word in enumerate(vocabulary):\n",
    "    print(f\"{word}: {word_vectors[i]}...\")"
   ],
   "id": "d0476739974fa058",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "I: tensor([-1.2879,  0.0893, -1.1479,  0.0218,  0.9643, -0.2443,  0.1786,  0.7927,\n",
      "        -0.5697,  1.5046, -0.6562,  1.0481, -0.4268,  1.7215, -1.4988, -0.9342,\n",
      "        -0.7452, -1.8768, -1.3421, -0.3721, -0.2584, -0.0897,  0.7166, -2.2192,\n",
      "         0.8837, -0.3964, -1.4817,  0.0797,  1.7578, -1.1137,  0.4486, -0.7131,\n",
      "         0.7892, -0.4017,  0.8018,  1.5043, -1.5189,  0.6870,  1.7420, -1.1580,\n",
      "         0.4872, -1.8187,  2.3678, -2.4558,  0.2938, -0.3302,  0.3800,  2.4546,\n",
      "        -1.0300, -0.7990,  0.7960,  1.5282, -0.6037, -0.2804, -0.6770,  0.3777,\n",
      "         0.3295, -0.9401,  1.9448, -0.3217, -0.6934,  1.6602, -0.1949, -0.6395,\n",
      "         0.4326, -1.5242, -2.5998, -0.0544, -1.3022,  0.2743, -0.0964,  0.1917,\n",
      "        -0.4260,  0.9317, -0.5707, -0.5126,  0.1776,  0.0453,  0.3414,  0.3231,\n",
      "         1.4054, -1.7485,  0.0766, -0.4309, -1.1353,  0.1461,  2.3064, -1.2006,\n",
      "         0.6369, -0.2921, -0.0825,  0.1735, -2.4638, -0.1835,  0.2568, -0.0890,\n",
      "        -0.9462, -1.1369, -0.1405,  0.1883])...\n",
      "love: tensor([-0.7469,  0.7927,  0.5973,  0.9452, -0.3087,  0.1090, -0.0873, -0.8723,\n",
      "        -0.5450,  0.8872,  1.0801, -0.0359, -1.0753, -0.3591,  1.2889, -0.8620,\n",
      "         0.0577, -0.1475,  0.1677,  0.6909, -0.5839,  0.9930, -0.7884,  0.4174,\n",
      "        -1.2593, -0.0669,  1.4583,  1.1372, -0.2916,  0.8296,  2.6738, -1.4103,\n",
      "        -0.4504, -0.8285,  0.2185, -1.9804,  0.0272,  0.9180,  0.0634,  0.8912,\n",
      "        -0.9941,  0.0232, -0.4545,  0.9764, -0.3662, -0.9161, -1.6346,  1.1036,\n",
      "         0.3148,  0.0772, -0.4815,  1.5579,  0.1361, -0.8385, -0.1584, -1.1115,\n",
      "         0.0506, -0.4875, -1.2744, -0.7559, -0.4437,  1.0806,  1.5463, -1.4092,\n",
      "        -0.0736, -0.5316, -0.2370,  0.3539,  0.6656, -0.8892, -0.9897, -0.8541,\n",
      "         0.2514,  0.5445, -0.0862, -0.1133, -0.3261,  0.4492,  0.5205, -0.3687,\n",
      "         0.4688, -0.3251, -0.3930,  0.1947, -0.0492, -0.8297, -0.0244,  0.7693,\n",
      "        -0.9217,  0.7536,  0.5948,  0.2771, -2.1689,  1.2397,  0.0294,  0.4429,\n",
      "        -0.3743,  0.9790,  1.1711,  1.7351])...\n",
      "natural: tensor([ 1.1452, -1.7086,  0.1945,  1.0741,  0.0027, -0.1976, -0.5049, -1.3581,\n",
      "         0.2012, -0.4372,  0.0722,  0.6385, -1.5561, -0.4095,  2.1941, -0.5045,\n",
      "         0.9584, -1.8614,  1.3718, -0.5066, -0.1068,  0.4451, -0.5411, -0.3574,\n",
      "         0.8741, -0.3123,  1.0156,  0.4191,  1.0369,  0.2960, -0.6562, -0.7239,\n",
      "        -1.6712,  0.7239, -1.9114,  0.0135, -0.4506, -0.0110, -0.7877, -0.4236,\n",
      "        -1.8480, -1.2897, -0.7373,  0.4696,  1.4594, -0.9769, -0.3888,  0.4780,\n",
      "        -0.1705,  1.0394,  0.0056, -0.1496, -0.2679, -1.5703,  1.4392, -0.9743,\n",
      "         0.8374,  2.6604, -0.9251, -0.1197,  0.8127,  0.2825, -1.1472, -0.1571,\n",
      "         0.4663,  0.0680, -0.4478, -1.4739, -0.2452,  0.1726,  0.2382, -0.3377,\n",
      "        -0.9033,  0.6269,  0.9280,  0.8290, -1.5097,  0.8158,  1.3067,  0.0744,\n",
      "        -0.4784, -0.6395,  1.3709,  1.4812,  0.1856,  0.2076, -1.4304,  0.7175,\n",
      "        -1.0373, -0.0784,  1.1109,  0.2969, -0.7803, -0.5775,  2.3950,  2.2637,\n",
      "         2.3246,  1.4351,  1.5325,  1.4221])...\n",
      "language: tensor([ 0.1367, -0.1966, -1.0607, -0.6749,  0.1166, -0.3719,  1.5527, -0.1403,\n",
      "         0.6922, -1.2248,  1.8625, -0.4783,  0.8548, -2.0033,  0.7569, -0.5993,\n",
      "        -0.3493, -0.1552, -0.2683, -1.7548, -1.4415, -0.4555, -0.1015, -1.8174,\n",
      "        -0.3659, -0.5173, -0.1323, -0.6214, -0.8033, -0.6638,  1.6500, -1.0760,\n",
      "        -0.5038, -0.9528,  0.2609, -1.0171, -2.0417,  0.2266,  0.0996, -1.3165,\n",
      "         1.9910, -1.0796,  0.1471,  0.8655,  1.3320, -0.1577,  2.5624,  1.2568,\n",
      "        -0.8977,  1.3988, -0.9472,  0.2588, -0.4201,  0.0776, -2.6393,  0.1347,\n",
      "        -0.3184,  0.6247, -0.0342, -1.9025,  0.9253,  0.5420, -1.0783,  0.4188,\n",
      "         0.2537,  0.9669,  0.9011, -1.5767,  0.1276,  1.2144, -1.2333, -1.5772,\n",
      "        -0.3982, -1.5374, -0.4254,  0.4439, -0.7355,  1.1512,  1.7890, -0.5252,\n",
      "        -1.3646, -1.5439, -0.2440,  1.1548, -1.2794, -1.5800,  0.8749, -1.0392,\n",
      "         0.4321, -1.6774,  1.0732, -1.0432, -0.5762, -0.0299,  1.4053, -0.1275,\n",
      "        -0.8232, -0.3206, -0.3543,  2.3570])...\n",
      "processing.: tensor([ 0.9555,  1.0958, -0.4647,  0.0323, -0.9208, -1.6902,  0.8349, -0.2045,\n",
      "        -1.7483,  0.9490,  0.4040,  0.4908, -0.5354, -0.5120, -1.6389, -0.3083,\n",
      "         0.8958, -1.0817, -2.8027, -1.5081, -0.9817,  0.8876,  1.0255,  0.2003,\n",
      "         0.6627,  0.5219,  0.4336, -0.2641, -0.9177, -0.6419, -0.2455,  1.4315,\n",
      "         1.4284, -0.1363,  0.9638, -0.8450,  0.0140, -0.7070, -0.3889, -1.0801,\n",
      "        -0.1461,  0.4662, -1.2820,  1.0061, -2.3189, -0.1634, -1.1933,  0.7002,\n",
      "         1.0020,  1.1082,  0.4980, -0.3028, -1.0222,  1.0662,  0.5468, -1.4693,\n",
      "        -0.1110, -1.2757,  1.7365,  0.7464, -1.0416, -1.1796,  1.4037, -0.2418,\n",
      "        -0.3754,  0.4044,  0.6522, -0.7879, -0.4223,  0.0334,  1.3581,  0.6261,\n",
      "         0.3690,  0.1305,  2.0293,  2.0552, -0.7725, -1.3190, -0.4206, -0.1115,\n",
      "        -0.1651, -1.0787, -0.6535, -1.3770, -0.2210,  3.0791, -2.5604, -0.0171,\n",
      "        -0.1644, -0.5029, -1.1250, -1.0101,  0.3263, -1.0266, -0.2485,  2.1515,\n",
      "         0.1512,  0.8699, -1.1093, -0.0365])...\n",
      "machine: tensor([ 2.8353e-01,  5.5538e-01,  1.1382e+00, -4.1367e-02,  1.2594e-01,\n",
      "        -4.0475e-01, -1.0192e+00,  1.7614e+00, -8.8186e-01,  5.7806e-04,\n",
      "         1.7746e-01, -1.7145e+00,  1.1584e+00,  1.2816e+00,  3.1802e-01,\n",
      "        -1.3009e+00, -2.1844e-01, -1.6759e+00, -1.1234e+00,  1.1825e+00,\n",
      "         1.1548e-01,  1.0514e+00,  1.2764e-01, -6.8576e-02,  4.0646e-01,\n",
      "         1.0235e+00, -6.1194e-01,  4.4954e-01,  1.9894e+00, -8.4358e-01,\n",
      "        -3.4396e-02, -6.1267e-01, -1.3229e+00, -2.3479e-01, -1.3261e+00,\n",
      "        -8.7815e-01, -2.4866e-01,  1.1802e+00, -1.4633e-01, -1.0894e+00,\n",
      "         4.3357e-02, -6.7782e-01,  4.3091e-01, -1.4339e-01, -1.5798e+00,\n",
      "        -1.6554e+00,  1.8234e-01,  1.6708e+00, -2.9356e-01,  2.0252e+00,\n",
      "        -9.8381e-01, -1.1180e-01,  3.0309e-01, -7.1672e-01,  1.5282e+00,\n",
      "         2.0257e+00, -1.5300e-01, -9.9495e-02,  1.6394e-01,  1.0189e+00,\n",
      "        -1.6759e+00,  9.1261e-01, -4.3808e-01, -3.2212e-01, -8.8882e-01,\n",
      "         2.6739e-01, -1.0385e+00, -2.9381e-01, -7.4597e-01,  2.2781e-01,\n",
      "        -7.8947e-03, -1.0032e-01, -9.0290e-01,  1.3959e-01,  6.9051e-01,\n",
      "        -5.3710e-01, -8.3627e-01,  2.4816e+00,  1.3830e+00,  2.8199e-01,\n",
      "         7.2206e-01,  4.3508e-01, -1.7364e-01,  1.4474e+00, -4.9552e-01,\n",
      "         1.7669e-01,  3.6736e-01, -2.7694e-01,  3.0361e+00, -1.5840e+00,\n",
      "         1.7777e-01,  3.6242e-01,  2.8935e+00, -3.7606e-02,  1.0138e+00,\n",
      "         1.6506e-01, -3.5567e-01, -4.9948e-01,  1.4133e+00,  2.5603e-01])...\n",
      "learning.: tensor([ 6.5638e-01,  2.0794e+00, -1.8790e+00,  7.2376e-01,  7.1435e-01,\n",
      "         7.8507e-02,  1.9245e+00,  8.0917e-01, -4.9232e-01,  4.7807e-01,\n",
      "         4.2711e-01,  8.3950e-01, -4.2122e-01, -2.7169e-01,  1.3765e+00,\n",
      "         1.9404e-03, -7.2239e-01,  3.0578e-01, -7.9274e-02,  7.4520e-01,\n",
      "         1.0329e-01,  1.4055e-01, -1.1092e+00, -2.4865e-01,  1.0099e+00,\n",
      "         2.8870e-01,  3.7637e-01,  2.2076e-01,  2.3821e-01,  1.0738e+00,\n",
      "         1.6249e-01, -1.8562e+00, -6.8208e-01, -1.0526e+00,  8.5826e-01,\n",
      "        -1.5128e-01,  6.2102e-01, -5.4666e-01,  7.3239e-01, -4.3432e-01,\n",
      "         6.3752e-03, -1.3024e-01, -1.1164e-01, -6.6427e-01, -8.3378e-01,\n",
      "        -1.3626e+00, -4.3327e-01, -3.5389e-01,  7.2995e-01,  1.4612e+00,\n",
      "         1.1516e+00,  1.8966e+00,  8.8230e-01,  5.2221e-01, -1.9610e+00,\n",
      "         1.5242e-01,  4.2316e-01, -2.5094e+00, -8.3551e-02,  6.9278e-01,\n",
      "        -1.5326e+00,  1.4821e+00,  4.8090e-01,  2.8395e-01, -2.3458e-01,\n",
      "        -1.1493e+00, -2.6641e+00, -5.8393e-01, -8.2938e-01,  3.8847e-01,\n",
      "        -6.3079e-01, -1.1804e+00,  3.4304e-01, -2.5379e-01, -5.9796e-01,\n",
      "         1.0402e+00,  8.5637e-01,  9.0537e-01,  7.3992e-01, -1.9489e-01,\n",
      "        -5.9842e-02, -1.4103e+00, -1.5040e+00, -9.3010e-01, -8.5670e-02,\n",
      "        -1.5787e+00,  1.6979e+00,  1.7052e-01, -1.3268e-01, -2.3127e+00,\n",
      "        -1.0576e+00, -2.0878e-01, -4.7334e-01,  1.7842e+00, -7.8881e-01,\n",
      "         1.4472e+00, -6.7230e-01, -6.5708e-01,  1.6243e+00,  3.3458e-01])...\n",
      "coding: tensor([ 6.9349e-01,  2.1945e+00, -3.4659e-01,  5.2093e-01,  5.1745e-01,\n",
      "        -2.4519e-01,  1.7629e-01, -1.5921e+00, -2.5516e-01,  5.0046e-01,\n",
      "         2.4193e-01,  3.4791e-01, -2.7992e-01, -1.1664e+00,  1.8946e+00,\n",
      "         1.2752e-01, -1.4612e+00, -7.5552e-01, -1.5345e+00, -4.3800e-01,\n",
      "        -2.4418e-03, -2.0795e-01, -1.1684e-01, -1.0218e+00,  5.1796e-01,\n",
      "        -7.6891e-01, -3.7896e-01,  4.7556e-01, -7.4699e-01,  2.0371e+00,\n",
      "        -7.7355e-03, -3.2200e-01,  1.5895e+00,  2.6794e-01, -8.8700e-01,\n",
      "        -9.6870e-01, -1.4119e+00, -1.9152e+00, -5.1793e-01, -1.7192e+00,\n",
      "        -1.5790e+00,  5.3015e-01, -4.3957e-01, -1.4615e+00, -1.2469e+00,\n",
      "        -5.4756e-03,  4.5220e-01,  5.6526e-01, -3.0777e-01, -1.9570e-01,\n",
      "         3.4985e+00,  8.7888e-02,  6.7763e-01, -9.3473e-02,  7.7910e-01,\n",
      "         3.5039e-03,  3.0346e-01,  1.3935e+00,  7.5268e-01, -1.8957e-01,\n",
      "         1.5273e-01,  3.1886e-01,  5.0928e-01,  1.6598e+00,  2.7519e-01,\n",
      "         1.9884e-01, -7.3292e-02, -2.2001e-01,  2.6598e-01, -5.1585e-02,\n",
      "         2.7954e-01,  9.4609e-01,  9.7649e-01, -3.3597e-01,  1.7188e+00,\n",
      "         2.2010e-01, -1.3953e+00,  2.0240e+00,  9.4365e-01, -1.8911e-01,\n",
      "        -2.2969e+00, -3.4318e-01,  1.0246e+00, -6.0111e-01,  1.0704e+00,\n",
      "        -5.0674e-01, -1.9217e-01,  1.2126e-01,  6.3173e-01, -2.5088e-01,\n",
      "        -8.6972e-01,  7.0644e-01, -3.9098e-01, -1.9937e-01,  3.8131e-01,\n",
      "        -9.3052e-01, -8.2588e-01,  1.4092e+00, -2.2764e-01,  3.8585e-01])...\n",
      "in: tensor([ 0.8650,  0.7610,  1.0987, -1.1839, -0.8968,  0.7866, -0.7051, -0.9965,\n",
      "         0.9227,  0.8616,  0.5359, -0.9916,  2.1031, -1.9236,  0.2114, -1.6044,\n",
      "         0.4625,  0.9952,  0.5328, -1.3678, -0.0837, -0.4445,  1.6455, -0.7981,\n",
      "         0.4981,  1.6599, -0.2672, -1.7302,  0.8230,  0.4611, -0.5661,  0.2596,\n",
      "        -1.4187,  0.8535, -0.6992,  1.3106, -2.2801, -0.3639, -0.0393, -1.0943,\n",
      "         1.2731, -1.8489, -0.3729,  3.3543, -2.2612, -1.2929, -0.2763,  0.1483,\n",
      "         0.2496, -0.7464, -0.5259,  1.8614,  1.1702,  1.9650, -0.7989, -1.6404,\n",
      "        -1.6270,  1.6171,  0.7640, -0.2773,  0.2039,  0.5613,  0.0459,  0.1356,\n",
      "         0.3922, -0.1363,  0.5961, -0.0289, -1.2368,  0.5434,  0.5387, -0.8010,\n",
      "        -1.2507,  0.8376, -0.1534,  0.9692,  0.6289, -0.3613, -0.7414,  1.3925,\n",
      "         0.6280,  0.4447, -0.5044, -0.0733, -0.5563,  1.3768,  1.0265,  1.9612,\n",
      "        -0.2982, -1.7982,  0.8357, -0.4298, -0.7109,  1.0848, -0.5294,  0.1904,\n",
      "         0.8637,  0.1654,  1.1726,  0.6421])...\n",
      "Python: tensor([ 1.3685,  1.5005,  0.5905, -0.9589, -0.6013, -0.4894,  1.2060,  0.4207,\n",
      "         0.0099,  0.4298,  1.2337,  0.8499, -0.2915,  0.8352,  1.4447,  0.5733,\n",
      "         0.5739, -0.6129,  0.3023, -0.4243,  0.1339,  0.1916, -0.9422, -1.4189,\n",
      "        -2.3425,  1.5904, -0.2495,  2.0980,  1.0178, -0.7086, -0.9547,  2.6173,\n",
      "         1.0718,  1.6886,  0.2835, -0.4825, -0.6741,  0.6472, -1.3244,  0.0360,\n",
      "         0.6673, -1.0146,  1.2835,  1.1900, -0.3340,  0.2109, -0.3380, -0.4497,\n",
      "         1.6131,  0.5162, -0.4905,  0.7203, -0.1355,  0.2177,  0.4930, -0.6963,\n",
      "         1.6326, -0.6410, -0.6969,  1.1012, -0.9755, -1.4707,  0.1789, -1.6458,\n",
      "        -1.8410,  0.3591, -1.3362,  0.0971,  0.4287,  0.1641,  0.6205,  0.3658,\n",
      "         0.6122, -0.7219,  0.2442,  0.7049,  1.1210, -1.6070,  0.1715, -0.4970,\n",
      "         2.4767,  0.0725, -0.6479, -0.2899, -0.6451, -0.6810,  1.0050, -0.3079,\n",
      "        -0.7936, -0.9280,  1.3923,  0.5027, -1.1263,  0.4353,  0.1633,  1.7797,\n",
      "         1.1065, -1.5728, -0.6671,  0.4893])...\n",
      "and: tensor([ 0.4467, -0.5146,  0.1263,  1.0881, -0.3053, -0.4487, -0.2667,  2.0166,\n",
      "         0.2851, -0.6910,  0.1798,  1.0122, -2.2350, -0.7150, -1.0470, -1.7279,\n",
      "         0.5979, -0.9561,  1.2183,  0.7862,  0.4912,  1.3577,  0.2252,  1.0877,\n",
      "         1.0847,  0.6038, -0.0960,  0.1662, -0.2268,  0.0777, -2.4257, -1.2340,\n",
      "        -1.6721,  0.0399, -1.8820,  0.4170,  0.5973,  0.5159,  2.5365,  0.4021,\n",
      "        -1.5371,  1.4211,  0.1041, -0.1240, -0.1475, -0.8835,  0.7486,  1.7786,\n",
      "         0.1447, -0.1858, -0.3940, -0.2461, -1.0524, -1.4203, -0.3941, -0.2923,\n",
      "         0.2330, -2.0016, -0.7948, -1.2435, -0.9076,  1.4051,  0.6296,  0.4476,\n",
      "        -1.0222,  0.2877,  0.6267, -0.3940, -0.2213, -0.4024, -1.0597, -0.0791,\n",
      "        -0.7520,  1.3535,  0.7242,  0.2306, -1.4361, -0.6642, -0.4994, -1.0108,\n",
      "        -0.4447, -0.1365, -0.2180, -0.4163, -0.6682, -0.8944,  0.5383, -0.5151,\n",
      "        -0.4434,  0.7432, -0.2927,  2.5185,  1.4571,  1.2573, -0.3342, -0.1740,\n",
      "        -1.1326, -0.7708, -1.1251,  1.8187])...\n",
      "Java.: tensor([ 1.4563,  0.7731, -1.0542, -0.8290,  1.2259, -0.6466, -0.2215, -0.2282,\n",
      "        -0.6427, -0.2761,  0.0834, -0.5931,  0.7267, -0.1706,  0.6043, -1.0172,\n",
      "         0.6518, -0.7707,  0.3850, -0.3600,  0.6634,  0.5658,  0.2436, -0.5579,\n",
      "         0.4146,  1.3936,  0.5955,  0.1688,  0.1410, -0.2511, -0.6533, -0.2575,\n",
      "         0.2635, -1.0774, -1.0127,  0.8909, -0.6544, -0.1801, -0.4202,  1.3132,\n",
      "         1.2376,  1.2850, -1.9982, -0.8459,  0.1434,  1.6802, -0.7355,  1.8265,\n",
      "        -0.1938,  0.7451, -1.5922,  0.3485, -1.1072, -0.5425,  0.4680,  0.5684,\n",
      "         0.7110, -0.2159, -0.6140,  0.1244, -0.9224,  2.0113,  0.3141,  1.1058,\n",
      "         0.5932,  2.0740,  0.8010, -1.7253,  0.1185,  0.5508, -0.1090,  0.0724,\n",
      "         0.3735, -1.1330, -0.7922,  1.0090, -0.3426, -1.0826,  0.7791,  1.2850,\n",
      "         0.4544,  0.0739, -0.4257, -0.4085, -0.6512,  0.2835,  0.6193, -0.0947,\n",
      "        -1.6931, -1.2205,  2.0730,  0.3107,  0.9022, -1.1194,  1.2667,  0.7262,\n",
      "        -1.9019,  1.0915, -0.2370,  0.1837])...\n",
      "don't: tensor([-2.8440e+00, -1.9363e-01, -7.5126e-02, -2.0313e-01,  7.9412e-01,\n",
      "         6.2773e-01, -1.7795e-01, -1.3584e+00,  6.4354e-01, -4.2521e-02,\n",
      "        -2.1143e-01, -1.6331e+00,  8.9741e-01,  1.1288e+00,  2.1056e+00,\n",
      "         6.9413e-02, -6.5986e-01,  9.1347e-01, -8.6197e-01,  1.4354e-01,\n",
      "        -8.3209e-01, -9.8591e-01, -1.6290e+00, -1.6359e+00,  3.4271e-02,\n",
      "         1.2927e-01, -1.8956e-01,  1.7283e+00,  3.9554e-02,  8.9140e-01,\n",
      "         2.2269e-01, -6.2578e-01,  1.2572e+00, -1.1542e+00, -3.2424e-01,\n",
      "         1.3118e+00, -2.1474e-01,  2.0227e+00, -1.4773e+00, -1.2706e+00,\n",
      "        -7.6672e-01, -9.9494e-03,  1.8809e+00, -1.9428e+00, -8.2035e-01,\n",
      "        -1.0412e+00, -1.4391e+00, -2.6986e-01, -8.0794e-01, -2.3761e+00,\n",
      "         3.4319e-01, -1.1568e+00, -9.5616e-01,  2.0872e-01,  2.4106e-02,\n",
      "         1.0105e-01, -1.2689e+00, -3.8526e-01, -2.8037e-01,  9.2188e-01,\n",
      "        -2.0141e+00, -2.7458e-01,  6.9343e-01, -2.3385e-01,  1.0534e+00,\n",
      "        -1.1650e+00,  5.8143e-01,  9.3766e-01,  2.4208e+00, -4.9593e-01,\n",
      "        -2.0893e-01, -1.6798e+00, -2.4630e-01,  1.3768e+00, -1.5808e-01,\n",
      "        -1.0462e+00, -5.8354e-02, -4.5084e-01,  6.3716e-01, -8.4579e-01,\n",
      "        -9.8474e-02,  1.6833e+00,  3.7485e-01, -1.0960e+00, -7.9003e-01,\n",
      "        -1.2776e+00, -4.9356e-01, -9.2721e-01, -2.9664e-01,  1.5238e-03,\n",
      "         1.2164e+00,  1.9888e+00,  4.3912e-01,  1.1970e+00,  4.8753e-01,\n",
      "        -8.3455e-01,  1.8339e+00, -7.7845e-01,  8.2013e-01, -2.0571e-01])...\n"
     ]
    }
   ],
   "execution_count": 19
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "用周围词预测中心词，把周围词的词向量取平均，然后预测对应的中心词，预测结果中中心词对应的输出概率要越高越好。",
   "id": "474ab5655baaafb9"
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": [
    "## SkipGram和CBOW的对比\n",
    "\n",
    "SkipGram在训练时，是一个词预测多个词，然后取概率最高的，所以它更能找出中心词和哪个词是更为接近的，也就是更能识别出词和词之间的语义关系。\n",
    "SkipGram在训练时，是按词对来进行训练的，所以训练起来需要的时间相对更久一点。\n",
    "SkipGram相当于训练的更精细一点，适合小数据集。\n",
    "\n",
    "CBOW是多个词预测一个词，训练会更快，CBOW训练的更粗放一点，适合大数据集。"
   ],
   "id": "be786fcd5ec2884"
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.20"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
