{
  "cells": [
    {
      "cell_type": "code",
      "execution_count": 2,
      "metadata": {
        "id": "32CW_ixCjTAr"
      },
      "outputs": [],
      "source": [
        "import jieba\n",
        "import pandas as pd\n",
        "import torch\n",
        "import numpy as np\n",
        "from torch import nn\n",
        "from transformers import TrainingArguments, Trainer, TrainerCallback\n",
        "from torch.utils.tensorboard import SummaryWriter"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 3,
      "metadata": {
        "id": "C9Q_7LUZjTAu"
      },
      "outputs": [],
      "source": [
        "class Vocab:\n",
        "    \"\"\"Vocabulary built from a GBK-encoded CSV corpus, tokenized with jieba.\"\"\"\n",
        "\n",
        "    def __init__(self, vocab_file, stop_words_file=None):\n",
        "        # NOTE: despite the name, this attribute holds the *set* of stop words.\n",
        "        self.stop_words_file = self.load_stop_words(stop_words_file)\n",
        "        self.idx2word, self.word2idx, self.words = self.load_vocab(vocab_file)\n",
        "        self.word_size = len(self.words)  # total token count of the corpus\n",
        "        self.vocab_size = len(self.idx2word)  # number of unique tokens\n",
        "\n",
        "    def load_vocab(self, vocab_file):\n",
        "        \"\"\"Tokenize every line of ``vocab_file`` with jieba.\n",
        "\n",
        "        Returns ``(idx2word, word2idx, words)`` where ``words`` is the full\n",
        "        token stream in corpus order (with stop words removed when a\n",
        "        stop-word set was loaded).\n",
        "        \"\"\"\n",
        "        idx2word = {}\n",
        "        word2idx = {}\n",
        "\n",
        "        words = []\n",
        "        contents = pd.read_csv(vocab_file, encoding=\"GBK\", header=None)\n",
        "\n",
        "        for idx, row in contents.iterrows():\n",
        "            line = row[0]\n",
        "            # BUG FIX: the original condition was inverted -- it filtered\n",
        "            # against the stop-word set only when that set was *empty*,\n",
        "            # so stop words were never actually removed.\n",
        "            if self.stop_words_file:\n",
        "                current_line_words = [\n",
        "                    word for word in jieba.cut(line) if word not in self.stop_words_file\n",
        "                ]\n",
        "            else:\n",
        "                current_line_words = list(jieba.cut(line))\n",
        "            words.extend(current_line_words)\n",
        "\n",
        "        for idx, word in enumerate(set(words)):\n",
        "            idx2word[idx] = word\n",
        "            word2idx[word] = idx\n",
        "        return idx2word, word2idx, words\n",
        "\n",
        "    def load_stop_words(self, stop_words_file):\n",
        "        \"\"\"Read one stop word per line; returns an empty set when no file is given.\"\"\"\n",
        "        if stop_words_file is None:\n",
        "            return set()\n",
        "        else:\n",
        "            # NOTE(review): no explicit encoding -- relies on the platform\n",
        "            # default; confirm the stopwords file matches it.\n",
        "            with open(stop_words_file, \"r\") as f:\n",
        "                return set(f.read().splitlines())\n",
        "\n",
        "    def get_idx(self, word):\n",
        "        \"\"\"Map a word to its integer id (KeyError if unknown).\"\"\"\n",
        "        return self.word2idx[word]\n",
        "\n",
        "    def get_word(self, idx):\n",
        "        \"\"\"Map an integer id back to its word (KeyError if unknown).\"\"\"\n",
        "        return self.idx2word[idx]"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 4,
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "UpG5xJY-jTAv",
        "outputId": "0506afb8-67a4-460f-c892-1c41310a675c"
      },
      "outputs": [
        {
          "name": "stderr",
          "output_type": "stream",
          "text": [
            "Building prefix dict from the default dictionary ...\n",
            "DEBUG:jieba:Building prefix dict from the default dictionary ...\n",
            "Loading model from cache /tmp/jieba.cache\n",
            "DEBUG:jieba:Loading model from cache /tmp/jieba.cache\n",
            "Loading model cost 1.740 seconds.\n",
            "DEBUG:jieba:Loading model cost 1.740 seconds.\n",
            "Prefix dict has been built successfully.\n",
            "DEBUG:jieba:Prefix dict has been built successfully.\n"
          ]
        }
      ],
      "source": [
        "vocab = Vocab(\"./数学原始数据.csv\", \"./stopwords.txt\")"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 5,
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "YvhJhpiAjTAv",
        "outputId": "54c03f1b-b28d-4270-9d86-4c8c41eff0cb"
      },
      "outputs": [
        {
          "data": {
            "text/plain": [
              "(152832, 5296)"
            ]
          },
          "execution_count": 5,
          "metadata": {},
          "output_type": "execute_result"
        }
      ],
      "source": [
        "vocab.word_size, vocab.vocab_size"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 6,
      "metadata": {
        "id": "N7K6U7u2jTAw"
      },
      "outputs": [],
      "source": [
        "class MyDataset(torch.utils.data.Dataset):\n",
        "    \"\"\"Sliding-window CBOW dataset.\n",
        "\n",
        "    Each sample is the 2*ngram context word ids (``inputs``) around a\n",
        "    centre word, and the centre word's id (``labels``).\n",
        "    \"\"\"\n",
        "\n",
        "    def __init__(self, ngram: int, vocab: Vocab):\n",
        "        self.ngram = ngram\n",
        "        self.vocab = vocab\n",
        "        self.word_size = vocab.word_size\n",
        "        self.vocab_size = vocab.vocab_size\n",
        "\n",
        "    def __len__(self):\n",
        "        # A window spans 2*ngram + 1 tokens, so the number of valid start\n",
        "        # positions is word_size - 2*ngram.  BUG FIX: the original returned\n",
        "        # one fewer (an extra -1), silently dropping the last sample.\n",
        "        return self.word_size - 2 * self.ngram\n",
        "\n",
        "    def __getitem__(self, idx):\n",
        "        left_idx = idx\n",
        "        right_idx = idx + 2 * self.ngram + 1\n",
        "        # The slice copies, so pop() below does not mutate vocab.words.\n",
        "        words = self.vocab.words[left_idx:right_idx]\n",
        "        current_word = words.pop(self.ngram)  # centre word = prediction target\n",
        "        label = self.vocab.get_idx(current_word)\n",
        "\n",
        "        another_word = [self.vocab.get_idx(word) for word in words]\n",
        "        return {\n",
        "            \"inputs\": torch.tensor(another_word, dtype=torch.long),\n",
        "            \"labels\": torch.tensor(label, dtype=torch.long),\n",
        "        }"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 7,
      "metadata": {
        "id": "cpk1StKPjTAw"
      },
      "outputs": [],
      "source": [
        "data = MyDataset(2, vocab)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 8,
      "metadata": {
        "id": "FZJP1lTyjTAw"
      },
      "outputs": [],
      "source": [
        "data_iter = torch.utils.data.DataLoader(data, batch_size=512, shuffle=True)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 9,
      "metadata": {
        "id": "0YsmRgMCjTAx"
      },
      "outputs": [],
      "source": [
        "class Net(nn.Module):\n",
        "    \"\"\"CBOW-style word2vec: embed context words, average their logits,\n",
        "    and score against the whole vocabulary.\"\"\"\n",
        "\n",
        "    def __init__(self, vocab_size, embedding_size):\n",
        "        super().__init__()\n",
        "        self.vocab_size = vocab_size\n",
        "        self.embedding_size = embedding_size\n",
        "\n",
        "        self.model = nn.Sequential(\n",
        "            nn.Embedding(\n",
        "                vocab_size,\n",
        "                embedding_size,\n",
        "            ),\n",
        "            nn.Linear(\n",
        "                embedding_size,\n",
        "                vocab_size,\n",
        "                bias=True,\n",
        "            ),\n",
        "        )\n",
        "\n",
        "    def forward(self, inputs, labels=None):\n",
        "        \"\"\"inputs: LongTensor [batch_size, ngram] of context word ids;\n",
        "        labels: optional LongTensor [batch_size] of centre-word ids.\n",
        "\n",
        "        Returns {\"logits\": [batch_size, vocab_size]}, plus \"loss\" when\n",
        "        labels are given.\n",
        "        \"\"\"\n",
        "        batch_size, ngram = inputs.shape\n",
        "        # [batch_size, ngram] -> [batch_size * ngram]\n",
        "        inputs = inputs.flatten()\n",
        "        # [batch_size * ngram] -> [batch_size * ngram, vocab_size]\n",
        "        inputs_logits = self.model(inputs)\n",
        "        # [batch_size * ngram, vocab_size] -> [batch_size, ngram, vocab_size]\n",
        "        inputs_logits = inputs_logits.reshape(batch_size, ngram, self.vocab_size)\n",
        "        # Average the per-context-word logits -> [batch_size, vocab_size]\n",
        "        inputs_logits = torch.mean(inputs_logits, dim=1)\n",
        "        if labels is not None:\n",
        "            # Build the criterion only when it is needed (the original\n",
        "            # constructed it on every forward pass, labels or not).\n",
        "            # CrossEntropyLoss takes logits [batch_size, vocab_size] and\n",
        "            # class indices [batch_size] -- the original comment wrongly\n",
        "            # described labels as [batch_size, vocab_size].\n",
        "            loss_fn = nn.CrossEntropyLoss()\n",
        "            loss = loss_fn(inputs_logits, labels)\n",
        "            return {\"logits\": inputs_logits, \"loss\": loss}\n",
        "        else:\n",
        "            return {\"logits\": inputs_logits}"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 10,
      "metadata": {
        "id": "rgYoX7MYjTAx"
      },
      "outputs": [],
      "source": [
        "model = Net(vocab.vocab_size, 512)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 11,
      "metadata": {
        "id": "1xCjkYuEjTAx"
      },
      "outputs": [],
      "source": [
        "class MyCallBacks(TrainerCallback):\n",
        "    \"\"\"Trainer hooks: trace the model graph to TensorBoard once at the\n",
        "    start of training and report optimizer/scheduler details on the\n",
        "    console.\"\"\"\n",
        "\n",
        "    def on_train_begin(\n",
        "        self, args, state, control, model, optimizer, lr_scheduler, **kwargs\n",
        "    ):\n",
        "        # One dummy context window is enough to trace the graph.\n",
        "        sample_batch = torch.tensor([[1, 2, 3, 4]], dtype=torch.long)\n",
        "        with SummaryWriter(\"./word2vec\") as writer:\n",
        "            writer.add_graph(model, sample_batch, use_strict_trace=False)\n",
        "\n",
        "        print(\"\\nStarting training\")\n",
        "        print(f\"\\nUsing optimizer: {optimizer}\")\n",
        "        print(f\"\\nUsing lr_scheduler: {lr_scheduler}\")\n",
        "\n",
        "    def on_train_end(self, args, state, control, optimizer, **kwargs):\n",
        "        # Report the learning rate the run finished with.\n",
        "        final_lr = optimizer.param_groups[0]['lr']\n",
        "        print(f\"\\nlr: {final_lr}\")\n",
        "\n",
        "    def on_save(self, args, state, control, **kwargs):\n",
        "        print(\"\\nSaving model\")"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 12,
      "metadata": {
        "id": "6aww0jGFjTAy"
      },
      "outputs": [],
      "source": [
        "# Training configuration; logs and checkpoints both go to ./word2vec.\n",
        "training_args = TrainingArguments(\n",
        "    output_dir=\"./word2vec\",\n",
        "    num_train_epochs=3,\n",
        "    logging_strategy=\"steps\",\n",
        "    logging_dir=\"./word2vec\",\n",
        "    save_strategy=\"epoch\",  # checkpoint once per epoch\n",
        "    use_cpu=False,  # train on the accelerator (GPU) when available\n",
        "    save_total_limit=3,  # keep at most the 3 newest checkpoints\n",
        ")"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 13,
      "metadata": {
        "id": "9BFHd0VKjTAy"
      },
      "outputs": [],
      "source": [
        "# NOTE(review): optimizers=(SGD, None) supplies a custom optimizer with\n",
        "# no scheduler -- presumably Trainer then builds its default LR schedule;\n",
        "# confirm against the transformers Trainer docs.\n",
        "trainer = Trainer(\n",
        "    model=model,\n",
        "    args=training_args,\n",
        "    train_dataset=data,\n",
        "    optimizers=(torch.optim.SGD(model.parameters(), 0.05), None),\n",
        "    callbacks=[MyCallBacks],  # the callback *class* is passed, not an instance\n",
        ")"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 14,
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 1000
        },
        "id": "VoUKUW2QjTAy",
        "outputId": "f95e7b7a-7d11-4d2c-8f71-c900310bdbf7"
      },
      "outputs": [
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            "\n",
            "Starting training\n"
          ]
        },
        {
          "data": {
            "text/html": [
              "\n",
              "    <div>\n",
              "      \n",
              "      <progress value='57312' max='57312' style='width:300px; height:20px; vertical-align: middle;'></progress>\n",
              "      [57312/57312 02:49, Epoch 3/3]\n",
              "    </div>\n",
              "    <table border=\"1\" class=\"dataframe\">\n",
              "  <thead>\n",
              " <tr style=\"text-align: left;\">\n",
              "      <th>Step</th>\n",
              "      <th>Training Loss</th>\n",
              "    </tr>\n",
              "  </thead>\n",
              "  <tbody>\n",
              "    <tr>\n",
              "      <td>500</td>\n",
              "      <td>8.403800</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>1000</td>\n",
              "      <td>8.053700</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>1500</td>\n",
              "      <td>7.836400</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>2000</td>\n",
              "      <td>7.587500</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>2500</td>\n",
              "      <td>7.427900</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>3000</td>\n",
              "      <td>7.290900</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>3500</td>\n",
              "      <td>7.209700</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>4000</td>\n",
              "      <td>6.999400</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>4500</td>\n",
              "      <td>6.879000</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>5000</td>\n",
              "      <td>6.820200</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>5500</td>\n",
              "      <td>6.747600</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>6000</td>\n",
              "      <td>6.658300</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>6500</td>\n",
              "      <td>6.618500</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>7000</td>\n",
              "      <td>6.550200</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>7500</td>\n",
              "      <td>6.539700</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>8000</td>\n",
              "      <td>6.412200</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>8500</td>\n",
              "      <td>6.340400</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>9000</td>\n",
              "      <td>6.319600</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>9500</td>\n",
              "      <td>6.248600</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>10000</td>\n",
              "      <td>6.201200</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>10500</td>\n",
              "      <td>6.231600</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>11000</td>\n",
              "      <td>6.123100</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>11500</td>\n",
              "      <td>6.182100</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>12000</td>\n",
              "      <td>6.074700</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>12500</td>\n",
              "      <td>6.133100</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>13000</td>\n",
              "      <td>6.120400</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>13500</td>\n",
              "      <td>6.067300</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>14000</td>\n",
              "      <td>6.007200</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>14500</td>\n",
              "      <td>5.997900</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>15000</td>\n",
              "      <td>5.915500</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>15500</td>\n",
              "      <td>5.968700</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>16000</td>\n",
              "      <td>5.861400</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>16500</td>\n",
              "      <td>5.877700</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>17000</td>\n",
              "      <td>5.849300</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>17500</td>\n",
              "      <td>5.861800</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>18000</td>\n",
              "      <td>5.829000</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>18500</td>\n",
              "      <td>5.812600</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>19000</td>\n",
              "      <td>5.841600</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>19500</td>\n",
              "      <td>5.625400</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>20000</td>\n",
              "      <td>5.630700</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>20500</td>\n",
              "      <td>5.654400</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>21000</td>\n",
              "      <td>5.626900</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>21500</td>\n",
              "      <td>5.591700</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>22000</td>\n",
              "      <td>5.662300</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>22500</td>\n",
              "      <td>5.530000</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>23000</td>\n",
              "      <td>5.556200</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>23500</td>\n",
              "      <td>5.538000</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>24000</td>\n",
              "      <td>5.531000</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>24500</td>\n",
              "      <td>5.485100</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>25000</td>\n",
              "      <td>5.514200</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>25500</td>\n",
              "      <td>5.522900</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>26000</td>\n",
              "      <td>5.403300</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>26500</td>\n",
              "      <td>5.430400</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>27000</td>\n",
              "      <td>5.569100</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>27500</td>\n",
              "      <td>5.419300</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>28000</td>\n",
              "      <td>5.587900</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>28500</td>\n",
              "      <td>5.491300</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>29000</td>\n",
              "      <td>5.386200</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>29500</td>\n",
              "      <td>5.396300</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>30000</td>\n",
              "      <td>5.301700</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>30500</td>\n",
              "      <td>5.493000</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>31000</td>\n",
              "      <td>5.472700</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>31500</td>\n",
              "      <td>5.444300</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>32000</td>\n",
              "      <td>5.332900</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>32500</td>\n",
              "      <td>5.464600</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>33000</td>\n",
              "      <td>5.366300</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>33500</td>\n",
              "      <td>5.339700</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>34000</td>\n",
              "      <td>5.383300</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>34500</td>\n",
              "      <td>5.367000</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>35000</td>\n",
              "      <td>5.326100</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>35500</td>\n",
              "      <td>5.315800</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>36000</td>\n",
              "      <td>5.314400</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>36500</td>\n",
              "      <td>5.357000</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>37000</td>\n",
              "      <td>5.342400</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>37500</td>\n",
              "      <td>5.407300</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>38000</td>\n",
              "      <td>5.337100</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>38500</td>\n",
              "      <td>5.260600</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>39000</td>\n",
              "      <td>5.301000</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>39500</td>\n",
              "      <td>5.239800</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>40000</td>\n",
              "      <td>5.248000</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>40500</td>\n",
              "      <td>5.189900</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>41000</td>\n",
              "      <td>5.216600</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>41500</td>\n",
              "      <td>5.221700</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>42000</td>\n",
              "      <td>5.239600</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>42500</td>\n",
              "      <td>5.235600</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>43000</td>\n",
              "      <td>5.125600</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>43500</td>\n",
              "      <td>5.194800</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>44000</td>\n",
              "      <td>5.146000</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>44500</td>\n",
              "      <td>5.199500</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>45000</td>\n",
              "      <td>5.240900</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>45500</td>\n",
              "      <td>5.223700</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>46000</td>\n",
              "      <td>5.240000</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>46500</td>\n",
              "      <td>5.153500</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>47000</td>\n",
              "      <td>5.078100</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>47500</td>\n",
              "      <td>5.171600</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>48000</td>\n",
              "      <td>5.211100</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>48500</td>\n",
              "      <td>5.078800</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>49000</td>\n",
              "      <td>5.178900</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>49500</td>\n",
              "      <td>5.198500</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>50000</td>\n",
              "      <td>5.240400</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>50500</td>\n",
              "      <td>5.212700</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>51000</td>\n",
              "      <td>5.147800</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>51500</td>\n",
              "      <td>5.205900</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>52000</td>\n",
              "      <td>5.225700</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>52500</td>\n",
              "      <td>5.115100</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>53000</td>\n",
              "      <td>5.159400</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>53500</td>\n",
              "      <td>5.208200</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>54000</td>\n",
              "      <td>5.220500</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>54500</td>\n",
              "      <td>5.157700</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>55000</td>\n",
              "      <td>5.159600</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>55500</td>\n",
              "      <td>5.178100</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>56000</td>\n",
              "      <td>5.200700</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>56500</td>\n",
              "      <td>5.186800</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <td>57000</td>\n",
              "      <td>5.206400</td>\n",
              "    </tr>\n",
              "  </tbody>\n",
              "</table><p>"
            ],
            "text/plain": [
              "<IPython.core.display.HTML object>"
            ]
          },
          "metadata": {},
          "output_type": "display_data"
        },
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            "\n",
            "Saving model\n",
            "\n",
            "Saving model\n",
            "\n",
            "Saving model\n",
            "\n",
            "Saving model\n",
            "\n",
            "Ending training\n"
          ]
        },
        {
          "data": {
            "text/plain": [
              "TrainOutput(global_step=57312, training_loss=5.714975894861951, metrics={'train_runtime': 171.6222, 'train_samples_per_second': 2671.456, 'train_steps_per_second': 333.943, 'total_flos': 0.0, 'train_loss': 5.714975894861951, 'epoch': 3.0})"
            ]
          },
          "execution_count": 14,
          "metadata": {},
          "output_type": "execute_result"
        }
      ],
      "source": [
        "trainer.train()"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 15,
      "metadata": {
        "id": "n0RRWjyEjTAy"
      },
      "outputs": [],
      "source": [
        "torch.save(model.state_dict(), \"./word2vec.pth\")"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 16,
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "q4UwKDmvmlE_",
        "outputId": "467d093c-2c71-4ac0-d77e-5735e76c2e1e"
      },
      "outputs": [
        {
          "data": {
            "text/plain": [
              "Net(\n",
              "  (model): Sequential(\n",
              "    (0): Embedding(5296, 512)\n",
              "    (1): Linear(in_features=512, out_features=5296, bias=True)\n",
              "  )\n",
              ")"
            ]
          },
          "execution_count": 16,
          "metadata": {},
          "output_type": "execute_result"
        }
      ],
      "source": [
        "model.eval()"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 17,
      "metadata": {
        "id": "MaKaBZyR8x56"
      },
      "outputs": [],
      "source": [
        "def cos(a, b):\n",
        "    \"\"\"Cosine similarity between two 1-D tensors.\"\"\"\n",
        "    dot_product = a.dot(b)\n",
        "    norm_product = a.norm() * b.norm()\n",
        "    return dot_product / norm_product"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 18,
      "metadata": {
        "id": "YU6J11ow8x6E"
      },
      "outputs": [],
      "source": [
        "# Layer 0 of the Sequential is the trained nn.Embedding -- use it to\n",
        "# look up word vectors directly.\n",
        "encoder = model.model[0]\n",
        "\n",
        "token = \"算术\"\n",
        "# NOTE(review): device is hardcoded to \"cuda\", so this cell fails on a\n",
        "# CPU-only machine; consider deriving the device from the model instead.\n",
        "embedding1 = encoder(torch.tensor([vocab.get_idx(token)], device=\"cuda\"))\n",
        "\n",
        "token2similarity = {}"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 19,
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "TvkETX3q8x6L",
        "outputId": "0ae40f2c-2898-43e0-8fd3-5b3b9eba08f9"
      },
      "outputs": [
        {
          "data": {
            "text/plain": [
              "['算术', '点积', 'radix', '开立方', '出', '引圆', '180', '圆弧', '右上角', '以少']"
            ]
          },
          "execution_count": 19,
          "metadata": {},
          "output_type": "execute_result"
        }
      ],
      "source": [
        "# Score every vocabulary word against the query embedding, then show\n",
        "# the 10 most similar tokens (the query word itself ranks first).\n",
        "for idx, word in vocab.idx2word.items():\n",
        "    embedding2 = encoder(torch.tensor([idx], device=\"cuda\"))\n",
        "    cos_similarity = cos(embedding1.flatten(), embedding2.flatten()).item()\n",
        "    token2similarity[word] = cos_similarity\n",
        "\n",
        "sorted(token2similarity, key=token2similarity.get, reverse=True)[:10]"
      ]
    }
  ],
  "metadata": {
    "accelerator": "GPU",
    "colab": {
      "gpuType": "T4",
      "provenance": []
    },
    "kernelspec": {
      "display_name": "unlock-hf",
      "language": "python",
      "name": "python3"
    },
    "language_info": {
      "codemirror_mode": {
        "name": "ipython",
        "version": 3
      },
      "file_extension": ".py",
      "mimetype": "text/x-python",
      "name": "python",
      "nbconvert_exporter": "python",
      "pygments_lexer": "ipython3",
      "version": "3.9.20"
    }
  },
  "nbformat": 4,
  "nbformat_minor": 0
}
