{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "da1b51fb",
   "metadata": {},
   "source": [
    "# Project 3: Training Word Embedding\n",
    "\n",
    "## Requirements\n",
    "\n",
    "1. **Implement the Word2Vec and GloVe algorithms from scratch using NumPy.**\n",
    "2. **You are required to implement at least two algorithms**, such as:\n",
    "   - Skip-gram\n",
    "   - CBOW\n",
    "   - Glove\n",
    "   - FastText\n",
    "3. **We highly suggest referring to the Gensim library**, which provides functions such as:\n",
    "   - Save and load pretrained word embedding dense vectors\n",
    "   - Similarity calculations with word vectors\n",
    "4. **Training with toy datasets is enough.** Here are some references:\n",
    "   - [Text 8 Dataset](https://www.kaggle.com/datasets/gupta24789/text8-word-embedding)\n",
    "   - [IMDB Dataset](https://www.kaggle.com/datasets/lakshmi25npathi/imdb-dataset-of-50k-movie-reviews)\n",
    "   - [Small Chinese Corpus](https://github.com/crownpku/Small-Chinese-Corpus)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "9fc5c3e7",
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import random\n",
    "from collections import defaultdict, Counter\n",
    "from sklearn.preprocessing import normalize\n",
    "import pandas as pd\n",
    "import re"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "84bd8f70",
   "metadata": {},
   "source": [
    "## 1. Implement the Skip-gram algorithm."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "ce71d485",
   "metadata": {},
   "outputs": [],
   "source": [
    "class Word2Vec:\n",
    "    \"\"\"Skip-gram word2vec trained with plain NumPy.\n",
    "\n",
    "    NOTE(review): despite the `model_type` parameter, only the skip-gram\n",
    "    update is implemented; `model_type` is stored but never branched on.\n",
    "    \"\"\"\n",
    "    def __init__(self, dim=100, window_size=2, epochs=1000, learning_rate=0.01, model_type='skipgram', negative_samples=5, frequency_threshold=5):\n",
    "        # dim: embedding size; window_size: context radius on each side;\n",
    "        # negative_samples: negatives drawn per (center, context) pair;\n",
    "        # frequency_threshold: keep only words seen MORE than this many times.\n",
    "        self.dim = dim\n",
    "        self.window_size = window_size\n",
    "        self.epochs = epochs\n",
    "        self.learning_rate = learning_rate\n",
    "        self.model_type = model_type\n",
    "        self.negative_samples = negative_samples\n",
    "        self.frequency_threshold = frequency_threshold\n",
    "        self.word_indices = {}\n",
    "        self.indices_word = {}\n",
    "        self.W1 = None\n",
    "        self.W2 = None\n",
    "\n",
    "    def _preprocess(self, text):\n",
    "        \"\"\"Lowercase/split the corpus, drop rare words, build the vocab maps.\n",
    "\n",
    "        NOTE(review): enumerating set(trimmed_words) makes the word->id\n",
    "        assignment depend on hash randomization, so ids (and downstream\n",
    "        results) are not reproducible across interpreter runs.\n",
    "        \"\"\"\n",
    "        words = text.lower().split()\n",
    "        word_counts = Counter(words)\n",
    "        # Strictly greater-than: words with count == threshold are dropped too.\n",
    "        trimmed_words = [word for word in words if word_counts[word] > self.frequency_threshold]\n",
    "        self.word_indices = {word: i for i, word in enumerate(set(trimmed_words))}\n",
    "        self.indices_word = {i: word for word, i in self.word_indices.items()}\n",
    "        return trimmed_words\n",
    "\n",
    "    def _generate_pairs(self, words):\n",
    "        \"\"\"Return an array of (center_id, context_id) pairs within the window.\"\"\"\n",
    "        pairs = []\n",
    "        for i, word in enumerate(words):\n",
    "            for j in range(max(0, i - self.window_size), min(len(words), i + self.window_size + 1)):\n",
    "                if i != j:\n",
    "                    pairs.append((self.word_indices[word], self.word_indices[words[j]]))\n",
    "        return np.array(pairs)\n",
    "\n",
    "    def train(self, text):\n",
    "        \"\"\"Train embeddings with a softmax + sampled-targets hybrid update.\n",
    "\n",
    "        NOTE(review): the full softmax over the vocabulary is computed for\n",
    "        every pair, so the 'negative sampling' here only restricts which\n",
    "        output rows receive the error signal -- it does not save compute.\n",
    "        Negatives are drawn uniformly and may collide with the true target.\n",
    "        \"\"\"\n",
    "        words = self._preprocess(text)\n",
    "        pairs = self._generate_pairs(words)\n",
    "        vocab_size = len(self.word_indices)\n",
    "        self.W1 = np.random.randn(vocab_size, self.dim) * 0.01\n",
    "        self.W2 = np.random.randn(vocab_size, self.dim) * 0.01\n",
    "        epsilon = 1e-10\n",
    "        initial_lr = self.learning_rate\n",
    "\n",
    "        for epoch in range(self.epochs):\n",
    "            loss = 0\n",
    "            for data, target in pairs:\n",
    "                # One-hot of the center word; only used to scatter grad_w1.\n",
    "                x = np.zeros(vocab_size)\n",
    "                x[data] = 1\n",
    "                h = self.W1[data]\n",
    "                scores = self.W2 @ h\n",
    "                y_pred = self._softmax(scores)\n",
    "                \n",
    "                # One positive plus uniform negatives (may repeat the target).\n",
    "                targets = [target] + random.sample(range(vocab_size), self.negative_samples)\n",
    "                label = np.array([1] + [0] * self.negative_samples)\n",
    "                \n",
    "                # Clamp probabilities away from 0/1 before the log.\n",
    "                y_pred[targets] = np.clip(y_pred[targets], epsilon, 1 - epsilon)\n",
    "                loss += -np.sum(label * np.log(y_pred[targets]))\n",
    "                \n",
    "                # err aliases y_pred (no copy); y_pred is not reused afterwards.\n",
    "                err = y_pred\n",
    "                err[targets] -= label\n",
    "                \n",
    "                # Outer product with the one-hot x leaves a single non-zero row,\n",
    "                # so only W1[data] effectively changes (at full-matrix cost).\n",
    "                grad_w1 = np.outer(x, np.dot(self.W2[targets].T, err[targets]))\n",
    "                grad_w2 = np.outer(h, err[targets])\n",
    "                \n",
    "                # Element-wise gradient clipping for stability.\n",
    "                grad_w1 = np.clip(grad_w1, -1, 1)\n",
    "                grad_w2 = np.clip(grad_w2, -1, 1)\n",
    "                \n",
    "                self.W1 -= self.learning_rate * grad_w1\n",
    "                self.W2[targets] -= self.learning_rate * grad_w2.T\n",
    "                \n",
    "            # Linear learning-rate decay toward 0 over the epochs.\n",
    "            self.learning_rate = initial_lr * (1 - epoch / self.epochs)\n",
    "\n",
    "            # `epoch % 1 == 0` is always true, i.e. log every epoch.\n",
    "            if epoch % 1 == 0:\n",
    "                print(f'Epoch: {epoch}, Loss: {loss:.4f}')\n",
    "\n",
    "    def _softmax(self, x):\n",
    "        \"\"\"Numerically stable softmax over a 1-D score vector.\"\"\"\n",
    "        e_x = np.exp(x - np.max(x))\n",
    "        return e_x / e_x.sum(axis=0)\n",
    "\n",
    "    def save_model(self, filepath):\n",
    "        \"\"\"Persist weights and the vocabulary to an .npz archive.\"\"\"\n",
    "        np.savez(filepath, W1=self.W1, W2=self.W2, word_indices=self.word_indices)\n",
    "\n",
    "    def load_model(self, filepath):\n",
    "        \"\"\"Restore weights and vocabulary saved by save_model.\"\"\"\n",
    "        data = np.load(filepath, allow_pickle=True)\n",
    "        self.W1 = data['W1']\n",
    "        self.W2 = data['W2']\n",
    "        self.word_indices = data['word_indices'].item()\n",
    "        self.indices_word = {i: word for word, i in self.word_indices.items()}\n",
    "\n",
    "    def get_word_vector(self, word):\n",
    "        \"\"\"Return the input-embedding row for `word`, or None if OOV.\"\"\"\n",
    "        return self.W1[self.word_indices[word]] if word in self.word_indices else None\n",
    "\n",
    "    def most_similar(self, word, topn=10):\n",
    "        \"\"\"Return up to topn (word, score) pairs ranked by raw dot product.\n",
    "\n",
    "        NOTE(review): scores are unnormalized dot products (not cosine);\n",
    "        also, when `word` itself is not among the top topn+1 hits the\n",
    "        returned list can contain topn+1 entries.\n",
    "        \"\"\"\n",
    "        if word not in self.word_indices:\n",
    "            return []\n",
    "        word_vec = self.get_word_vector(word)\n",
    "        if word_vec is None:\n",
    "            return []\n",
    "        similarities = np.dot(self.W1, word_vec)\n",
    "        best_indices = np.argsort(-similarities)[:topn+1]\n",
    "        return [(self.indices_word[i], similarities[i]) for i in best_indices if i != self.word_indices[word]]\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "cb4e2042",
   "metadata": {},
   "source": [
    "## 2. Implement the GloVe algorithm."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "f5e1387f",
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "\n",
    "class GloVe:\n",
    "    \"\"\"GloVe-style embeddings trained on a dense co-occurrence matrix.\n",
    "\n",
    "    NOTE(review): the objective below scores a pair with the squared\n",
    "    Euclidean distance between the two word vectors instead of the\n",
    "    standard GloVe dot product, and uses a single embedding/bias table.\n",
    "    That design is kept; only the co-occurrence distance computation and\n",
    "    a bias-gradient sign are corrected.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, dim=100, epochs=1000, learning_rate=0.05, x_max=100, alpha=0.75):\n",
    "        # x_max / alpha parameterize the GloVe weighting function f(X_ij).\n",
    "        self.dim = dim\n",
    "        self.epochs = epochs\n",
    "        self.learning_rate = learning_rate\n",
    "        self.x_max = x_max\n",
    "        self.alpha = alpha\n",
    "        self.W = None\n",
    "        self.biases = None\n",
    "        self.word_indices = {}\n",
    "        self.indices_word = {}\n",
    "        self.cooccur = None\n",
    "\n",
    "    def build_cooccur_matrix(self, text, window_size=2):\n",
    "        \"\"\"Build a dense co-occurrence matrix with 1/distance weighting.\n",
    "\n",
    "        Fix: the previous version derived 'distance' from vocabulary ids\n",
    "        (abs(ctx - target)) rather than token positions, which weighted\n",
    "        pairs arbitrarily and silently dropped co-occurrences of a word\n",
    "        with itself. The weight is now 1/|i - j| over positions.\n",
    "        \"\"\"\n",
    "        words = text.lower().split()\n",
    "        vocab = list(set(words))\n",
    "        self.word_indices = {word: i for i, word in enumerate(vocab)}\n",
    "        self.indices_word = {i: word for word, i in self.word_indices.items()}\n",
    "        cooccur = np.zeros((len(vocab), len(vocab)), dtype=np.float32)\n",
    "\n",
    "        for i in range(len(words)):\n",
    "            target = self.word_indices[words[i]]\n",
    "            for j in range(max(0, i - window_size), min(len(words), i + window_size + 1)):\n",
    "                if i != j:\n",
    "                    # Closer context positions contribute more mass.\n",
    "                    cooccur[target, self.word_indices[words[j]]] += 1.0 / abs(i - j)\n",
    "\n",
    "        self.cooccur = cooccur\n",
    "\n",
    "    def train(self):\n",
    "        \"\"\"Fit embeddings by SGD on the weighted least-squares objective.\n",
    "\n",
    "        The factor of 2 from differentiating the squared distance term is\n",
    "        absorbed into the learning rate.\n",
    "        \"\"\"\n",
    "        vocab_size = len(self.word_indices)\n",
    "        self.W = np.random.randn(vocab_size, self.dim) * 0.01\n",
    "        self.biases = np.zeros(vocab_size)\n",
    "\n",
    "        for epoch in range(self.epochs):\n",
    "            loss = 0\n",
    "            for i in range(vocab_size):\n",
    "                for j in range(vocab_size):\n",
    "                    if self.cooccur[i, j] > 0:\n",
    "                        # GloVe weighting: caps the influence of frequent pairs.\n",
    "                        weight = (self.cooccur[i, j] / self.x_max) ** self.alpha if self.cooccur[i, j] < self.x_max else 1\n",
    "                        diff = np.dot(self.W[i] - self.W[j], self.W[i] - self.W[j]) + self.biases[i] + self.biases[j] - np.log(self.cooccur[i, j])\n",
    "                        loss += 0.5 * weight * diff ** 2\n",
    "\n",
    "                        grad_main = weight * diff * (self.W[i] - self.W[j])\n",
    "                        grad_bias_main = weight * diff\n",
    "\n",
    "                        self.W[i] -= self.learning_rate * grad_main\n",
    "                        self.biases[i] -= self.learning_rate * grad_bias_main\n",
    "\n",
    "                        # The distance gradient w.r.t. W[j] has the opposite sign.\n",
    "                        self.W[j] += self.learning_rate * grad_main\n",
    "                        # Fix: b_i and b_j enter the loss symmetrically, so both\n",
    "                        # biases must move in the same (negative-gradient)\n",
    "                        # direction; the old '+=' pushed b_j up the gradient.\n",
    "                        self.biases[j] -= self.learning_rate * grad_bias_main\n",
    "\n",
    "            # `epoch % 1 == 0` is always true, i.e. log every epoch.\n",
    "            if epoch % 1 == 0:\n",
    "                print(f'Epoch: {epoch}, Loss: {loss:.4f}')\n",
    "\n",
    "    def save_model(self, filepath):\n",
    "        \"\"\"Persist weights, biases and vocabulary to an .npz archive.\"\"\"\n",
    "        np.savez(filepath, W=self.W, biases=self.biases, word_indices=self.word_indices)\n",
    "\n",
    "    def load_model(self, filepath):\n",
    "        \"\"\"Restore state saved by save_model.\"\"\"\n",
    "        data = np.load(filepath, allow_pickle=True)\n",
    "        self.W = data['W']\n",
    "        self.biases = data['biases']\n",
    "        self.word_indices = data['word_indices'].item()\n",
    "        self.indices_word = {i: word for word, i in self.word_indices.items()}\n",
    "\n",
    "    def get_word_vector(self, word):\n",
    "        \"\"\"Return the embedding row for `word`, or None if OOV.\"\"\"\n",
    "        return self.W[self.word_indices[word]] if word in self.word_indices else None\n",
    "\n",
    "    def most_similar(self, word, topn=10):\n",
    "        \"\"\"Return up to topn (word, score) pairs ranked by raw dot product.\n",
    "\n",
    "        NOTE(review): unnormalized dot-product scores; the list may hold\n",
    "        topn+1 entries when `word` itself is not among the top hits.\n",
    "        \"\"\"\n",
    "        if word not in self.word_indices:\n",
    "            return []\n",
    "        word_vec = self.get_word_vector(word)\n",
    "        if word_vec is None:\n",
    "            return []\n",
    "        similarities = np.dot(self.W, word_vec)\n",
    "        best_indices = np.argsort(-similarities)[:topn+1]\n",
    "        return [(self.indices_word[i], similarities[i]) for i in best_indices if i != self.word_indices[word]]\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "e70730af",
   "metadata": {},
   "source": [
    "## 3. Train Text 8 dataset on Skip-gram and Glove algorithms."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "c1e4e20b",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the Text8 dataset\n",
    "def load_text8(file_path, sample_size=10000):\n",
    "    \"\"\"Read the first `sample_size` characters of the Text8 corpus.\n",
    "\n",
    "    Note: a character-count cut may split the final word in half.\n",
    "    \"\"\"\n",
    "    # Explicit encoding: the platform default is not portable.\n",
    "    with open(file_path, 'r', encoding='utf-8') as file:\n",
    "        text = file.read(sample_size)\n",
    "    return text\n",
    "text8 = load_text8('./Text 8/text8')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "5a39044e",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch: 0, Loss: 10650.8059\n",
      "Epoch: 1, Loss: 10650.5192\n",
      "Epoch: 2, Loss: 10650.2174\n",
      "Epoch: 3, Loss: 10649.9027\n",
      "Epoch: 4, Loss: 10649.5710\n",
      "Epoch: 5, Loss: 10649.2150\n",
      "Epoch: 6, Loss: 10648.8426\n",
      "Epoch: 7, Loss: 10648.4455\n",
      "Epoch: 8, Loss: 10648.0205\n",
      "Epoch: 9, Loss: 10647.5759\n",
      "Epoch: 10, Loss: 10647.1059\n",
      "Epoch: 11, Loss: 10646.6449\n",
      "Epoch: 12, Loss: 10646.1775\n",
      "Epoch: 13, Loss: 10645.7181\n",
      "Epoch: 14, Loss: 10645.2717\n",
      "Epoch: 15, Loss: 10644.8583\n",
      "Epoch: 16, Loss: 10644.4836\n",
      "Epoch: 17, Loss: 10644.1665\n",
      "Epoch: 18, Loss: 10643.9044\n",
      "Epoch: 19, Loss: 10643.7143\n"
     ]
    }
   ],
   "source": [
    "# Train the Word2Vec (skip-gram) model on Text8\n",
    "word2vec = Word2Vec(dim=100, window_size=2, epochs=20, learning_rate=0.001, model_type='skipgram')\n",
    "word2vec.train(text8)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "ff1f5aea",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "to\n",
      "[('the', 0.008786850290470131), ('of', 0.005395792940741286), ('in', 0.005305199983392854), ('proudhon', 0.0038835056202490186), ('and', 0.0035716597740595166), ('was', 0.003276293754996942), ('anarchism', 0.002349936212343674), ('first', 0.0023194884117913716), ('a', 0.0023104933603646854), ('property', 0.002191737008621615)]\n"
     ]
    }
   ],
   "source": [
    "# Show the most similar words for a sample vocabulary entry\n",
    "print(word2vec.indices_word[0])\n",
    "print(word2vec.most_similar(word2vec.indices_word[0]))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9dbb4744",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Save and reload the Word2Vec model\n",
    "word2vec.save_model('./word2vec_text8_model.npz')\n",
    "word2vec.load_model('./word2vec_text8_model.npz')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "96f97f0a",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch: 0, Loss: 50.6913\n",
      "Epoch: 1, Loss: 50.6766\n",
      "Epoch: 2, Loss: 50.6634\n",
      "Epoch: 3, Loss: 50.6513\n",
      "Epoch: 4, Loss: 50.6403\n",
      "Epoch: 5, Loss: 50.6302\n",
      "Epoch: 6, Loss: 50.6208\n",
      "Epoch: 7, Loss: 50.6122\n",
      "Epoch: 8, Loss: 50.6042\n",
      "Epoch: 9, Loss: 50.5967\n",
      "Epoch: 10, Loss: 50.5897\n",
      "Epoch: 11, Loss: 50.5832\n",
      "Epoch: 12, Loss: 50.5770\n",
      "Epoch: 13, Loss: 50.5711\n",
      "Epoch: 14, Loss: 50.5656\n",
      "Epoch: 15, Loss: 50.5603\n",
      "Epoch: 16, Loss: 50.5553\n",
      "Epoch: 17, Loss: 50.5504\n",
      "Epoch: 18, Loss: 50.5458\n",
      "Epoch: 19, Loss: 50.5414\n"
     ]
    }
   ],
   "source": [
    "# Train the GloVe model on Text8\n",
    "glove = GloVe(dim=100, epochs=20, learning_rate=0.05)\n",
    "glove.build_cooccur_matrix(text8)\n",
    "glove.train()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "2601b2d0",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "laws\n",
      "[('widely', 0.0023595672460555786), ('all', 0.0022359730772050046), ('originated', 0.0021747211909672677), ('everyone', 0.002152680767277867), ('moment', 0.0019898244512251938), ('who', 0.001944211252751098), ('believed', 0.0019405385453515915), ('resources', 0.0019390282992588046), ('three', 0.001862212587885343), ('when', 0.0018541410064862064)]\n"
     ]
    }
   ],
   "source": [
    "# Show the most similar words for a sample vocabulary entry\n",
    "print(glove.indices_word[0])\n",
    "print(glove.most_similar(glove.indices_word[0]))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0bd2a8e4",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Save and reload the GloVe model\n",
    "glove.save_model('./glove_text8_model.npz')\n",
    "glove.load_model('./glove_text8_model.npz')"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "94bcb8ec",
   "metadata": {},
   "source": [
    "## 4. Train IMDB dataset on Skip-gram and Glove algorithms."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "7c6cb18f",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the IMDB dataset\n",
    "def load_imdb_data(csv_file_path, fraction=0.001, random_state=None):\n",
    "    \"\"\"Sample a fraction of IMDB reviews.\n",
    "\n",
    "    Args:\n",
    "        csv_file_path: CSV with 'review' and 'sentiment' columns.\n",
    "        fraction: fraction of rows to sample.\n",
    "        random_state: optional seed so the sample is reproducible\n",
    "            (default None keeps the old unseeded behaviour).\n",
    "\n",
    "    Returns:\n",
    "        (reviews, sentiments) as parallel lists.\n",
    "    \"\"\"\n",
    "    df = pd.read_csv(csv_file_path)\n",
    "    sampled_df = df.sample(frac=fraction, random_state=random_state)\n",
    "    reviews = sampled_df['review'].tolist()\n",
    "    sentiments = sampled_df['sentiment'].tolist()\n",
    "    return reviews, sentiments\n",
    "\n",
    "reviews, sentiments = load_imdb_data('./IMDB/IMDB Dataset.csv')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "b3c88ded",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Text preprocessing: lowercase, strip HTML, keep letters only\n",
    "def preprocess_text(text):\n",
    "    \"\"\"Normalize a review: lowercase, drop HTML tags, keep a-z letters,\n",
    "    and collapse runs of whitespace to single spaces.\n",
    "    \"\"\"\n",
    "    cleaned = text.lower()\n",
    "    cleaned = re.sub(r'<.*?>', '', cleaned)\n",
    "    cleaned = re.sub(r'[^a-z\\s]', '', cleaned)\n",
    "    return re.sub(r'\\s+', ' ', cleaned).strip()\n",
    "\n",
    "imdb = [preprocess_text(review) for review in reviews]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "3f0e767c",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch: 0, Loss: 202628.5864\n",
      "Epoch: 1, Loss: 202611.3165\n",
      "Epoch: 2, Loss: 202561.2543\n",
      "Epoch: 3, Loss: 202421.4326\n",
      "Epoch: 4, Loss: 202076.1123\n",
      "Epoch: 5, Loss: 201341.4719\n",
      "Epoch: 6, Loss: 200025.4343\n",
      "Epoch: 7, Loss: 198114.3771\n",
      "Epoch: 8, Loss: 196147.6032\n",
      "Epoch: 9, Loss: 195166.6442\n"
     ]
    }
   ],
   "source": [
    "# Train the Word2Vec (skip-gram) model on the IMDB sample\n",
    "word2vec = Word2Vec(dim=100, window_size=2, epochs=10, learning_rate=0.001, model_type='skipgram')\n",
    "word2vec.train(' '.join(imdb))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "64b4511a",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "new\n",
      "[('the', 0.14172262600727284), ('of', 0.08252839660615743), ('and', 0.0820346977184644), ('a', 0.07853004785783571), ('to', 0.062164443804095595), ('is', 0.05946008799354257), ('in', 0.051556570095139295), ('that', 0.03468949738501693), ('it', 0.02819221808185881), ('with', 0.02519079981931715), ('as', 0.023672415658433508)]\n"
     ]
    }
   ],
   "source": [
    "# Show the most similar words for a sample vocabulary entry\n",
    "print(word2vec.indices_word[0])\n",
    "print(word2vec.most_similar(word2vec.indices_word[0]))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6a257e15",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Save and reload the Word2Vec model\n",
    "word2vec.save_model('./word2vec_imdb_model.npz')\n",
    "word2vec.load_model('./word2vec_imdb_model.npz')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "a1c25aab",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch: 0, Loss: 221.6931\n",
      "Epoch: 1, Loss: 221.5992\n",
      "Epoch: 2, Loss: 221.5265\n",
      "Epoch: 3, Loss: 221.4684\n",
      "Epoch: 4, Loss: 221.4206\n",
      "Epoch: 5, Loss: 221.3804\n",
      "Epoch: 6, Loss: 221.3458\n",
      "Epoch: 7, Loss: 221.3155\n",
      "Epoch: 8, Loss: 221.2885\n",
      "Epoch: 9, Loss: 221.2642\n"
     ]
    }
   ],
   "source": [
    "# Train the GloVe model on the IMDB sample\n",
    "glove = GloVe(dim=100, epochs=10, learning_rate=0.05)\n",
    "glove.build_cooccur_matrix(' '.join(imdb))\n",
    "glove.train()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "7eb09d9a",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "clubbed\n",
      "[('anne', 0.0028824622236412662), ('couldnt', 0.0027473897518137625), ('balloon', 0.002677511555171568), ('clear', 0.002670140795362146), ('slot', 0.002616906409352636), ('tend', 0.0025933881173338196), ('loved', 0.002581900247170611), ('okay', 0.002534600326806401), ('every', 0.002526296028918147), ('go', 0.0025043702794448706)]\n"
     ]
    }
   ],
   "source": [
    "# Show the most similar words for a sample vocabulary entry\n",
    "print(glove.indices_word[0])\n",
    "print(glove.most_similar(glove.indices_word[0]))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9379a38c",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Save and reload the GloVe model\n",
    "glove.save_model('./glove_imdb_model.npz')\n",
    "glove.load_model('./glove_imdb_model.npz')"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "00b80c53",
   "metadata": {},
   "source": [
    "## 5. Train Small Chinese Corpus dataset on Skip-gram and Glove algorithms."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "6d215a91",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the chengyu (Chinese idiom) dataset\n",
    "def load_and_process_chengyu(file_path, sample_size=1000):\n",
    "    \"\"\"Read one idiom per line (first whitespace-separated field) and\n",
    "    randomly down-sample to at most `sample_size` entries.\n",
    "    \"\"\"\n",
    "    chengyu_list = []\n",
    "    with open(file_path, 'r', encoding='utf-8') as file:\n",
    "        for line in file:\n",
    "            parts = re.split(r'\\s+', line.strip())\n",
    "            # Fix: re.split on an empty line yields [''] which is truthy,\n",
    "            # so blank lines used to add empty strings to the corpus.\n",
    "            if parts and parts[0]:\n",
    "                chengyu_list.append(parts[0])\n",
    "    if len(chengyu_list) > sample_size:\n",
    "        chengyu_list = random.sample(chengyu_list, sample_size)\n",
    "    return chengyu_list\n",
    "\n",
    "chengyu_list = load_and_process_chengyu('./Small Chinese Corpus/chengyu/chengyu.txt')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "c50241d8",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch: 0, Loss: 27589.6259\n",
      "Epoch: 1, Loss: 27588.8303\n",
      "Epoch: 2, Loss: 27588.0317\n",
      "Epoch: 3, Loss: 27587.2664\n",
      "Epoch: 4, Loss: 27586.5356\n",
      "Epoch: 5, Loss: 27585.8385\n",
      "Epoch: 6, Loss: 27585.1740\n",
      "Epoch: 7, Loss: 27584.5427\n",
      "Epoch: 8, Loss: 27583.9446\n",
      "Epoch: 9, Loss: 27583.3810\n",
      "Epoch: 10, Loss: 27582.8527\n",
      "Epoch: 11, Loss: 27582.3616\n",
      "Epoch: 12, Loss: 27581.9083\n",
      "Epoch: 13, Loss: 27581.4954\n",
      "Epoch: 14, Loss: 27581.1233\n",
      "Epoch: 15, Loss: 27580.7943\n",
      "Epoch: 16, Loss: 27580.5097\n",
      "Epoch: 17, Loss: 27580.2709\n",
      "Epoch: 18, Loss: 27580.0789\n",
      "Epoch: 19, Loss: 27579.9349\n"
     ]
    }
   ],
   "source": [
    "# Train the Word2Vec (skip-gram) model on the idiom corpus\n",
    "# (frequency_threshold=0 because each idiom typically occurs once)\n",
    "word2vec = Word2Vec(dim=100, window_size=2, epochs=20, learning_rate=0.01, model_type='skipgram', frequency_threshold=0)\n",
    "word2vec.train(' '.join(chengyu_list))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "6f0c53a1",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "繁丝急管\n",
      "[('鬼刻神劖', 0.0036781526984206656), ('九鼎一丝', 0.0035925849469023666), ('龙战玄黄', 0.003106373751742319), ('门生故旧', 0.0029789723194660516), ('狗盗鸡啼', 0.0029009120957035894), ('元戎启行', 0.0028889246215081385), ('硃脣玉面', 0.0028390095852340587), ('班衣戏采', 0.0028263440675882347), ('愚夫蠢妇', 0.00264705850099799), ('贞元会合', 0.0025915279658381206)]\n"
     ]
    }
   ],
   "source": [
    "# Show the most similar idioms for a sample vocabulary entry\n",
    "print(word2vec.indices_word[0])\n",
    "print(word2vec.most_similar(word2vec.indices_word[0]))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4227b3b2",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Save and reload the Word2Vec model\n",
    "word2vec.save_model('./word2vec_chengyu_model.npz')\n",
    "word2vec.load_model('./word2vec_chengyu_model.npz')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "id": "b870802e",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch: 0, Loss: 31.9177\n",
      "Epoch: 1, Loss: 31.9166\n",
      "Epoch: 2, Loss: 31.9155\n",
      "Epoch: 3, Loss: 31.9144\n",
      "Epoch: 4, Loss: 31.9134\n",
      "Epoch: 5, Loss: 31.9123\n",
      "Epoch: 6, Loss: 31.9112\n",
      "Epoch: 7, Loss: 31.9101\n",
      "Epoch: 8, Loss: 31.9091\n",
      "Epoch: 9, Loss: 31.9080\n",
      "Epoch: 10, Loss: 31.9070\n",
      "Epoch: 11, Loss: 31.9059\n",
      "Epoch: 12, Loss: 31.9049\n",
      "Epoch: 13, Loss: 31.9039\n",
      "Epoch: 14, Loss: 31.9028\n",
      "Epoch: 15, Loss: 31.9018\n",
      "Epoch: 16, Loss: 31.9008\n",
      "Epoch: 17, Loss: 31.8998\n",
      "Epoch: 18, Loss: 31.8988\n",
      "Epoch: 19, Loss: 31.8978\n"
     ]
    }
   ],
   "source": [
    "# Train the GloVe model on the idiom corpus\n",
    "glove = GloVe(dim=100, epochs=20, learning_rate=0.05)\n",
    "glove.build_cooccur_matrix(' '.join(chengyu_list))\n",
    "glove.train()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "id": "2bf6ddaa",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "繁丝急管\n",
      "[('如汤灌雪', 0.003690556673636809), ('前无古人', 0.0032929271430592964), ('坚强不屈', 0.003178311854220659), ('三纲五常', 0.0029253092082285517), ('一仍旧贯', 0.002875279040970421), ('高朋满座', 0.0027497703547515245), ('白首北面', 0.0026873001453776185), ('改弦易张', 0.0026416904902525165), ('手无缚鸡之力', 0.0025514190639681655), ('改过不吝', 0.002540384507999537)]\n"
     ]
    }
   ],
   "source": [
    "# Show the most similar idioms for a sample vocabulary entry\n",
    "print(glove.indices_word[0])\n",
    "print(glove.most_similar(glove.indices_word[0]))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "44f350eb",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Save and reload the GloVe model\n",
    "# Fix: this cell previously re-saved the Word2Vec model; following the\n",
    "# pattern of the Text8 and IMDB sections it should persist `glove`.\n",
    "glove.save_model('./glove_chengyu_model.npz')\n",
    "glove.load_model('./glove_chengyu_model.npz')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3cce98ee",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "pytorch",
   "language": "python",
   "name": "pytorch"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
