{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Binary Log Loss实验\n",
    "- 尝试一个不同的损失函数: binary log loss + 负例采样"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "import torch.optim as optim\n",
    "\n",
    "import numpy as np\n",
    "from collections import Counter"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 数据文件\n",
    "word_file = './data/bobsue.voc.txt'\n",
    "train_file = './data/bobsue.lm.train.txt'\n",
    "test_file = './data/bobsue.lm.test.txt'\n",
    "dev_file = './data/bobsue.lm.dev.txt'\n",
    "\n",
    "BATCH_SIZE = 32       # 批次大小\n",
    "EMBEDDING_DIM = 200   # 词向量维度\n",
    "EMBEDDING_OUT = 100   # 输出层词向量维度\n",
    "HIDDEN_DIM = 200      # 隐含层\n",
    "GRAD_CLIP = 5.        # 梯度截断值\n",
    "EPOCHS = 20\n",
    "LEARN_RATE = 0.001    # 初始学习率\n",
    "SAMPLE_NUM = 20       # 负例采样数目\n",
    "\n",
    "BEST_VALID_LOSS = float('inf')          # 初始验证集上的损失值，设为最大\n",
    "MODEL_PATH = \"lm-bll-samp-{}.pth\"       # 模型名称\n",
    "USE_CUDA = torch.cuda.is_available()    # 是否使用GPU\n",
    "NUM_CUDA = torch.cuda.device_count()    # GPU数量"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
     "def load_word_set(filename):\n",
     "    \"\"\"Read one word per line from `filename` and return them as a set.\"\"\"\n",
     "    with open(filename, \"r\", encoding=\"utf-8\") as f:\n",
     "        word_set = set([line.strip() for line in f])\n",
     "    return word_set"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "def create_word_set(*paths, power=1):\n",
    "    text = []\n",
    "    for path in paths:\n",
    "        with open(path, 'r', encoding='utf-8') as f:\n",
    "            for line in f:\n",
    "                text.extend(line.split())\n",
    "    word_set = set(text)\n",
    "    word2idx = {w:i for i, w in enumerate(word_set, 1)}\n",
    "    idx2word = {i:w for i, w in enumerate(word_set, 1)}\n",
    "    vocab = Counter(text)\n",
    "    word_counts = torch.tensor([vocab[w] for w in word_set], dtype=torch.float32)\n",
    "    \n",
    "    word_freqs = word_counts / word_counts.sum()\n",
    "    return word_set, word2idx, idx2word, word_freqs"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "def load_corpus(filename):\n",
    "    \"\"\"读取数据集，返回句子列表\"\"\"\n",
    "    with open(filename, \"r\", encoding=\"utf-8\") as f:\n",
    "        sentences = [line.strip() for line in f]\n",
    "    return sentences\n",
    "\n",
    "def sentences2words(sentences):\n",
    "    return [w for s in sentences for w in s.split()]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "word_set, word2idx, idx2word, word_freqs = create_word_set(train_file, dev_file, test_file, power=1)\n",
    "\n",
    "# 设置 <pad> 值为 0\n",
    "PAD_IDX = 0\n",
    "idx2word[PAD_IDX] = '<pad>'\n",
    "word2idx['<pad>'] = PAD_IDX\n",
    "\n",
    "VOCAB_SIZE = len(word_set)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "1492"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "VOCAB_SIZE"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_sentences = load_corpus(train_file)\n",
    "dev_sentences = load_corpus(dev_file)\n",
    "test_sentences = load_corpus(test_file)\n",
    "\n",
    "train_words = sentences2words(train_sentences)\n",
    "dev_words = sentences2words(dev_sentences)\n",
    "test_words = sentences2words(test_sentences)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "训练集句子数: 6036，单词数: 71367.\n",
      "验证集句子数: 750，单词数: 8707.\n",
      "测试集句子数: 750，单词数: 8809.\n"
     ]
    }
   ],
   "source": [
    "s = \"{}句子数: {}，单词数: {}.\"\n",
    "print(s.format(\"训练集\", len(train_sentences), len(train_words)))\n",
    "print(s.format(\"验证集\", len(dev_sentences), len(dev_words)))\n",
    "print(s.format(\"测试集\", len(test_sentences), len(test_words)))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "def max_sentence_num(sentences):\n",
    "    \"\"\"返回最长句子单词数量\"\"\"\n",
    "    return max([len(s.split()) for s in sentences ])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "训练集最长句子单词个数： 21\n",
      "验证集最长句子单词个数： 20\n",
      "测试集最长句子单词个数： 21\n",
      "训练集最短句子单词个数： 5\n",
      "验证集最短句子单词个数： 5\n",
      "测试集最短句子单词个数： 6\n"
     ]
    }
   ],
   "source": [
    "print(\"训练集最长句子单词个数：\", max([len(s.split()) for s in train_sentences ]))\n",
    "print(\"验证集最长句子单词个数：\", max([len(s.split()) for s in dev_sentences ]))\n",
    "print(\"测试集最长句子单词个数：\", max([len(s.split()) for s in test_sentences ]))\n",
    "\n",
    "print(\"训练集最短句子单词个数：\", min([len(s.split()) for s in train_sentences ]))\n",
    "print(\"验证集最短句子单词个数：\", min([len(s.split()) for s in dev_sentences ]))\n",
    "print(\"测试集最短句子单词个数：\", min([len(s.split()) for s in test_sentences ]))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "def model_sequence(corpus, word2idx, word_freqs, sample_num=20, seq_len=21):\n",
    "    \"\"\"输入语料句子列表，返回模型输入序列的idx\"\"\"\n",
    "    labels = []\n",
    "    sentences = []\n",
    "    neg_words = []\n",
    "    for sentence in corpus:\n",
    "        words = sentence.split()\n",
    "        sentence_tample = [0] * seq_len\n",
    "        for i, w in enumerate(words[:-1]):\n",
    "            sentence_tample[i] = word2idx[w]\n",
    "        target_tample = [0] * seq_len\n",
    "        for i, w in enumerate(words[1:]):\n",
    "            target_tample[i] = word2idx[w]\n",
    "        sentences.append(sentence_tample)\n",
    "        labels.append(target_tample)\n",
    "        # 负例采样\n",
    "        neg_words.append(torch.multinomial(word_freqs, seq_len * sample_num, True))\n",
    "    return (sentences, labels, neg_words)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_data, train_label, train_neg = model_sequence(train_sentences, word2idx, word_freqs, sample_num=SAMPLE_NUM)\n",
    "dev_data, dev_label, dev_neg = model_sequence(dev_sentences, word2idx, word_freqs, sample_num=SAMPLE_NUM)\n",
    "test_data, test_label, test_neg = model_sequence(test_sentences, word2idx, word_freqs, sample_num=SAMPLE_NUM)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[272, 44, 627, 297, 1042, 577, 673, 1389, 1131, 146, 1171, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
      "<s> She ate quickly and asked to be taken home . <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> ----------------------------------------\n",
      "She ate quickly and asked to be taken home . </s> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> ----------------------------------------\n",
      "tensor([ 273,  584,  848,  832,  736, 1170,  854,  539,   78,  271,  635,  436,\n",
      "         672,  397,  271,  457,   92, 1429,  381, 1439,  774,  214,  271,  736,\n",
      "         823,  322,  194, 1240,  976,  736, 1488, 1041, 1170, 1332, 1200,   12,\n",
      "        1170, 1487,  963, 1470,  501, 1182,  825,  630,  637,  669,  510, 1047,\n",
      "         271,  291,  952,  920,  164, 1240, 1250,  736, 1426,  630, 1240, 1070,\n",
      "        1150,   97,  736, 1038,  736,   61,  450,  514,  271,  109,  630,  732,\n",
      "        1170,  455,  672,  271,  672, 1240,  736, 1066,  362,  596,  273,  617,\n",
      "        1436,  672,  164,  799,  909, 1170, 1170,  247,  370,  227,  325, 1187,\n",
      "        1381,  247,  779,  971,  927,  277,  271,  664,  736,  271,  864,  672,\n",
      "        1078,  982, 1170,  271,  821, 1126,  834,  204,   36, 1170, 1186,  920,\n",
      "         252,   19, 1170,  636, 1170,  271,  736,  909,  676,  392, 1170, 1170,\n",
      "         736, 1170, 1439,  672, 1170, 1433,  736,  972, 1168,  271,  608,   72,\n",
      "         271, 1170,  652,  472,  322,  923,    2, 1126, 1101,  920, 1041,  358,\n",
      "         736,  809, 1052,   78,  450, 1403, 1240,  736, 1286,  736,  588,  800,\n",
      "         823,  637,   78,  457,  911,  385,   36,  662, 1040,  736, 1405, 1041,\n",
      "         736,  895,  253,  271,  239,  736,  457, 1020, 1242,   36,  325,  271,\n",
      "        1433, 1405,  851, 1439,  367,   52, 1170, 1170,  321,   78, 1240, 1403,\n",
      "        1439,  322,   86,   71,  750,  322,  271,  877,  672,  736, 1205,  271,\n",
      "         508, 1072, 1041,  736, 1170, 1439,  355,  770, 1170,  736, 1041, 1047,\n",
      "         139,   72,  271,  920,   42,  831,   43,  271, 1047,  271, 1403,  813,\n",
      "         920,  596,  637,  770,  273, 1439, 1448, 1215,  457,  271,  589,  920,\n",
      "         736, 1170, 1041,  450,  197,  395,  325,  291,  911,  780,   31, 1099,\n",
      "         247, 1170,   31, 1164, 1363, 1170,  736,  204,  247, 1170,  164, 1203,\n",
      "         836,  204,  271,  736,  672,  971,  844,   30,    8,  243,  973,  736,\n",
      "        1013, 1170,  457,  164,  963,  864,  271,  736,  736, 1396,  316,   43,\n",
      "          43,  931,   31,   43,  325, 1170, 1403, 1312,  501,  381,  218,  736,\n",
      "         271,  637,  322,  799,  417, 1347,  770,  457,  221,  271,  273, 1170,\n",
      "         939,  271, 1290,  301,   96, 1101,  327, 1446,  736,  799,  187,  672,\n",
      "         271,  247,   84,  217,   78,  932, 1170,  864,  372,  920,  271, 1170,\n",
      "          78, 1413,  931,  271, 1102, 1240,  952,  920, 1170,   78,  815, 1240,\n",
      "         271,  247, 1209,  782,   52, 1151,  413,  365,  247,  247, 1189, 1183,\n",
      "         271,  494, 1403,  699,  271, 1240, 1240, 1112,  164,  963, 1170,  920,\n",
      "        1391,  459,  736, 1477, 1170, 1292,  920,  271,   43,  468,  664, 1170,\n",
      "         371,  597,  588,  920,  291,  962, 1047, 1170,  271, 1170, 1189, 1439,\n",
      "          78,  557, 1209, 1240,  273,  194,   72,  630, 1188,  576,  630,  322])\n"
     ]
    }
   ],
   "source": [
    "a = train_data[0]\n",
    "print(a)\n",
    "for i in a:\n",
    "    print(idx2word[i], end=' ')\n",
    "print(\"--\"*20)\n",
    "b = train_label[0]\n",
    "for i in b:\n",
    "    print(idx2word[i], end=' ')\n",
    "print(\"--\"*20)\n",
    "print(train_neg[0])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "torch.Size([420])"
      ]
     },
     "execution_count": 15,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "n = train_neg[0]\n",
    "n.size()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "def gene_batch_data(data, label, neg, batch_size=32):\n",
    "    \"\"\"\n",
    "    构建 batch tensor，返回 batch 列表，每个batch为三元组包含data和label、neg_word\n",
    "    \"\"\"\n",
    "    batch_data = []\n",
    "    data_tensor = torch.tensor(data, dtype=torch.long)\n",
    "    label_tensor = torch.tensor(label, dtype=torch.long)\n",
    "    neg_tensor = torch.stack(neg)\n",
    "    n, dim = data_tensor.size()\n",
    "    for start in range(0, n, batch_size):\n",
    "        end = start + batch_size\n",
    "        if end > n:\n",
    "            break\n",
    "            dbatch = data_tensor[start: ]\n",
    "            lbatch = label_tensor[start: ]\n",
    "            nbatch = neg_tensor[start: ]\n",
    "            print(\"最后一个batch size:\", dbatch.size())\n",
    "#             break\n",
    "        else:\n",
    "            dbatch = data_tensor[start: end]\n",
    "            lbatch = label_tensor[start: end]\n",
    "            nbatch = neg_tensor[start: end]\n",
    "        batch_data.append((dbatch, lbatch, nbatch))\n",
    "    return batch_data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_batch = gene_batch_data(train_data, train_label, train_neg, batch_size=BATCH_SIZE)\n",
    "dev_batch = gene_batch_data(dev_data, dev_label, dev_neg, batch_size=BATCH_SIZE)\n",
    "test_batch = gene_batch_data(test_data, test_label, test_neg, batch_size=BATCH_SIZE)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [],
   "source": [
    "class LSTMNegModel(nn.Module):\n",
    "    def __init__(self, embedding_dim, embedding_out, hidden_dim, vocab_size, sample_num):\n",
    "        super(LSTMNegModel, self).__init__()\n",
    "        self.sample_num = sample_num\n",
    "        self.embedding_dim = embedding_dim\n",
    "        self.hidden_dim = hidden_dim\n",
    "        self.in_embed = nn.Embedding(vocab_size, embedding_dim)\n",
    "        self.out_embed = nn.Embedding(vocab_size, embedding_out)\n",
    "        self.lstm = nn.LSTM(embedding_dim, hidden_dim, batch_first=True)\n",
    "        self.linear = nn.Linear(hidden_dim, embedding_out)\n",
    "        \n",
    "    def forward(self, data):\n",
    "        text, label, neg = data\n",
    "        # print(\"-\"*20)\n",
    "        # print(text.size())\n",
    "        # print(label.size())\n",
    "        # print(neg.size())   # (bacth, SAMPLE_NUM*seq_len)\n",
    "        # (torch.tensor([1,2,3,1]) != 1) ==>[0,1,1, 0]\n",
    "        mask = (text != PAD_IDX)     # (batch, seq_len)\n",
    "        # print(\"mask:\", mask.size())\n",
    "        # (batch, seq_len)-->(batch, 1, seq_len)-->(batch,SAMPLE_NUM,seq_len)-->（batch, SAMPLE_NUM*seq_len)\n",
    "        neg_mask = mask.unsqueeze(1).expand(text.size(0), SAMPLE_NUM, text.size(1)).contiguous().view(neg.size(0), neg.size(1))\n",
    "        # 当调用contiguous()时，会强制拷贝一份tensor\n",
    "\n",
    "        # print(\"neg_mask:\", neg_mask.size(), neg_mask.sum())  # (batch, seq_len*sample_num)\n",
    "        \n",
    "        embed = self.in_embed(text)   # (bacth,seq_len) --> (bacth, seq_len, in_emd_dim)\n",
    "        \n",
    "        # (batch, seq_len) -> (batch, seq_len, out_emb_dim)\n",
    "        label_embed = self.out_embed(label)\n",
    "        # (batch, seq_len*sample_num)-> (batch, seq_len*sample_num, out_emb_dim)\n",
    "        neg_embed = self.out_embed(neg)\n",
    "        \n",
    "        # (batch, seq_len, in_emb_dim) -> (batch, seq_len, out_emb_dim(hn_dim))\n",
    "        lstm_out, (h_n, c_n) = self.lstm(embed)\n",
    "        # (batch, seq_len, out_emb_dim) -> (batch, seq_len, out_emb_dim) 即形状不变\n",
    "        out = self.linear(lstm_out)\n",
    "        \n",
    "        # 计算损失\n",
    "        # (batch, seq_len, out_emb_dim) * (batch, seq_len, out_emb_dim) -> sum(2)-(batch, seq_len)\n",
    "        # 对应元素相乘，2维度上求和\n",
    "        label_score = (out * label_embed).sum(2)\n",
    "        # label_score = torch.mm(label_embed.squeeze(1), out.squeeze(1).permute(1, 0))\n",
    "        # (batch, seq_len*sample_num, out_emb_dim) * (batch, seq_len*sample_num, out_emb_dim) \n",
    "        out_expand = out.unsqueeze(1).expand(out.size(0), SAMPLE_NUM, out.size(1), \n",
    "                                             out.size(2)).contiguous().view(\n",
    "                                             neg_embed.size(0), neg_embed.size(1), neg_embed.size(2))\n",
    "        # (batch, seq_len*sample_num, out_emb_dim) -> (batch, seq_len*sample_num)\n",
    "        # 词向量合成一个数的意义是什么？\n",
    "        neg_score = (out_expand * neg_embed).sum(2)\n",
    "\n",
    "        label_score = label_score[mask]    # 这个操作会压缩成一行\n",
    "        neg_score = neg_score[neg_mask]\n",
    "        \n",
    "        log_label = F.logsigmoid(label_score).mean()   # 一个常数，这里取平均的意义是什么？\n",
    "        log_neg = torch.log(1 - torch.sigmoid(neg_score)).mean()\n",
    "\n",
    "        loss = log_label + log_neg\n",
    "        \n",
    "        return -loss"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [],
   "source": [
    "VOCAB_SIZE = len(word2idx)\n",
    "model = LSTMNegModel(EMBEDDING_DIM, EMBEDDING_OUT, HIDDEN_DIM, VOCAB_SIZE, SAMPLE_NUM)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [],
   "source": [
    "# DEVICE = torch.device(\"cuda\" if USE_CUDA else 'cpu')\n",
    "DEVICE = torch.device(\"cpu\")\n",
    "model = model.to(DEVICE)\n",
    "# if NUM_CUDA > 1:\n",
    "#     device_ids = list(range(NUM_CUDA))\n",
    "#     print(device_ids)\n",
    "#     model = nn.DataParallel(model, device_ids=device_ids)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [],
   "source": [
     "def acc_score(y_hat, y):\n",
     "    \"\"\"Accuracy of argmax predictions `y_hat` against flattened targets `y`.\"\"\"\n",
     "    # Index of the highest-probability class per row.\n",
     "    pred = y_hat.argmax(dim=1)\n",
     "    # print(y.view(-1))\n",
     "    acc_count = torch.eq(pred, y.view(-1))\n",
     "    score = acc_count.sum().item() / acc_count.size()[0]\n",
     "    return score\n",
     "\n",
     "def evaluate(model, device, iterator):\n",
     "    \"\"\"Average the model loss over `iterator` without updating parameters.\"\"\"\n",
     "    epoch_loss = 0  # running loss accumulator\n",
     "    model.eval()  # evaluation mode: dropout etc. disabled, no training\n",
     "    \n",
     "    with torch.no_grad():\n",
     "        for x, y, z in iterator:\n",
     "            x = x.to(device)\n",
     "            y = y.to(device)\n",
     "            z = z.to(device)\n",
     "            \n",
     "            loss = model((x,y,z))\n",
     "            epoch_loss += loss.item()\n",
     "            \n",
     "    return epoch_loss/len(iterator)\n",
     "\n",
     "\n",
     "def train(model, device, iterator, optimizer, grad_clip):\n",
     "    \"\"\"Run one training epoch and return the average batch loss.\"\"\"\n",
     "    epoch_loss = 0  # running loss accumulator\n",
     "    model.train()   # switch to training mode\n",
     "    \n",
     "    for x, y, z in iterator:  # one minibatch at a time\n",
     "        x = x.to(device)\n",
     "        y = y.to(device)\n",
     "        z = z.to(device)\n",
     "        \n",
     "        optimizer.zero_grad()\n",
     "    \n",
     "        loss = model((x,y,z))  # forward pass returns the loss\n",
     "        loss.backward()        # backpropagate\n",
     "        # Gradient clipping to guard against exploding gradients.\n",
     "        torch.nn.utils.clip_grad_norm_(model.parameters(), grad_clip)\n",
     "        optimizer.step()  # update parameters\n",
     "        epoch_loss += loss.item()\n",
     "\n",
     "    return epoch_loss/len(iterator)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/root/anaconda3/lib/python3.6/site-packages/torch/serialization.py:251: UserWarning: Couldn't retrieve source code for container of type LSTMNegModel. It won't be checked for correctness upon loading.\n",
      "  \"type \" + obj.__name__ + \". It won't be checked \"\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Save model path:lm-bll-samp-20.pth| train loss 0.6933950247599724| valid loss 0.4913798246694648\n",
      "Epoch:1|Train Loss:0.6933950247599724|Val Loss:0.4913798246694648\n",
      "Save model path:lm-bll-samp-20.pth| train loss 0.4101916064924382| valid loss 0.40508265080659284\n",
      "Epoch:2|Train Loss:0.4101916064924382|Val Loss:0.40508265080659284\n",
      "Save model path:lm-bll-samp-20.pth| train loss 0.3178675065332271| valid loss 0.3808679373367973\n",
      "Epoch:3|Train Loss:0.3178675065332271|Val Loss:0.3808679373367973\n",
      "Epoch:4|Train Loss:0.25422142271665815|Val Loss:0.385563172723936\n",
      "Epoch:5|Train Loss:0.20191996536673384|Val Loss:0.4142624761747277\n",
      "Current lr: 0.001\n",
      "Epoch:6|Train Loss:0.15997874011543203|Val Loss:0.45911684243575385\n",
      "Epoch:7|Train Loss:0.12928659540224582|Val Loss:0.497904518376226\n",
      "Epoch:8|Train Loss:0.1091923418038703|Val Loss:0.5218355098496312\n",
      "Current lr: 0.0005\n",
      "Epoch:9|Train Loss:nan|Val Loss:nan\n",
      "Early stop!\n"
     ]
    }
   ],
   "source": [
     "optimizer = optim.Adam(model.parameters(), lr=LEARN_RATE)  # optimizer\n",
     "scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, 0.5)   # halves the lr on each scheduler.step()\n",
     "\n",
     "SCHED_NUM = 0\n",
     "model_name = MODEL_PATH.format(SAMPLE_NUM)\n",
     "for epoch in range(1, EPOCHS+1):\n",
     "    train_loss = train(model, DEVICE, train_batch, optimizer, GRAD_CLIP)\n",
     "    valid_loss = evaluate(model, DEVICE, dev_batch)\n",
     "    if valid_loss < BEST_VALID_LOSS: # save whenever validation improves\n",
     "        BEST_VALID_LOSS = valid_loss\n",
     "        torch.save(model, model_name)\n",
     "        print(\"Save model path:{}| train loss {}| valid loss {}\".format(model_name, train_loss, valid_loss))\n",
     "        SCHED_NUM = 0\n",
     "    else:\n",
     "        SCHED_NUM += 1\n",
     "        if SCHED_NUM % 3 == 0:\n",
     "            scheduler.step()\n",
     "            print(\"Current lr:\", optimizer.param_groups[0]['lr'])\n",
     "        if SCHED_NUM == 7:\n",
     "            print(\"Early stop!\")\n",
     "            break\n",
     "    print('Epoch:{}|Train Loss:{}|Val Loss:{}'.format(epoch, train_loss, valid_loss))\n",
     "        "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Test Loss: 0.3985916440901549\n"
     ]
    }
   ],
   "source": [
    "model = torch.load(model_name)\n",
    "test_loss = evaluate(model, DEVICE, test_batch)\n",
    "print('Test Loss: {}'.format(test_loss))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 问题\n",
    "- 在使用binary log loss 的情况下，如何评价模型？\n",
    "- 梯度截断的情况下依然会存在loss nan?"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 不同负采样数量"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "***负采样数量20***\n",
      "Save model path:lm-bll-samp-20.pth| train loss 0.6850260819526429| valid loss 0.48111668617829034\n",
      "Epoch:1|Train Loss:0.6850260819526429|Val Loss:0.48111668617829034\n",
      "Save model path:lm-bll-samp-20.pth| train loss 0.4033044838207833| valid loss 0.4045242161854454\n",
      "Epoch:2|Train Loss:0.4033044838207833|Val Loss:0.4045242161854454\n",
      "Save model path:lm-bll-samp-20.pth| train loss 0.317084124430697| valid loss 0.38859081268310547\n",
      "Epoch:3|Train Loss:0.317084124430697|Val Loss:0.38859081268310547\n",
      "Epoch:4|Train Loss:0.25521340173609713|Val Loss:0.3989918193091517\n",
      "Epoch:5|Train Loss:0.20302926701434115|Val Loss:0.4337702691555023\n",
      "Current lr: 0.001\n",
      "Epoch:6|Train Loss:0.1606861978690041|Val Loss:0.4862003248670827\n",
      "Epoch:7|Train Loss:0.12954443748644057|Val Loss:0.5440482626790586\n",
      "Epoch:8|Train Loss:0.10722808088076875|Val Loss:0.601476040871247\n",
      "Current lr: 0.0005\n",
      "Epoch:9|Train Loss:0.09415154880348672|Val Loss:0.6661981914354407\n",
      "Early stop!\n",
      "Start test model: lm-bll-samp-20.pth\n",
      "Test Loss: 0.4130882659684057\n",
      "***负采样数量100***\n",
      "Save model path:lm-bll-samp-100.pth| train loss 0.6820947372532905| valid loss 0.48492626003597095\n",
      "Epoch:1|Train Loss:0.6820947372532905|Val Loss:0.48492626003597095\n",
      "Save model path:lm-bll-samp-100.pth| train loss 0.3994534594264436| valid loss 0.3995167561199354\n",
      "Epoch:2|Train Loss:0.3994534594264436|Val Loss:0.3995167561199354\n",
      "Save model path:lm-bll-samp-100.pth| train loss 0.3109292778721515| valid loss 0.37747382728949835\n",
      "Epoch:3|Train Loss:0.3109292778721515|Val Loss:0.37747382728949835\n",
      "Epoch:4|Train Loss:0.2507707264195097|Val Loss:0.38404754063357477\n",
      "Epoch:5|Train Loss:0.20165716135438452|Val Loss:0.4151491781939631\n",
      "Current lr: 0.001\n",
      "Epoch:6|Train Loss:0.16272602127270497|Val Loss:0.46648676369501196\n",
      "Epoch:7|Train Loss:nan|Val Loss:nan\n",
      "Epoch:8|Train Loss:nan|Val Loss:nan\n",
      "Current lr: 0.0005\n",
      "Epoch:9|Train Loss:nan|Val Loss:nan\n",
      "Early stop!\n",
      "Start test model: lm-bll-samp-100.pth\n",
      "Test Loss: 0.40173600160557293\n",
      "***负采样数量500***\n",
      "Save model path:lm-bll-samp-500.pth| train loss 0.6668317427343511| valid loss 0.47362106779347296\n",
      "Epoch:1|Train Loss:0.6668317427343511|Val Loss:0.47362106779347296\n",
      "Save model path:lm-bll-samp-500.pth| train loss 0.3925739087639971| valid loss 0.40046126816583716\n",
      "Epoch:2|Train Loss:0.3925739087639971|Val Loss:0.40046126816583716\n",
      "Save model path:lm-bll-samp-500.pth| train loss 0.30847049036875684| valid loss 0.3803785054580025\n",
      "Epoch:3|Train Loss:0.30847049036875684|Val Loss:0.3803785054580025\n",
      "Epoch:4|Train Loss:0.24861439689993858|Val Loss:0.3855181465978208\n",
      "Epoch:5|Train Loss:nan|Val Loss:nan\n",
      "Current lr: 0.001\n",
      "Epoch:6|Train Loss:nan|Val Loss:nan\n",
      "Epoch:7|Train Loss:nan|Val Loss:nan\n",
      "Epoch:8|Train Loss:nan|Val Loss:nan\n",
      "Current lr: 0.0005\n",
      "Epoch:9|Train Loss:nan|Val Loss:nan\n",
      "Early stop!\n",
      "Start test model: lm-bll-samp-500.pth\n",
      "Test Loss: 0.40358193283495697\n"
     ]
    }
   ],
   "source": [
    "sample_num = [20, 100, 500]\n",
    "for n in sample_num:\n",
    "    print(\"***负采样数量{}***\".format(n))\n",
    "    model_name = 'lm-bll-samp-{}.pth'.format(n)\n",
    "    SAMPLE_NUM = n\n",
    "    BEST_VALID_LOSS = float('inf')\n",
    "    train_data, train_label, train_neg = model_sequence(train_sentences, word2idx, word_freqs, sample_num=SAMPLE_NUM)\n",
    "    dev_data, dev_label, dev_neg = model_sequence(dev_sentences, word2idx, word_freqs, sample_num=SAMPLE_NUM)\n",
    "    test_data, test_label, test_neg = model_sequence(test_sentences, word2idx, word_freqs, sample_num=SAMPLE_NUM)\n",
    "    \n",
    "    \n",
    "    train_batch = gene_batch_data(train_data, train_label, train_neg, batch_size=BATCH_SIZE)\n",
    "    dev_batch = gene_batch_data(dev_data, dev_label, dev_neg, batch_size=BATCH_SIZE)\n",
    "    test_batch = gene_batch_data(test_data, test_label, test_neg, batch_size=BATCH_SIZE)\n",
    "    \n",
    "    model = LSTMNegModel(EMBEDDING_DIM, EMBEDDING_OUT, HIDDEN_DIM, VOCAB_SIZE, SAMPLE_NUM)\n",
    "    DEVICE = torch.device(\"cpu\")\n",
    "    model = model.to(DEVICE)\n",
    "    \n",
    "    \n",
    "    optimizer = optim.Adam(model.parameters(), lr=LEARN_RATE)  # 指定优化器\n",
    "    scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, 0.5)   # 学习率缩减？\n",
    "\n",
    "    SCHED_NUM = 0\n",
    "    for epoch in range(1, EPOCHS+1):\n",
    "        train_loss = train(model, DEVICE, train_batch, optimizer, GRAD_CLIP)\n",
    "        valid_loss = evaluate(model, DEVICE, dev_batch)\n",
    "        if valid_loss < BEST_VALID_LOSS: # 如果是最好的模型就保存到文件夹\n",
    "            BEST_VALID_LOSS = valid_loss\n",
    "            torch.save(model, model_name)\n",
    "            print(\"Save model path:{}| train loss {}| valid loss {}\".format(model_name, train_loss, valid_loss))\n",
    "            SCHED_NUM = 0\n",
    "        else:\n",
    "            SCHED_NUM += 1\n",
    "            if SCHED_NUM % 3 == 0:\n",
    "                scheduler.step()\n",
    "                print(\"Current lr:\", optimizer.param_groups[0]['lr'])\n",
    "            if SCHED_NUM == 7:\n",
    "                print(\"Early stop!\")\n",
    "                break\n",
    "        print('Epoch:{}|Train Loss:{}|Val Loss:{}'.format(epoch, train_loss, valid_loss))\n",
    "    print(\"Start test model:\", model_name)\n",
    "    model = torch.load(model_name)\n",
    "    test_loss = evaluate(model, DEVICE, test_batch)\n",
    "    print('Test Loss: {}'.format(test_loss))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Test Loss: 0.41382545232772827\n"
     ]
    }
   ],
   "source": [
    "model = torch.load('lm-bll-samp-20.pth')\n",
    "test_loss = evaluate(model, DEVICE, test_batch)\n",
    "print('Test Loss: {}'.format(test_loss))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Test Loss: 0.3996773258499477\n"
     ]
    }
   ],
   "source": [
    "model = torch.load('lm-bll-samp-100.pth')\n",
    "test_loss = evaluate(model, DEVICE, test_batch)\n",
    "print('Test Loss: {}'.format(test_loss))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Test Loss: 0.40358193283495697\n"
     ]
    }
   ],
   "source": [
    "model = torch.load('lm-bll-samp-500.pth')\n",
    "test_loss = evaluate(model, DEVICE, test_batch)\n",
    "print('Test Loss: {}'.format(test_loss))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 不同采样频率"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {},
   "outputs": [],
   "source": [
     "def create_word_set(*paths, power=1):\n",
     "    \"\"\"Rebuild the vocabulary with the sampling distribution raised to `power`.\n",
     "\n",
     "    NOTE(review): this redefines (shadows) the earlier create_word_set; the\n",
     "    intended difference is that `power` is applied to word_freqs here.\n",
     "    \"\"\"\n",
     "    text = []\n",
     "    for path in paths:\n",
     "        with open(path, 'r', encoding='utf-8') as f:\n",
     "            for line in f:\n",
     "                text.extend(line.split())\n",
     "    word_set = set(text)\n",
     "    word2idx = {w:i for i, w in enumerate(word_set, 1)}\n",
     "    idx2word = {i:w for i, w in enumerate(word_set, 1)}\n",
     "    vocab = Counter(text)\n",
     "    word_counts = torch.tensor([vocab[w] for w in word_set], dtype=torch.float32)\n",
     "    \n",
     "    word_freqs = word_counts / word_counts.sum()\n",
     "    word_freqs = word_freqs ** power\n",
     "    word_freqs = word_freqs / word_freqs.sum()\n",
     "    return word_set, word2idx, idx2word, word_freqs"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "***负采样评率0.1***\n",
      "Save model path:lm-bll-power-10.0.pth| train loss 0.9948989587261322| valid loss 0.7935010117033253\n",
      "Epoch:1|Train Loss:0.9948989587261322|Val Loss:0.7935010117033253\n",
      "Save model path:lm-bll-power-10.0.pth| train loss 0.6510189051957841| valid loss 0.6306528008502462\n",
      "Epoch:2|Train Loss:0.6510189051957841|Val Loss:0.6306528008502462\n",
      "Save model path:lm-bll-power-10.0.pth| train loss 0.50438765388854| valid loss 0.5974134932393613\n",
      "Epoch:3|Train Loss:0.50438765388854|Val Loss:0.5974134932393613\n",
      "Epoch:4|Train Loss:0.40621826670905375|Val Loss:0.6067650797574416\n",
      "Epoch:5|Train Loss:0.32600710128850124|Val Loss:0.6531567625377489\n",
      "Current lr: 0.001\n",
      "Epoch:6|Train Loss:0.2615356782966472|Val Loss:0.7258318662643433\n",
      "Epoch:7|Train Loss:0.2128219671864459|Val Loss:0.8177054602166881\n",
      "Epoch:8|Train Loss:0.1776645783572755|Val Loss:0.9252345847046893\n",
      "Current lr: 0.0005\n",
      "Epoch:9|Train Loss:0.15405175513885122|Val Loss:1.0920288873755413\n",
      "Early stop!\n",
      "Start test model: lm-bll-power-10.0.pth\n",
      "Test Loss: 0.6086942449859951\n",
      "***负采样评率0.2***\n",
      "Save model path:lm-bll-power-20.0.pth| train loss 0.9748336850328648| valid loss 0.7547413758609606\n",
      "Epoch:1|Train Loss:0.9748336850328648|Val Loss:0.7547413758609606\n",
      "Save model path:lm-bll-power-20.0.pth| train loss 0.6323788933297421| valid loss 0.6168249871419824\n",
      "Epoch:2|Train Loss:0.6323788933297421|Val Loss:0.6168249871419824\n",
      "Save model path:lm-bll-power-20.0.pth| train loss 0.49545912064136344| valid loss 0.5861824880475583\n",
      "Epoch:3|Train Loss:0.49545912064136344|Val Loss:0.5861824880475583\n",
      "Epoch:4|Train Loss:0.4001466446417443|Val Loss:0.596151370069255\n",
      "Epoch:5|Train Loss:0.3228167788462436|Val Loss:0.642710136330646\n",
      "Current lr: 0.001\n",
      "Epoch:6|Train Loss:0.2610978181374834|Val Loss:0.7307759652967039\n",
      "Epoch:7|Train Loss:0.21398929641284842|Val Loss:0.8385945662208225\n",
      "Epoch:8|Train Loss:0.17975101477288186|Val Loss:1.004443163457124\n",
      "Current lr: 0.0005\n",
      "Epoch:9|Train Loss:0.15831324410565356|Val Loss:0.9026282792506011\n",
      "Early stop!\n",
      "Start test model: lm-bll-power-20.0.pth\n",
      "Test Loss: 0.6021935447402622\n",
      "***负采样评率0.30000000000000004***\n",
      "Save model path:lm-bll-power-30.000000000000004.pth| train loss 0.9867634741549797| valid loss 0.7650853395462036\n",
      "Epoch:1|Train Loss:0.9867634741549797|Val Loss:0.7650853395462036\n",
      "Save model path:lm-bll-power-30.000000000000004.pth| train loss 0.626170499210662| valid loss 0.6223440610844156\n",
      "Epoch:2|Train Loss:0.626170499210662|Val Loss:0.6223440610844156\n",
      "Save model path:lm-bll-power-30.000000000000004.pth| train loss 0.4874343772200828| valid loss 0.5943890810012817\n",
      "Epoch:3|Train Loss:0.4874343772200828|Val Loss:0.5943890810012817\n",
      "Epoch:4|Train Loss:0.3933774288347427|Val Loss:0.6066625170085741\n",
      "Epoch:5|Train Loss:0.3164727397738619|Val Loss:0.6591577426246975\n",
      "Current lr: 0.001\n",
      "Epoch:6|Train Loss:0.25485571735399837|Val Loss:0.746193616286568\n",
      "Epoch:7|Train Loss:0.2084565154732542|Val Loss:0.8437900776448457\n",
      "Epoch:8|Train Loss:0.17540406427801924|Val Loss:0.863612755485203\n",
      "Current lr: 0.0005\n",
      "Epoch:9|Train Loss:0.15683165810843733|Val Loss:1.003725712713988\n",
      "Early stop!\n",
      "Start test model: lm-bll-power-30.000000000000004.pth\n",
      "Test Loss: 0.6106626650561457\n",
      "***负采样评率0.4***\n",
      "Save model path:lm-bll-power-40.0.pth| train loss 0.958821158776892| valid loss 0.7331090128940084\n",
      "Epoch:1|Train Loss:0.958821158776892|Val Loss:0.7331090128940084\n",
      "Save model path:lm-bll-power-40.0.pth| train loss 0.6107472032308578| valid loss 0.6006786901017894\n",
      "Epoch:2|Train Loss:0.6107472032308578|Val Loss:0.6006786901017894\n",
      "Save model path:lm-bll-power-40.0.pth| train loss 0.48071020381881835| valid loss 0.5776361797166907\n",
      "Epoch:3|Train Loss:0.48071020381881835|Val Loss:0.5776361797166907\n",
      "Epoch:4|Train Loss:0.38891044940720215|Val Loss:0.5957515913507213\n",
      "Epoch:5|Train Loss:0.31141142134970806|Val Loss:0.6544018750605376\n",
      "Current lr: 0.001\n",
      "Epoch:6|Train Loss:0.24939557156981307|Val Loss:0.7439283702684485\n",
      "Epoch:7|Train Loss:0.20372031050476622|Val Loss:0.8144047649010367\n",
      "Epoch:8|Train Loss:0.17533108195725908|Val Loss:0.823820103769717\n",
      "Current lr: 0.0005\n",
      "Epoch:9|Train Loss:0.15775305611339022|Val Loss:0.9542471403660981\n",
      "Early stop!\n",
      "Start test model: lm-bll-power-40.0.pth\n",
      "Test Loss: 0.5966390539770541\n",
      "***负采样评率0.5***\n",
      "Save model path:lm-bll-power-50.0.pth| train loss 0.9435701893365129| valid loss 0.7158774007921633\n",
      "Epoch:1|Train Loss:0.9435701893365129|Val Loss:0.7158774007921633\n",
      "Save model path:lm-bll-power-50.0.pth| train loss 0.5931281227063625| valid loss 0.5870136437208756\n",
      "Epoch:2|Train Loss:0.5931281227063625|Val Loss:0.5870136437208756\n",
      "Save model path:lm-bll-power-50.0.pth| train loss 0.46800439947463096| valid loss 0.5646290286727573\n",
      "Epoch:3|Train Loss:0.46800439947463096|Val Loss:0.5646290286727573\n",
      "Epoch:4|Train Loss:0.3798060012941665|Val Loss:0.5805345747781836\n",
      "Epoch:5|Train Loss:0.30568357564984483|Val Loss:0.62824373141579\n",
      "Current lr: 0.001\n",
      "Epoch:6|Train Loss:0.24523917720355887|Val Loss:0.7107136223627173\n",
      "Epoch:7|Train Loss:0.1996127222763731|Val Loss:0.8021654020185056\n",
      "Epoch:8|Train Loss:0.1671095055310016|Val Loss:0.8597482520601024\n",
      "Current lr: 0.0005\n",
      "Epoch:9|Train Loss:0.15087596774893872|Val Loss:0.8610861327337183\n",
      "Early stop!\n",
      "Start test model: lm-bll-power-50.0.pth\n",
      "Test Loss: 0.5790753831034121\n",
      "***负采样评率0.6000000000000001***\n",
      "Save model path:lm-bll-power-60.00000000000001.pth| train loss 0.9017548186981932| valid loss 0.6799185172371243\n",
      "Epoch:1|Train Loss:0.9017548186981932|Val Loss:0.6799185172371243\n",
      "Save model path:lm-bll-power-60.00000000000001.pth| train loss 0.5692631783003502| valid loss 0.5619786070740741\n",
      "Epoch:2|Train Loss:0.5692631783003502|Val Loss:0.5619786070740741\n",
      "Save model path:lm-bll-power-60.00000000000001.pth| train loss 0.4475860703498759| valid loss 0.5372326749822368\n",
      "Epoch:3|Train Loss:0.4475860703498759|Val Loss:0.5372326749822368\n",
      "Epoch:4|Train Loss:0.36160829314526094|Val Loss:0.5483119708040486\n",
      "Epoch:5|Train Loss:0.28881952483603296|Val Loss:0.5967876664970232\n",
      "Current lr: 0.001\n",
      "Epoch:6|Train Loss:0.22951148006510227|Val Loss:0.676640132199163\n",
      "Epoch:7|Train Loss:nan|Val Loss:nan\n",
      "Epoch:8|Train Loss:nan|Val Loss:nan\n",
      "Current lr: 0.0005\n",
      "Epoch:9|Train Loss:nan|Val Loss:nan\n",
      "Early stop!\n",
      "Start test model: lm-bll-power-60.00000000000001.pth\n",
      "Test Loss: 0.5482260558916174\n",
      "***负采样评率0.7000000000000001***\n",
      "Save model path:lm-bll-power-70.0.pth| train loss 0.8648699962712348| valid loss 0.6393718849057737\n",
      "Epoch:1|Train Loss:0.8648699962712348|Val Loss:0.6393718849057737\n",
      "Save model path:lm-bll-power-70.0.pth| train loss 0.537889005814461| valid loss 0.5257362218006797\n",
      "Epoch:2|Train Loss:0.537889005814461|Val Loss:0.5257362218006797\n",
      "Save model path:lm-bll-power-70.0.pth| train loss 0.42365823353224613| valid loss 0.4987663678500963\n",
      "Epoch:3|Train Loss:0.42365823353224613|Val Loss:0.4987663678500963\n",
      "Epoch:4|Train Loss:0.34248341810196004|Val Loss:0.5069697680680648\n",
      "Epoch:5|Train Loss:0.2736330385854904|Val Loss:0.5483671076919722\n",
      "Current lr: 0.001\n",
      "Epoch:6|Train Loss:0.21770472816647368|Val Loss:0.6155142512010492\n",
      "Epoch:7|Train Loss:0.175786009890602|Val Loss:0.6884120028951893\n",
      "Epoch:8|Train Loss:0.14760311391759426|Val Loss:0.7698867321014404\n",
      "Current lr: 0.0005\n",
      "Epoch:9|Train Loss:0.1315900529239406|Val Loss:0.7835414772448333\n",
      "Early stop!\n",
      "Start test model: lm-bll-power-70.0.pth\n",
      "Test Loss: 0.5211785280186197\n",
      "***负采样评率0.8***\n",
      "Save model path:lm-bll-power-80.0.pth| train loss 0.8012913681091146| valid loss 0.5941181701162587\n",
      "Epoch:1|Train Loss:0.8012913681091146|Val Loss:0.5941181701162587\n",
      "Save model path:lm-bll-power-80.0.pth| train loss 0.4963791089806151| valid loss 0.48974250321802887\n",
      "Epoch:2|Train Loss:0.4963791089806151|Val Loss:0.48974250321802887\n",
      "Save model path:lm-bll-power-80.0.pth| train loss 0.38738526134414875| valid loss 0.46564420539399853\n",
      "Epoch:3|Train Loss:0.38738526134414875|Val Loss:0.46564420539399853\n",
      "Epoch:4|Train Loss:0.3097085648394646|Val Loss:0.4749450255995211\n",
      "Epoch:5|Train Loss:0.24491260787273975|Val Loss:0.5136380921239438\n",
      "Current lr: 0.001\n",
      "Epoch:6|Train Loss:0.19346652766491504|Val Loss:0.5805068029009778\n",
      "Epoch:7|Train Loss:0.15600876759816992|Val Loss:0.6635569048964459\n",
      "Epoch:8|Train Loss:0.12969337978420106|Val Loss:0.7160062427106111\n",
      "Current lr: 0.0005\n",
      "Epoch:9|Train Loss:0.11285069826594059|Val Loss:0.7800557846608369\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Early stop!\n",
      "Start test model: lm-bll-power-80.0.pth\n",
      "Test Loss: 0.48981772298398224\n",
      "***负采样评率0.9***\n",
      "Save model path:lm-bll-power-90.0.pth| train loss 0.7386503298865988| valid loss 0.5410839500634567\n",
      "Epoch:1|Train Loss:0.7386503298865988|Val Loss:0.5410839500634567\n",
      "Save model path:lm-bll-power-90.0.pth| train loss 0.4514954851028767| valid loss 0.45555459546006244\n",
      "Epoch:2|Train Loss:0.4514954851028767|Val Loss:0.45555459546006244\n",
      "Save model path:lm-bll-power-90.0.pth| train loss 0.3583537828414998| valid loss 0.4365962821504344\n",
      "Epoch:3|Train Loss:0.3583537828414998|Val Loss:0.4365962821504344\n",
      "Epoch:4|Train Loss:0.2898053713142872|Val Loss:0.4457312926002171\n",
      "Epoch:5|Train Loss:0.2302773582174423|Val Loss:0.4818404653797979\n",
      "Current lr: 0.001\n",
      "Epoch:6|Train Loss:0.18138453071104718|Val Loss:0.5416174245917279\n",
      "Epoch:7|Train Loss:0.14555465786698016|Val Loss:0.5979200472002444\n",
      "Epoch:8|Train Loss:0.120943028876122|Val Loss:0.6374243718126545\n",
      "Current lr: 0.0005\n",
      "Epoch:9|Train Loss:nan|Val Loss:nan\n",
      "Early stop!\n",
      "Start test model: lm-bll-power-90.0.pth\n",
      "Test Loss: 0.45819183795348456\n"
     ]
    }
   ],
   "source": [
    "SAMPLE_NUM = 20   # number of negative samples per positive target\n",
    "\n",
    "# Sweep the negative-sampling exponent over 0.1 .. 0.9: for each power,\n",
    "# rebuild the vocab/frequency tables, train with early stopping, and report\n",
    "# the test loss of the best checkpoint.\n",
    "for p in range(1, 10):\n",
    "    BEST_VALID_LOSS = float('inf')   # reset best validation loss for this run\n",
    "    power = p / 10                   # exact decimal; 0.1*p prints 0.30000000000000004\n",
    "    print(\"***负采样频率{}***\".format(power))\n",
    "    # Integer suffix keeps file names clean (power*100 previously produced\n",
    "    # artifacts such as 'lm-bll-power-30.000000000000004.pth').\n",
    "    model_name = 'lm-bll-power-{}.pth'.format(p * 10)\n",
    "    word_set, word2idx, idx2word, word_freqs = create_word_set(train_file, dev_file, test_file, power=power)\n",
    "\n",
    "    # Reserve index 0 for the padding token.\n",
    "    PAD_IDX = 0\n",
    "    idx2word[PAD_IDX] = '<pad>'\n",
    "    word2idx['<pad>'] = PAD_IDX\n",
    "\n",
    "    train_sentences = load_corpus(train_file)\n",
    "    dev_sentences = load_corpus(dev_file)\n",
    "    test_sentences = load_corpus(test_file)\n",
    "\n",
    "    train_words = sentences2words(train_sentences)\n",
    "    dev_words = sentences2words(dev_sentences)\n",
    "    test_words = sentences2words(test_sentences)\n",
    "\n",
    "    # Encode sentences and draw negatives from the smoothed distribution.\n",
    "    train_data, train_label, train_neg = model_sequence(train_sentences, word2idx, word_freqs, sample_num=SAMPLE_NUM)\n",
    "    dev_data, dev_label, dev_neg = model_sequence(dev_sentences, word2idx, word_freqs, sample_num=SAMPLE_NUM)\n",
    "    test_data, test_label, test_neg = model_sequence(test_sentences, word2idx, word_freqs, sample_num=SAMPLE_NUM)\n",
    "\n",
    "    train_batch = gene_batch_data(train_data, train_label, train_neg, batch_size=BATCH_SIZE)\n",
    "    dev_batch = gene_batch_data(dev_data, dev_label, dev_neg, batch_size=BATCH_SIZE)\n",
    "    test_batch = gene_batch_data(test_data, test_label, test_neg, batch_size=BATCH_SIZE)\n",
    "\n",
    "    model = LSTMNegModel(EMBEDDING_DIM, EMBEDDING_OUT, HIDDEN_DIM, VOCAB_SIZE, SAMPLE_NUM)\n",
    "    DEVICE = torch.device(\"cpu\")\n",
    "    model = model.to(DEVICE)\n",
    "\n",
    "    optimizer = optim.Adam(model.parameters(), lr=LEARN_RATE)\n",
    "    # Each scheduler.step() halves the learning rate (gamma=0.5).\n",
    "    scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, 0.5)\n",
    "\n",
    "    SCHED_NUM = 0   # consecutive epochs without dev-set improvement\n",
    "    for epoch in range(1, EPOCHS + 1):\n",
    "        train_loss = train(model, DEVICE, train_batch, optimizer, GRAD_CLIP)\n",
    "        valid_loss = evaluate(model, DEVICE, dev_batch)\n",
    "        if valid_loss < BEST_VALID_LOSS:\n",
    "            # New best model: checkpoint its weights. Saving the state_dict\n",
    "            # (not the pickled module) is the recommended, code-robust form.\n",
    "            BEST_VALID_LOSS = valid_loss\n",
    "            torch.save(model.state_dict(), model_name)\n",
    "            print(\"Save model path:{}| train loss {}| valid loss {}\".format(model_name, train_loss, valid_loss))\n",
    "            SCHED_NUM = 0\n",
    "        else:\n",
    "            SCHED_NUM += 1\n",
    "            if SCHED_NUM % 3 == 0:   # decay the lr after every 3 stale epochs\n",
    "                scheduler.step()\n",
    "                print(\"Current lr:\", optimizer.param_groups[0]['lr'])\n",
    "            if SCHED_NUM == 7:       # give up after 7 stale epochs\n",
    "                print(\"Early stop!\")\n",
    "                break\n",
    "        print('Epoch:{}|Train Loss:{}|Val Loss:{}'.format(epoch, train_loss, valid_loss))\n",
    "    print(\"Start test model:\", model_name)\n",
    "    # Restore the best checkpoint before measuring the test loss.\n",
    "    model.load_state_dict(torch.load(model_name))\n",
    "    test_loss = evaluate(model, DEVICE, test_batch)\n",
    "    print('Test Loss: {}'.format(test_loss))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.8"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
