{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# GloVe Pytorch实现"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "日志参数设置"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [],
   "source": [
     "import logging\n",
     "\n",
     "# Timestamped INFO-level logs so the long preprocessing/training steps are traceable.\n",
     "logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 加载语料库"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {},
   "outputs": [],
   "source": [
    "import spacy\n",
    "from collections import defaultdict\n",
    "\n",
    "class SpacyTokenizer:\n",
    "    \"\"\" Tool for tokenize powered by spacy module\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, lang: str, disable=['parser', 'tagger', 'ner']):\n",
    "        \"\"\" Initialize the language type for token\n",
    "        Args:\n",
    "            lang (str): language type for tokenizer\n",
    "        \"\"\"\n",
    "        self._nlp = spacy.load(lang)\n",
    "\n",
    "    def tokenize(self, text: str) -> list:\n",
    "        # we don't need new line as token\n",
    "        lines = text.splitlines()\n",
    "\n",
    "        doc = [[token.text for token\n",
    "                in self._nlp.tokenizer(text.strip())] for text in lines]\n",
    "\n",
    "        return doc\n",
    "\n",
    "    \n",
    "class Dictionary:\n",
    "    \"\"\" Tool to build word2idx and doc2idx\n",
    "    Args:\n",
    "        doc {list}: list of documents contains words\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, doc=None):\n",
    "\n",
    "        self.vocab_size = 0\n",
    "        self.word2idx = defaultdict(int)\n",
    "\n",
    "        self.update(doc)\n",
    "\n",
    "    def update(self, doc: list):\n",
    "        \"\"\" Update word2idx information by doc\n",
    "        Args:\n",
    "            doc (list): list of words\n",
    "        \"\"\"\n",
    "\n",
    "        if doc is None:\n",
    "            return\n",
    "\n",
    "        vocab_size, word2idx = self.vocab_size, self.word2idx\n",
    "\n",
    "        # count word occurrance and vocab size\n",
    "        tokens = set()\n",
    "        for line in doc:\n",
    "            tokens.update(line)\n",
    "\n",
    "        for token in tokens:\n",
    "            if token not in word2idx:\n",
    "                word2idx[token] = vocab_size\n",
    "                vocab_size += 1\n",
    "\n",
    "        self.vocab_size = vocab_size\n",
    "\n",
    "    def corpus(self, doc: list) -> list:\n",
    "        \"\"\" Convert text of documents to idx of documents\n",
    "        Args:\n",
    "            doc (list): text of documents\n",
    "        Returns:\n",
    "            list: idx of documents\n",
    "        \"\"\"\n",
    "\n",
    "        word2idx = self.word2idx\n",
    "        corpus = [[word2idx[word] for word in line if word in word2idx]\n",
    "                  for line in doc]\n",
    "        return corpus\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 44,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import pickle\n",
    "import zipfile\n",
    "\n",
    "def read_data(file_path, type='file'):\n",
    "    \"\"\" Read data into a string\n",
    "    Args:\n",
    "        file_path (str): path for the data file\n",
    "    \"\"\"\n",
    "    text = None\n",
    "    if type is 'file':\n",
    "        with open(file_path, mode='r', encoding='utf-8') as fp:\n",
    "            text = fp.read()\n",
    "    elif type is 'zip':\n",
    "        with zipfile.ZipFile(file_path) as fp:\n",
    "            text = fp.read(fp.namelist()[0]).decode()\n",
    "    return text\n",
    "\n",
    "def preprocess(file_path):\n",
    "    \"\"\" Get corpus and vocab_size from raw text\n",
    "    Args:\n",
    "        file_path (str): raw file path\n",
    "    Returns:\n",
    "        corpus (list): list of idx words\n",
    "        vocab_size (int): vocabulary size\n",
    "    \"\"\"\n",
    "\n",
    "    # preprocess read raw text\n",
    "    text = read_data(FILE_PATH, type='file')\n",
    "    logging.info(\"read raw data\")\n",
    "\n",
    "    # init base model\n",
    "    tokenizer = SpacyTokenizer(LANG)\n",
    "    dictionary = Dictionary()\n",
    "\n",
    "    # build corpus\n",
    "    doc = tokenizer.tokenize(text)\n",
    "    logging.info(\"after generate tokens from text\")\n",
    "\n",
    "    # save doc\n",
    "    with open(DOC_PATH, mode='wb') as fp:\n",
    "        pickle.dump(doc, fp)\n",
    "    logging.info(\"tokenized documents saved!\")\n",
    "    \n",
    "    # load doc\n",
    "    with open(DOC_PATH, 'rb') as fp:\n",
    "        doc = pickle.load(fp)\n",
    "\n",
    "    dictionary.update(doc)\n",
    "    logging.info(\"after generate dictionary\")\n",
    "    corpus = dictionary.corpus(doc)\n",
    "    word2idx = dictionary.word2idx\n",
    "    vocab_size = dictionary.vocab_size\n",
    "\n",
    "    return corpus, vocab_size, word2idx"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 构建GloVe模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 36,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.init as init\n",
    "import torch.optim as optim\n",
    "from torch.utils.data import DataLoader, Dataset\n",
    "from collections import Counter, defaultdict\n",
    "\n",
    "\n",
    "class GloVe(nn.Module):\n",
    "    \"\"\"Implement GloVe model with Pytorch\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, embedding_size, context_size, vocab_size, min_occurrance=1, x_max=100, alpha=3 / 4):\n",
    "        super(GloVe, self).__init__()\n",
    "\n",
    "        self.embedding_size = embedding_size\n",
    "        if isinstance(context_size, tuple):\n",
    "            self.left_context, self.right_context = context_size\n",
    "        if isinstance(context_size, int):\n",
    "            self.left_context = self.right_context = context_size\n",
    "        else:\n",
    "            raise ValueError(\n",
    "                \"'context_size' should be an int or a tuple of two ints\")\n",
    "        \n",
    "        self.vocab_size = vocab_size\n",
    "        self.alpha = alpha\n",
    "        self.min_occurrance = min_occurrance\n",
    "        self.x_max = x_max\n",
    "\n",
    "        self._focal_embeddings = nn.Embedding(\n",
    "            vocab_size, embedding_size).type(torch.float64)\n",
    "        self._context_embeddings = nn.Embedding(\n",
    "            vocab_size, embedding_size).type(torch.float64)\n",
    "        \n",
    "        self._focal_biases = nn.Embedding(vocab_size, 1).type(torch.float64)\n",
    "        self._context_biases = nn.Embedding(vocab_size, 1).type(torch.float64)\n",
    "        self._glove_dataset = None\n",
    "\n",
    "        for params in self.parameters():\n",
    "            init.uniform_(params, a=-1, b=1)\n",
    "\n",
    "    def fit(self, corpus):\n",
    "        \"\"\"get dictionary word list and co-occruence matrix from corpus\n",
    "        Args:\n",
    "            corpus (list): contain word id list\n",
    "        Raises:\n",
    "            ValueError: when count zero cocurrences will raise the problems\n",
    "        \"\"\"\n",
    "\n",
    "        left_size, right_size = self.left_context, self.right_context\n",
    "        vocab_size, min_occurrance = self.vocab_size, self.min_occurrance\n",
    "\n",
    "        # get co-occurence count matrix X\n",
    "        word_counts = Counter()\n",
    "        cooccurence_counts = defaultdict(float)\n",
    "        for region in corpus:\n",
    "            word_counts.update(region)\n",
    "            for left_context, word, right_context in _context_windows(region, left_size, right_size):\n",
    "                for i, context_word in enumerate(left_context[::-1]):\n",
    "                    # add (1 / distance from focal word) for this pair\n",
    "                    cooccurence_counts[(word, context_word)] += 1 / (i + 1)\n",
    "                for i, context_word in enumerate(right_context):\n",
    "                    cooccurence_counts[(word, context_word)] += 1 / (i + 1)\n",
    "        if len(cooccurence_counts) == 0:\n",
    "            raise ValueError(\n",
    "                \"No coccurrences in corpus, Did you try to reuse a generator?\")\n",
    "\n",
    "        # get words bag information\n",
    "        tokens = [word for word, count in\n",
    "                  word_counts.most_common(vocab_size) if count >= min_occurrance]\n",
    "        coocurrence_matrix = [(words[0], words[1], count)\n",
    "                              for words, count in cooccurence_counts.items()\n",
    "                              if words[0] in tokens and words[1] in tokens]\n",
    "        self._glove_dataset = GloVeDataSet(coocurrence_matrix)\n",
    "\n",
    "    def train(self, num_epoch, device, batch_size=512, learning_rate=0.05, loop_interval=10):\n",
    "        \"\"\"Training GloVe model\n",
    "        Args:\n",
    "            num_epoch (int): number of epoch\n",
    "            device (str): cpu or gpu\n",
    "            batch_size (int, optional): Defaults to 512.\n",
    "            learning_rate (float, optional): Defaults to 0.05. learning rate for Adam optimizer\n",
    "            batch_interval (int, optional): Defaults to 100. interval time to show average loss\n",
    "        Raises:\n",
    "            NotFitToCorpusError: if the model is not fit by corpus, the error will be raise\n",
    "        \"\"\"\n",
    "\n",
    "        if self._glove_dataset is None:\n",
    "            raise NotFitToCorpusError(\n",
    "                \"Please fit model with corpus before training\")\n",
    "\n",
    "        # basic training setting\n",
    "        optimizer = optim.Adam(self.parameters(), lr=learning_rate)\n",
    "        glove_dataloader = DataLoader(self._glove_dataset, batch_size)\n",
    "        total_loss = 0\n",
    "        \n",
    "        for epoch in range(num_epoch):\n",
    "            for idx, batch in enumerate(glove_dataloader):\n",
    "                optimizer.zero_grad()\n",
    "\n",
    "                i_s, j_s, counts = batch\n",
    "                i_s = i_s.to(device)\n",
    "                j_s = j_s.to(device)\n",
    "                counts = counts.to(device)\n",
    "                loss = self._loss(i_s, j_s, counts)\n",
    "\n",
    "                total_loss += loss.item()\n",
    "                if idx % loop_interval == 0:\n",
    "                    avg_loss = total_loss / loop_interval\n",
    "                    print(\"epoch: {}/{}, current step: {}, average loss: {}\".format(\n",
    "                        epoch+1, num_epoch, idx, avg_loss))\n",
    "                    total_loss = 0\n",
    "\n",
    "                loss.backward()\n",
    "                optimizer.step()\n",
    "\n",
    "        print(\"finish glove vector training\")\n",
    "\n",
    "    def get_coocurrance_matrix(self):\n",
    "        \"\"\" Return co-occurance matrix for saving\n",
    "        Returns:\n",
    "            list: list itam (word_idx1, word_idx2, cooccurances)\n",
    "        \"\"\"\n",
    "\n",
    "        return self._glove_dataset._coocurrence_matrix\n",
    "\n",
    "    def embedding_for_tensor(self, tokens):\n",
    "        if not torch.is_tensor(tokens):\n",
    "            raise ValueError(\"the tokens must be pytorch tensor object\")\n",
    "\n",
    "        return self._focal_embeddings(tokens) + self._context_embeddings(tokens)\n",
    "\n",
    "    def _loss(self, focal_input, context_input, coocurrence_count):\n",
    "        x_max, alpha = self.x_max, self.alpha\n",
    "\n",
    "        focal_embed = self._focal_embeddings(focal_input)\n",
    "        context_embed = self._context_embeddings(context_input)\n",
    "        focal_bias = self._focal_biases(focal_input)\n",
    "        context_bias = self._context_biases(context_input)\n",
    "\n",
    "        # count weight factor f(x)\n",
    "        weight_factor = torch.pow(coocurrence_count / x_max, alpha)\n",
    "        weight_factor[weight_factor > 1] = 1\n",
    "\n",
    "        embedding_products = torch.sum(focal_embed * context_embed, dim=1)\n",
    "        log_cooccurrences = torch.log(coocurrence_count)\n",
    "\n",
    "        distance_expr = (embedding_products + focal_bias +\n",
    "                         context_bias + log_cooccurrences) ** 2\n",
    "\n",
    "        single_losses = weight_factor * distance_expr\n",
    "        mean_loss = torch.mean(single_losses)\n",
    "        return mean_loss\n",
    "\n",
    "\n",
     "class GloVeDataSet(Dataset):\n",
     "    \"\"\" Thin Dataset wrapper over a list of (focal, context, count) triples. \"\"\"\n",
     "\n",
     "    def __init__(self, coocurrence_matrix):\n",
     "        # list of (focal_idx, context_idx, cooccurrence_count) triples\n",
     "        self._coocurrence_matrix = coocurrence_matrix\n",
     "\n",
     "    def __getitem__(self, index):\n",
     "        return self._coocurrence_matrix[index]\n",
     "\n",
     "    def __len__(self):\n",
     "        return len(self._coocurrence_matrix)\n",
    "\n",
    "def _context_windows(region, left_size, right_size):\n",
    "    \"\"\"generate left_context, word, right_context tuples for each region\n",
    "    Args:\n",
    "        region (str): a sentence\n",
    "        left_size (int): left windows size\n",
    "        right_size (int): right windows size\n",
    "    \"\"\"\n",
    "\n",
    "    for i, word in enumerate(region):\n",
    "        start_index = i - left_size\n",
    "        end_index = i + right_size\n",
    "        left_context = _window(region, start_index, i - 1)\n",
    "        right_context = _window(region, i + 1, end_index)\n",
    "        yield (left_context, word, right_context)\n",
    "\n",
    "\n",
    "def _window(region, start_index, end_index):\n",
    "    \"\"\"Returns the list of words starting from `start_index`, going to `end_index`\n",
    "    taken from region. If `start_index` is a negative number, or if `end_index`\n",
    "    is greater than the index of the last word in region, this function will pad\n",
    "    its return value with `NULL_WORD`.\n",
    "    Args:\n",
    "        region (str): the sentence for extracting the token base on the context\n",
    "        start_index (int): index for start step of window\n",
    "        end_index (int): index for the end step of window\n",
    "    \"\"\"\n",
    "    last_index = len(region) + 1\n",
    "    selected_tokens = region[max(start_index, 0):\n",
    "                             min(end_index, last_index) + 1]\n",
    "    return selected_tokens\n",
    "\n",
     "class NotTrainedError(Exception):\n",
     "    \"\"\" Error for using an untrained model (declared for API completeness;\n",
     "    not raised anywhere in the visible code). \"\"\"\n",
     "    pass\n",
     "\n",
     "\n",
     "class NotFitToCorpusError(Exception):\n",
     "    \"\"\" Raised by GloVe.train() when fit() has not built the dataset yet. \"\"\"\n",
     "    pass"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 训练"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "FILE_PATH = './bioCorpus_5000.txt'\n",
    "MODLE_PATH = './glove.pkl'\n",
    "DOC_PATH = './bioCorpus_5000.pickle'\n",
    "COMATRIX_PATH = './comat.pickle'\n",
    "LANG = 'en_core_web_sm'\n",
    "EMBEDDING_SIZE = 128\n",
    "CONTEXT_SIZE = 3\n",
    "NUM_EPOCH = 50\n",
    "BATHC_SIZE = 512\n",
    "LEARNING_RATE = 0.01\n",
    "\n",
    "corpus, vocab_size, word2idx = preprocess(FILE_PATH)\n",
    "\n",
    "# specify device type\n",
    "device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n",
    "\n",
    "# init vector model\n",
    "logging.info(\"init model hyperparameter\")\n",
    "model = GloVe(EMBEDDING_SIZE, CONTEXT_SIZE, vocab_size)\n",
    "model.to(device)\n",
    "\n",
    "# fit corpus to count cooccurance matrix\n",
    "model.fit(.weight.data.cpu().numpy())\n",
    "\n",
    "cooccurance_matrix = model.get_coocurrance_matrix()\n",
    "# saving cooccurance_matrix\n",
    "with open(COMATRIX_PATH, mode='wb') as fp:\n",
    "    pickle.dump(cooccurance_matrix, fp)\n",
    "\n",
    "model.train(NUM_EPOCH, device, learning_rate=LEARNING_RATE)\n",
    "\n",
    "# save model for evaluation\n",
    "torch.save(model, MODLE_PATH)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 测试"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "# load the trained model saved by the training cell\n",
     "model = torch.load(MODLE_PATH)\n",
     "\n",
     "# look up the learned embedding (focal + context sum) for every vocabulary word\n",
     "for word, idx in word2idx.items():\n",
     "    emd = model.embedding_for_tensor(torch.tensor(idx).to(device)).data.cpu()\n",
     "#     print(word, emd, '\\n')  # uncomment to inspect individual vectors"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
