{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "50881653",
   "metadata": {},
   "source": [
    "# 机器读心术之文本挖掘与自然语言处理第13课书面作业\n",
    "学号：207402  \n",
    "\n",
    "**作业内容：**  \n",
    "1. 写程序实现Koehn书英文版第91页example的计算  \n",
    "2. （可选）利用课程中微信聊天机器人实现攻略文档，构建一名微信聊天机器人，将实验过程抓图，注意微信会侦测和查封机器人，尽量注册马甲完成实验  \n",
    "3. （可选）下载课程中介绍过的欧盟平行语料库（中的某一对语言的平行语料），编程实现EM算法对其进行训练得出参数估计  "
   ]
  },
  {
   "cell_type": "markdown",
   "id": "e97569cb",
   "metadata": {},
   "source": [
    "## 第1题\n",
    "写程序实现Koehn书英文版第91页example的计算。  \n",
    "Koehn书中第91页的例子中，句子对如下：  \n",
    "\n",
    "| 序号 | 英语（e)  | 德语（f） |\n",
    "| ---- | --------- | --------- |\n",
    "| 1    | the house | das Haus  |\n",
    "| 2    | the book  | das Buch  |\n",
    "| 3    | a book    | ein Buch  |\n",
    "\n",
    "参见书的算法实现：  \n",
    "<img src=\"https://gitee.com/dotzhen/cloud-notes/raw/master/IBM_model1_em.png\" alt=\"IBM_model1_em\" style=\"zoom:70%;\" />"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "e0a8a1e8",
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "\n",
    "def preprocess(corpus):\n",
    "    text = ' '.join(corpus)\n",
    "    text = text.lower()  # 转小写\n",
    "    words = text.split()  # 分词\n",
    "    vocab_words = set(words)\n",
    "    vocab2id = {word: i for i, word in enumerate(vocab_words)}\n",
    "    id2vocab = {i: word for i, word in enumerate(vocab_words)}\n",
    "    return vocab2id, id2vocab\n",
    "\n",
    "def print_iter(turns, t,vocab2id_e,vocab2id_f):\n",
    "    if turns > 0:\n",
    "        print('%6s %6s 第%d轮'%('e','f',turns))\n",
    "    else:\n",
    "        print('%6s %6s 最终轮'%('e','f'))\n",
    "    for f in vocab2id_f:\n",
    "        for e in vocab2id_e:\n",
    "           print('%6s %6s %f'%(e,f,t[vocab2id_e[e]][vocab2id_f[f]]))\n",
    "        \n",
    "def em_proc(corpus_e, corpus_f,vocab2id_e,vocab2id_f, epsilon = 0.001,maxturns=10000,verbose=1):\n",
    "    vocabsize_e = len(vocab2id_e)\n",
    "    vocabsize_f = len(vocab2id_f)\n",
    "    t = np.zeros((vocabsize_e,vocabsize_f))\n",
    "    #初始化t\n",
    "    for i in range(vocabsize_f):\n",
    "        a = 1./vocabsize_f\n",
    "        for j in range(vocabsize_e):\n",
    "            t[j][i] = a\n",
    "    t_n = t.copy()\n",
    "    count = np.zeros((vocabsize_e,vocabsize_f))\n",
    "    total = np.zeros(vocabsize_f)\n",
    "    s_total = np.zeros(vocabsize_e)\n",
    "    \n",
    "    turns = 0\n",
    "    while True:\n",
    "        # initialize\n",
    "        count *= 0\n",
    "        total *= 0\n",
    "        for i in range(len(corpus_e)):\n",
    "            E = corpus_e[i].split()\n",
    "            F = corpus_f[i].split()\n",
    "            for e in E:\n",
    "                s_total[vocab2id_e[e]] = 0\n",
    "                for f in F:\n",
    "                    s_total[vocab2id_e[e]] += t[vocab2id_e[e]][vocab2id_f[f]]\n",
    "        for i in range(len(corpus_e)):\n",
    "            E = corpus_e[i].split()\n",
    "            F = corpus_f[i].split()\n",
    "            for e in E:\n",
    "                for f in F:\n",
    "                    count[vocab2id_e[e]][vocab2id_f[f]] += t[vocab2id_e[e]][vocab2id_f[f]]/s_total[vocab2id_e[e]]\n",
    "                    total[vocab2id_f[f]] += t[vocab2id_e[e]][vocab2id_f[f]]/s_total[vocab2id_e[e]]\n",
    "\n",
    "        for f in vocab2id_f:\n",
    "            for e in vocab2id_e:\n",
    "                t[vocab2id_e[e]][vocab2id_f[f]] = count[vocab2id_e[e]][vocab2id_f[f]]/total[vocab2id_f[f]]\n",
    "        turns += 1\n",
    "\n",
    "        if np.linalg.norm(t_n - t) <= epsilon:\n",
    "            if verbose==1:\n",
    "                print('after %d turns, calculation converged!'%(turns))\n",
    "            break\n",
    "        else:\n",
    "            t_n = t.copy()\n",
    "        if turns >= maxturns:\n",
    "            if verbose==1:\n",
    "                print('reach max turns!')\n",
    "            break\n",
    "    \n",
    "    return t"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "d9274b0f",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "     e      f 第1轮\n",
      " house   buch 0.000000\n",
      "     a   buch 0.250000\n",
      "   the   buch 0.250000\n",
      "  book   buch 0.500000\n",
      " house    das 0.250000\n",
      "     a    das 0.000000\n",
      "   the    das 0.500000\n",
      "  book    das 0.250000\n",
      " house   haus 0.500000\n",
      "     a   haus 0.000000\n",
      "   the   haus 0.500000\n",
      "  book   haus 0.000000\n",
      " house    ein 0.000000\n",
      "     a    ein 0.500000\n",
      "   the    ein 0.000000\n",
      "  book    ein 0.500000\n",
      "------------------------\n",
      "     e      f 第2轮\n",
      " house   buch 0.000000\n",
      "     a   buch 0.200000\n",
      "   the   buch 0.200000\n",
      "  book   buch 0.600000\n",
      " house    das 0.173913\n",
      "     a    das 0.000000\n",
      "   the    das 0.695652\n",
      "  book    das 0.130435\n",
      " house   haus 0.500000\n",
      "     a   haus 0.000000\n",
      "   the   haus 0.500000\n",
      "  book   haus 0.000000\n",
      " house    ein 0.000000\n",
      "     a    ein 0.571429\n",
      "   the    ein 0.000000\n",
      "  book    ein 0.428571\n",
      "------------------------\n",
      "     e      f 第3轮\n",
      " house   buch 0.000000\n",
      "     a   buch 0.157200\n",
      "   the   buch 0.135397\n",
      "  book   buch 0.707402\n",
      " house    das 0.133141\n",
      "     a    das 0.000000\n",
      "   the    das 0.801434\n",
      "  book    das 0.065425\n",
      " house   haus 0.570637\n",
      "     a   haus 0.000000\n",
      "   the   haus 0.429363\n",
      "  book   haus 0.000000\n",
      " house    ein 0.000000\n",
      "     a    ein 0.640000\n",
      "   the    ein 0.000000\n",
      "  book    ein 0.360000\n",
      "------------------------\n",
      "     e      f 最终轮\n",
      " house   buch 0.000000\n",
      "     a   buch 0.000000\n",
      "   the   buch 0.000000\n",
      "  book   buch 1.000000\n",
      " house    das 0.000000\n",
      "     a    das 0.000000\n",
      "   the    das 1.000000\n",
      "  book    das 0.000000\n",
      " house   haus 0.997494\n",
      "     a   haus 0.000000\n",
      "   the   haus 0.002506\n",
      "  book   haus 0.000000\n",
      " house    ein 0.000000\n",
      "     a    ein 0.998741\n",
      "   the    ein 0.000000\n",
      "  book    ein 0.001259\n"
     ]
    }
   ],
   "source": [
    "# Reproduce the worked example from Koehn p.91: three sentence pairs,\n",
    "# printing the t(e|f) table after 1, 2 and 3 EM iterations and at convergence.\n",
    "corpus_e=['the house','the book','a book']\n",
    "corpus_f=['das Haus','das Buch','ein Buch']\n",
    "corpus_e = [x.lower() for x in corpus_e]\n",
    "corpus_f = [x.lower() for x in corpus_f]\n",
    "vocab2id_e, id2vocab_e = preprocess(corpus_e)\n",
    "vocab2id_f, id2vocab_f = preprocess(corpus_f)\n",
    "vocabsize_e = len(vocab2id_e)\n",
    "vocabsize_f = len(vocab2id_f)\n",
    "\n",
    "# each call restarts EM from the uniform table, so maxturns=k shows the\n",
    "# state after exactly k iterations\n",
    "t = em_proc(corpus_e,corpus_f,vocab2id_e,vocab2id_f,maxturns=1,verbose=0)\n",
    "print_iter(1,t,vocab2id_e,vocab2id_f)\n",
    "print('------------------------')\n",
    "t = em_proc(corpus_e,corpus_f,vocab2id_e,vocab2id_f,maxturns=2,verbose=0)\n",
    "print_iter(2,t,vocab2id_e,vocab2id_f)\n",
    "print('------------------------')\n",
    "t = em_proc(corpus_e,corpus_f,vocab2id_e,vocab2id_f,maxturns=3,verbose=0)\n",
    "print_iter(3,t,vocab2id_e,vocab2id_f)\n",
    "print('------------------------')\n",
    "# run to (near) convergence and print the final table\n",
    "t = em_proc(corpus_e,corpus_f,vocab2id_e,vocab2id_f,epsilon=0.00001,maxturns=10000,verbose=0)\n",
    "print_iter(-1,t,vocab2id_e,vocab2id_f)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "0d6d73f9",
   "metadata": {},
   "source": [
    "## 第2题\n",
    "利用课程中微信聊天机器人实现攻略文档，构建一名微信聊天机器人，将实验过程抓图。"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "ead8dc81",
   "metadata": {},
   "source": [
    "首先，网页版的微信入口基本上是被封掉了，刚开始运行wxBot程序总是报错，后面调试了一下，发现是网页版的微信因为安全的原因，基本上用不了了。所以通过网页版的微信入口构造微信聊天机器的路当前**走不通**。  \n",
    "网页版的微信这条路不行，还有PC版的微信客户端，也可以尝试从这个入口构造。\n",
    "* 我在图灵机器人网站上激活对话机器。\n",
    "<img src=\"https://gitee.com/dotzhen/cloud-notes/raw/master/nlp13-8.png\" alt=\"nlp13-8\" style=\"zoom: 50%;\" />\n",
    "* 在图灵对话机器人管理界面上有提示“微信个人号接入，您可通过第三方提供的插件接入微信个人号：**微群管家** 微友助手”。\n",
    "<img src=\"https://gitee.com/dotzhen/cloud-notes/raw/master/nlp13-7-1.png\" alt=\"nlp13-7-1\" style=\"zoom:50%;\" />\n",
    "* 我选择使用“微群管家”，这是一个exe版的windows程序，可以和微信PC客户端配合使用，版本上，我采用的是微群管家小翼版本v3.88，微信版本为3.2.1.127。运行小翼机器人程序后，截图如下：\n",
    "<img src=\"https://gitee.com/dotzhen/cloud-notes/raw/master/nlp13-6.png\" alt=\"nlp13-6\" style=\"zoom: 67%;\" />\n",
    "* 登录小翼机器人程序后会自动启动微信，通过手机扫码登录微信后，还需要设置一下图灵机器人API，设置入口：\"全局设置->通用设置->智能聊天\"，在弹出的窗口中的“图灵APIkey”中输入自己的API key即可完成全部设置：\n",
    "<img src=\"https://gitee.com/dotzhen/cloud-notes/raw/master/nlp13-4-1.jpg\" alt=\"nlp13-4-1\"/>\n",
    "* 可以进行简单的自动对话了，机器人反应还是比较快的：\n",
    "<img src=\"https://gitee.com/dotzhen/cloud-notes/raw/master/nlp13-5.png\" alt=\"nlp13-5\" style=\"zoom: 50%;\" />"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "52e197cd",
   "metadata": {},
   "source": [
    "## 第3题\n",
    "下载课程中介绍过的欧盟平行语料库（中的某一对语言的平行语料），编程实现EM算法对其进行训练得出参数估计。"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "0574dfad",
   "metadata": {},
   "source": [
    "基于前面实现的EM算法，很容易扩展到更大范围。我尝试了一下，选用了英语-德语的平行语料库，没有进行过多的预处理，只是简单的分词，然后就进行EM算法迭代，结果是提示“内存不足”，看了一下，英语的语料中词汇空间大小是271951，对应德语词汇空间大小是616947，我们要计算的概率矩阵t，如果采用double类型，则需要消耗1.22T，显然没有那么多内存可用，需要重新考虑一下方案设计。\n",
    "\n",
    "### 3.1 方案选择\n",
    "**思路，开源节流：**\n",
    "1. “节流”：\n",
    "  * 要对平行语料库做充分的预处理，当前简单处理的词汇空间并不合理，导致了词汇空间太大，加重了计算量与存储空间。  \n",
    "  * 对于语料做向量化处理，方便后续的计算；  \n",
    "  * 选择一个合适的概率矩阵初始化动作。  \n",
    "2. \"开源\"：  \n",
    "  * 需要设计一种能够用时间换空间的做法，解决内存不足的问题，比如：稀疏矩阵，以应对巨量存储问题；  \n",
    "  * 需要设计一种可增量计算的模型，不然即使解决了存储问题，海量计算量的时间成本也可能很高；  \n",
    "\n",
    "**编程语言选择：**  \n",
    "1. 对于预处理部分，选择python语言，主要是计算量可接受，同时处理库丰富；  \n",
    "2. 对于概率矩阵初始化，选择python语言，主要是计算量可接受，处理灵活； \n",
    "3. 对于EM算法部分，选择用C++语言，主要考虑处理性能，经实测，这里如果选用1000行平行语料时，C++大概比python快5倍，行数越多，效果越明显。\n",
    "\n",
    "### 3.2 方案实现\n",
    "\n",
    "#### 3.2.1 数据预处理\n",
    "\n",
    "1. 删除语料库中的空行。  \n",
    "  经观察，语料库中有些行是空行，但是对应另一平行语料中可能是空行，也可能不是。这些空行对于处理是没有意义的，需要在两侧语料库中同步删除。\n",
    "2. 删除标点符号。  \n",
    "  语料中标点符号对于我们计算单词间的概率意义不大，这里我们选择将之删除。  \n",
    "3. 进行命名实体识别，并用识别结果替代原文。  \n",
    "  语料中有大量的人名、地名、时间标识等命名实体，我认为这一部分要进行识别并替换为识别后的结果，这样有助于减小词汇空间，实现降存储省计算量的目的。  \n",
    "  在这里我采用了python中比较成熟的SpaCy库进行命名实体识别动作。\n",
    "4. 删除不识别的单词。  \n",
    "  这里对于不能识别为单词的词汇，我采用了直接删除的处理。在单词识别上，我采用了python中enchant库来进行处理。\n",
    "\n",
    "#### 3.2.2 概率矩阵初始化\n",
    "\n",
    "有两个原因导致对于概率矩阵的初始值选择很重要：  \n",
    "* EM算法对初始值的选择很重要，不同的初始值可能会导致最终算法收敛的值不同，比较合适的初始值才能让算法收敛到你期望的点上。  \n",
    "* 如果要采用稀疏矩阵的方式来节省存储就不能采用平均分布的方式来初始化概率矩阵，不然初始化后的概率矩阵就已经很巨量了。从我选择的初始化方法来看，概率矩阵中只有0.7%的点有值（其他都是0值）。\n",
    "\n",
    "我这里采用的方法很简单：\n",
    "* 遍历每个（e|f）平行语句，对于每个$(e_i|f_j)$单词组合都累加1，即$t(e_i|f_j)=t(e_i|f_j)+1$，$t(e_i|f_j)$初始值为0；  \n",
    "* 然后对$t(e|f)$作归一化处理。\n",
    "\n",
    "#### 3.2.3 EM算法"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "633ade00",
   "metadata": {},
   "source": [
    "#### 3.2.3.1 增量EM算法"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "51116225",
   "metadata": {},
   "source": [
    "EM算法计算量很大，我查了一下论文，确实有减少计算量的方法研究，下面这篇论文就是微软提出的两种方法，可以显著降低将EM算法应用于具有大量案例和高维度的数据库时的计算成本。这两种方法都基于部分E步，一种是增量式的EM，一种是lazy EM。我在这里采用了第一种增量式EM的方法，在E步计算时，只选择部分数据集进行，下次计算时再选择其他部分数据集，论文中证明这种方法能够在保证收敛性的条件下，大幅减少计算量。"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "d9cf3e2a",
   "metadata": {},
   "source": [
    "https://link.springer.com/content/pdf/10.1023/A:1017986506241.pdf"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "6fe8f44e",
   "metadata": {},
   "source": [
    "#### 3.2.3.2 稀疏矩阵"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "c8f77aae",
   "metadata": {},
   "source": [
    "稀疏矩阵的思路就是只保存矩阵中非零部分，我们这里是符合稀疏矩阵定义的。在这里我实现了一个C++版本的稀疏矩阵以及基于稀疏矩阵的增量EM算法。这个版本在计算规模不太大的数据时，运算良好，但是在计算大数据集时，速度比较慢，我尝试用这个版本计算了一下英-德语料，运行一天也没有完成一次迭代。  \n",
    "因此，我还是实现了一个用二维数组+增量EM算法的版本，这个版本在应对英国-罗马尼亚语料库时运算良好，时间上也能接受。"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "756d6f38",
   "metadata": {},
   "source": [
    "#### 3.2.3.3 C++二维数组+增量EM算法实现英语-罗马尼亚语平行语料概率计算"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "07386b65",
   "metadata": {},
   "source": [
    "上一节，已经提到用稀疏矩阵能够节省大量的内存，但是带来的问题是耗时太长，我这里还是实现了一个用二维数组+增量EM算法的版本。用二维数组重点要解决内存消耗的问题，下面是内存计算的一个表格："
   ]
  },
  {
   "cell_type": "markdown",
   "id": "31e35f00",
   "metadata": {},
   "source": [
    "| 预处理后     | 英-德语料库                                                  | 英-罗语料库                                                 |\n",
    "| ------------ | ------------------------------------------------------------ | ----------------------------------------------------------- |\n",
    "| 语料库规模   | 英：1894016行，最大长度：453<br>德：1894016行，最大长度：235 | 英：396041行，最大长度：208<br/>罗：396041行，最大长度：204 |\n",
    "| 词汇空间长度 | 英：62493<BR>德：78833                                       | 英：33007<br>罗：53919                                      |\n",
    "| t矩阵        | 18.4GB                                                       | 6.6G                                                        |\n",
    "| t矩阵的备份  | 18.4GB                                                       | 6.6G                                                        |\n",
    "| count矩阵    | 18.4GB                                                       | 6.6G                                                        |\n",
    "| total矩阵    | 308KB                                                        | 211KB                                                       |\n",
    "| s_total矩阵  | 244KB                                                        | 129KB                                                       |\n",
    "| 语料库1      | 3.2G                                                         | 0.3G                                                        |\n",
    "| 语料库2      | 1.7G                                                         | 0.3G                                                        |\n",
    "| 合计         | 60G                                                          | 20.5G                                                       |"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "bd5af684",
   "metadata": {},
   "source": [
    "可以看出如果要计算英-德语料库的概率矩阵需要有60G内存，计算英-罗（罗马尼亚）语料库只需要21G内存。我的电脑内存是32G，可以用来计算英-罗语料库。我采用这种方法，同时将语料库分为25个组后，每个组计算一轮耗时大约27s。"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "389bf8df",
   "metadata": {},
   "source": [
    "![nlp13-10](https://gitee.com/dotzhen/cloud-notes/raw/master/nlp13-10.png)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "9157dc90",
   "metadata": {},
   "source": [
    "计算1000轮后，概率矩阵的MAE相差小于0.01，耗时也就7.5小时。继续运算还可以再降。  "
   ]
  },
  {
   "cell_type": "markdown",
   "id": "b62e4bd5",
   "metadata": {},
   "source": [
    "### 3.3 附录：代码"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "518aa53e",
   "metadata": {},
   "source": [
    "#### 3.3.1 预处理preprocess.py"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d92c0083",
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "from scipy.sparse import dok_matrix\n",
    "from collections import Counter\n",
    "import time\n",
    "import pickle\n",
    "import re\n",
    "import random\n",
    "import enchant\n",
    "from tqdm import tqdm\n",
    "import spacy\n",
    "from operator import itemgetter, attrgetter\n",
    "# enchant.list_languages()\n",
    "\n",
    "def isDeletable(sent, wc):\n",
    "    '''\n",
    "    输入：\n",
    "    sent: ['i','am', 'a','student']\n",
    "    '''\n",
    "#     sl =  sent.split()\n",
    "    for w in sent:\n",
    "        if wc[w]<=1:\n",
    "            return False\n",
    "    return True\n",
    "\n",
    "def updatecounter(sent,wc):\n",
    "    '''\n",
    "    输入：\n",
    "    sent: ['i','am', 'a','student']\n",
    "    '''\n",
    "#     sl =  sent.split()\n",
    "    for w in sent:\n",
    "        wc[w] -= 1\n",
    "\n",
    "def get_mini_set(corpus_e,corpus_f,wc_e,wc_f):\n",
    "    '''\n",
    "    Randomly shrink the parallel corpus in place.\n",
    "\n",
    "    A sentence pair may be dropped only when every word on both sides still\n",
    "    occurs more than once (isDeletable), so no vocabulary entry disappears.\n",
    "    corpus_e/corpus_f: parallel lists of token lists (mutated in place).\n",
    "    wc_e/wc_f: word -> occurrence-count dicts, kept in sync via updatecounter.\n",
    "    Stops once the cumulative number of failed attempts exceeds 2*N.\n",
    "    NOTE(review): uses random without a fixed seed, so results differ per run.\n",
    "    '''\n",
    "    N = len(corpus_e)\n",
    "    M = N  # original size, kept for the summary print below\n",
    "    no_del = 0\n",
    "    while True:\n",
    "        n = random.randint(0,N-1)\n",
    "        if isDeletable(corpus_e[n],wc_e) and isDeletable(corpus_f[n],wc_f):\n",
    "            updatecounter(corpus_e[n],wc_e)\n",
    "            updatecounter(corpus_f[n],wc_f)\n",
    "            del corpus_e[n]\n",
    "            del corpus_f[n]\n",
    "            N -= 1\n",
    "        else:\n",
    "            no_del += 1\n",
    "        if no_del > 2*N:\n",
    "            break\n",
    "    print(M,N,M-N)  # original size, new size, number removed\n",
    "#     return corpus_e, corpus_f\n",
    "    \n",
    "def remove_null_line(corpus1,corpus2):\n",
    "    '''\n",
    "    输入：\n",
    "    corpus: [['i am a student'], ['he is a teacher'],...]\n",
    "    输出：\n",
    "    '''\n",
    "    null_lst1 = []\n",
    "    for i, s in enumerate(corpus1):\n",
    "        if len(s) == 0:\n",
    "            null_lst1.insert(0,i)\n",
    "    for i in null_lst1:\n",
    "        del corpus1[i]\n",
    "        del corpus2[i]\n",
    "    del null_lst1\n",
    "\n",
    "    null_lst2 = []\n",
    "    for i, s in enumerate(corpus2):\n",
    "        if len(s) == 0:\n",
    "            null_lst2.insert(0,i)\n",
    "    for i in null_lst2:\n",
    "        del corpus1[i]\n",
    "        del corpus2[i]\n",
    "    del null_lst2\n",
    "\n",
    "def remove_unknown(corpus, lan):\n",
    "    '''\n",
    "    Delete, in place, every token the spell-checker does not recognize.\n",
    "\n",
    "    corpus: list of token lists, e.g. [['i','am','a','student'], ...]\n",
    "    lan: 1 -> English ('en_US'); 2 -> Romanian (requires an installed\n",
    "         enchant dictionary named 'Romanian' -- verify availability locally)\n",
    "    '''\n",
    "    dicstr ={1:'en_US',2:'Romanian'}#'Bulgarian'}\n",
    "    dic = enchant.Dict(dicstr[lan])\n",
    "    \n",
    "    for s in tqdm(corpus):\n",
    "        dlist = []  # indices of unknown tokens in this sentence\n",
    "        for i,c in enumerate(s):\n",
    "            if dic.check(c):\n",
    "                continue\n",
    "            else:\n",
    "                dlist.append(i)\n",
    "        if len(dlist) > 0:\n",
    "            dlist.reverse()  # delete back-to-front so indices stay valid\n",
    "            for d in dlist:\n",
    "                del s[d]\n",
    "        del dlist\n",
    "        \n",
    "def remove_punc(corpus):\n",
    "    '''\n",
    "    输入：\n",
    "    corpus: [['i am a student'], ['he is a teacher'],...]\n",
    "    输出：\n",
    "    '''\n",
    "    lists_new = []\n",
    "    for list in corpus:\n",
    "        string = re.sub(\"[\\s+\\.\\!\\/_,$?)%^*:(+\\\"\\']+|[+——！，。？、~@#￥%……&*（）]+\", \" \", list)\n",
    "        lists_new.append(string)\n",
    "    return lists_new\n",
    "\n",
    "def preprocess(corpus):\n",
    "    '''\n",
    "    输入：\n",
    "    corpus: [['i am a student'], ['he is a teacher'],...]\n",
    "    输出：\n",
    "    '''\n",
    "    text = ' '.join(corpus)\n",
    "    text = text.lower()  # 转小写\n",
    "    words = text.split()  # 分词\n",
    "    wordcounter = Counter(words) \n",
    "    vocab_words = sorted(wordcounter) #set(words)\n",
    "    vocab2id = {word: i for i, word in enumerate(vocab_words)}\n",
    "    id2vocab = {i: word for i, word in enumerate(vocab_words)}\n",
    "    del text\n",
    "    return vocab2id, id2vocab, wordcounter\n",
    "\n",
    "def tokenize(corpus):\n",
    "    '''\n",
    "    输入：\n",
    "    corpus: [['i','am', 'a','student'], ['he','is','a','teacher'],...]\n",
    "    输出：\n",
    "    \n",
    "    '''\n",
    "    words = []\n",
    "    for i in corpus:\n",
    "        words += i\n",
    "    wordcounter = Counter(words) \n",
    "    vocab_words = sorted(wordcounter)\n",
    "    vocab2id = {word: i for i, word in enumerate(vocab_words)}\n",
    "    id2vocab = {i: word for i, word in enumerate(vocab_words)}\n",
    "    del words\n",
    "    return vocab2id, id2vocab, wordcounter\n",
    "\n",
    "def split_corpus(corpus):\n",
    "    '''\n",
    "    输入：\n",
    "    corpus: [['i am a student'], ['he is a teacher'],...]\n",
    "    输出：\n",
    "    [['i','am', 'a','student'], ['he','is','a','teacher'],...]\n",
    "    '''\n",
    "    nc = []\n",
    "    for s in corpus:\n",
    "        sl = s.split()\n",
    "        nc.append([a for a in sl])\n",
    "    return nc\n",
    "\n",
    "def vectorization(corpus, vocab2id):\n",
    "    '''\n",
    "    输入：\n",
    "    corpus: [['i','am', 'a','student'], ['he','is','a','teacher'],...]\n",
    "    输出：\n",
    "    [[0,1,2,3], [4,5,2,6],...]\n",
    "    '''\n",
    "    dc = []\n",
    "    for s in corpus:\n",
    "        dc.append([vocab2id[a] for a in s])\n",
    "    return dc\n",
    "\n",
    "def dump_to_file(filepath, t):\n",
    "    '''\n",
    "    Pickle the nonzero entries of matrix `t` to `filepath` as a list of\n",
    "    [(row, col), value] pairs. `t` must support .nonzero() and 2-d indexing\n",
    "    (e.g. a scipy dok_matrix).\n",
    "    '''\n",
    "    nx = t.nonzero()\n",
    "    lx = [[(a[0],a[1]),t[a[0],a[1]]] for a in zip(nx[0],nx[1])]\n",
    "    with open(filepath, \"wb\") as f:\n",
    "        pickle.dump(lx,f)\n",
    "    del lx\n",
    "    del nx\n",
    "        \n",
    "def load_from_file(filepath,t):\n",
    "    '''\n",
    "    Inverse of dump_to_file: read [(row, col), value] pairs from `filepath`\n",
    "    and write them into matrix `t` in place.\n",
    "    '''\n",
    "    lx = []\n",
    "    with open(filepath, \"rb\") as f:\n",
    "        lx = pickle.load(f)\n",
    "    for i in lx:\n",
    "        xy = i[0]\n",
    "        v = i[1]\n",
    "        t[xy[0],xy[1]] = v\n",
    "    del lx\n",
    "\n",
    "def read_data(data_path, limit=-1,ctype=0):\n",
    "    '''\n",
    "    Read a corpus file: one sentence per line, stripped and lowercased.\n",
    "\n",
    "    limit: maximum number of lines to read (-1 = all, 0 = none).\n",
    "    ctype: 0 keeps each line as a string; any other value whitespace-splits\n",
    "           it into a token list.\n",
    "    Returns the list of sentences (strings or token lists).\n",
    "    '''\n",
    "    datas = []\n",
    "    lc = 0  # number of lines read so far\n",
    "    if limit == 0:\n",
    "        return datas\n",
    "    with open(data_path, \"r\", encoding=\"utf8\") as f:\n",
    "        for line in f:\n",
    "            words = line.strip().lower()\n",
    "            if ctype != 0:\n",
    "\n",
    "                words = words.split()\n",
    "            datas.append(words)\n",
    "            lc += 1\n",
    "            if limit == -1:\n",
    "                continue\n",
    "            else:\n",
    "                if lc >= limit:\n",
    "                    return datas\n",
    "    return datas\n",
    "\n",
    "def write_data(data_path, cl, ctype=0):\n",
    "    '''\n",
    "    Write corpus `cl` to `data_path`, one sentence per line.\n",
    "    ctype: 0 -> `cl` holds strings; otherwise each item is a token/id list\n",
    "           joined by single spaces (with a trailing space before newline).\n",
    "    '''\n",
    "    with open(data_path, \"w\", encoding=\"utf8\") as f:\n",
    "        for line in cl:\n",
    "            if ctype==0:\n",
    "                f.write(line+'\\n')\n",
    "            else:\n",
    "                for a in line:\n",
    "                    f.write(str(a)+' ')\n",
    "                f.write('\\n')\n",
    "\n",
    "def t_savetoc(data_path,t,r,c):\n",
    "    '''\n",
    "    Export sparse matrix `t` (r x c) as plain text for the C++ EM program:\n",
    "    header 'rows cols nnz' followed by 'row col value' triples sorted by\n",
    "    (col, row) descending, all space-separated on one logical stream.\n",
    "    NOTE(review): len(t) as the nonzero count relies on dok_matrix behaving\n",
    "    like a dict -- confirm with the scipy version in use.\n",
    "    '''\n",
    "    nx = t.nonzero()\n",
    "    lx = [[a[0],a[1],t[a[0],a[1]]] for a in zip(nx[0],nx[1])]\n",
    "    lx = sorted(lx, key=itemgetter(1,0),reverse=True) \n",
    "    with open(data_path, \"w\") as f:\n",
    "        f.write('%d %d %d'%(r,c,len(t)))\n",
    "        for a in lx :\n",
    "                f.write(' %d %d %f'%(a[0],a[1],a[2]))\n",
    "\n",
    "def corpus_savetoc(data_path, corpus):\n",
    "    '''\n",
    "    Export a vectorized corpus for the C++ EM program: header line\n",
    "    'num_sentences max_len', then one line per sentence: 'len id id ...'.\n",
    "    '''\n",
    "    with open(data_path, \"w\") as f:\n",
    "        f.write('%d %d\\n'%(len(corpus),max([len(a) for a in corpus])))\n",
    "        for al in corpus:\n",
    "            f.write('%d'%(len(al)))\n",
    "            for a in al:\n",
    "                f.write(' %d'%(a))\n",
    "            f.write('\\n')\n",
    "\n",
    "def savedic(data_path, obj):\n",
    "    '''Pickle `obj` to `data_path` (binary).'''\n",
    "    with open(data_path, \"wb\") as f:\n",
    "        pickle.dump(obj, f)\n",
    "\n",
    "def NE_replace(corpus, idx):\n",
    "    '''\n",
    "    Replace named entities in each sentence with their label tag,\n",
    "    e.g. 'John' -> '[PERSON]', to shrink the vocabulary.\n",
    "\n",
    "    corpus: list of raw (untokenized) sentence strings.\n",
    "    idx: language selector -- 2 loads the Romanian spaCy model,\n",
    "         anything else the English one.\n",
    "    NOTE(review): str.replace substitutes every occurrence of the entity\n",
    "    text in the sentence, which can also hit non-entity matches.\n",
    "    '''\n",
    "    if idx == 2:\n",
    "        nlp = spacy.load('ro_core_news_sm')\n",
    "    else:\n",
    "        nlp = spacy.load('en_core_web_sm')\n",
    "    newcorp = []\n",
    "    for txt in tqdm(corpus):\n",
    "        doc = nlp(txt)\n",
    "        for entity in doc.ents:\n",
    "            txt = txt.replace(entity.text, '['+entity.label_+']')\n",
    "        newcorp.append(txt)\n",
    "    return newcorp\n",
    "\n",
    "# --- Preprocessing pipeline for the English-Romanian Europarl corpus ---\n",
    "corpus1_file = 'europarl-v7.ro-en.en' # English corpus file\n",
    "corpus2_file = 'europarl-v7.ro-en.ro' # Romanian corpus file\n",
    "corpus1_slim = 'europarl-v7.ro-en-s.en' # slimmed English corpus file\n",
    "corpus2_slim = 'europarl-v7.ro-en-s.ro' # slimmed Romanian corpus file\n",
    "vcorpus1_slim = 'europarl-v7.ro-en-vs.en' # vectorized slim English corpus\n",
    "vcorpus2_slim = 'europarl-v7.ro-en-vs.ro' # vectorized slim Romanian corpus\n",
    "\n",
    "corpus1 = read_data(corpus1_file)#,1000)\n",
    "corpus2 = read_data(corpus2_file)#,1000)\n",
    "# write_data(corpus1_slim, corpus1)\n",
    "# # write_data(corpus2_slim, corpus2)\n",
    "# del corpus1\n",
    "# del corpus2\n",
    "\n",
    "# corpus1 = read_data(corpus1_slim)\n",
    "# corpus2 = read_data(corpus2_slim)\n",
    "# corpus1 = read_data(corpus1_file)\n",
    "# corpus2 = read_data(corpus2_file)\n",
    "print(len(corpus1),len(corpus2))\n",
    "\n",
    "# drop empty lines from both sides of the parallel corpus\n",
    "remove_null_line(corpus1,corpus2)\n",
    "print(len(corpus1),len(corpus2))\n",
    "\n",
    "# strip punctuation\n",
    "corpus1 = remove_punc(corpus1)\n",
    "corpus2 = remove_punc(corpus2)\n",
    "\n",
    "# named-entity recognition: replace entities with their label tags\n",
    "print('NE replacing for english corpus:')\n",
    "ecorpus1 = NE_replace(corpus1, 1)\n",
    "del corpus1\n",
    "print('NE replacing for romanian corpus:')\n",
    "ecorpus2 = NE_replace(corpus2, 2)\n",
    "del corpus2\n",
    "\n",
    "# tokenize into lists of words\n",
    "ncorpus1 = split_corpus(ecorpus1)\n",
    "ncorpus2 = split_corpus(ecorpus2)\n",
    "\n",
    "del ecorpus1\n",
    "del ecorpus2\n",
    "\n",
    "# drop words the spell-checker does not recognize\n",
    "print('Removing unknown English words:')\n",
    "remove_unknown(ncorpus1,1)\n",
    "print('Removing unknown romanian words:')\n",
    "remove_unknown(ncorpus2,2)\n",
    "\n",
    "# drop lines that became empty after the steps above\n",
    "remove_null_line(ncorpus1,ncorpus2)\n",
    "print(len(ncorpus1),len(ncorpus2))\n",
    "\n",
    "# build word<->id tables and word counters (the vocabulary space)\n",
    "vocab2id_1, id2vocab_1, wordcounter1 = tokenize(ncorpus1)\n",
    "vocab2id_2, id2vocab_2, wordcounter2 = tokenize(ncorpus2)\n",
    "vocabsize_1 = len(vocab2id_1)\n",
    "vocabsize_2 = len(vocab2id_2)\n",
    "\n",
    "print(vocabsize_1,vocabsize_2)\n",
    "\n",
    "# vectorize the corpus (words -> integer ids)\n",
    "vcorpus1 = vectorization(ncorpus1, vocab2id_1)\n",
    "vcorpus2 = vectorization(ncorpus2, vocab2id_2)\n",
    "\n",
    "savedic('data/vocab2id_1.pkl',vocab2id_1 )\n",
    "savedic('data/vocab2id_2.pkl',vocab2id_2 )\n",
    "savedic('data/id2vocab_1.pkl',id2vocab_1 )\n",
    "savedic('data/id2vocab_2.pkl',id2vocab_2 )\n",
    "savedic('data/wordcounter_1.pkl',wordcounter1 )\n",
    "savedic('data/wordcounter_2.pkl',wordcounter2 )\n",
    "savedic('data/corpus_e.pkl',vcorpus1 )\n",
    "savedic('data/corpus_f.pkl',vcorpus2 )\n",
    "\n",
    "corpus_savetoc('data/corpus_e.txt',vcorpus1)\n",
    "corpus_savetoc('data/corpus_f.txt',vcorpus2)\n",
    "\n",
    "# free the full vectorized corpus before building the reduced set\n",
    "del vcorpus1\n",
    "del vcorpus2\n",
    "\n",
    "# randomly shrink the corpus while keeping every vocabulary word present\n",
    "get_mini_set(ncorpus1,ncorpus2,wordcounter1,wordcounter2)\n",
    "\n",
    "# vectorize the reduced corpus\n",
    "vcorpus1 = vectorization(ncorpus1, vocab2id_1)\n",
    "vcorpus2 = vectorization(ncorpus2, vocab2id_2)\n",
    "\n",
    "# free the token-list corpus\n",
    "del ncorpus1\n",
    "del ncorpus2\n",
    "\n",
    "def initial_value(corpus_e, corpus_f,vocabsize_e,vocabsize_f,t,verbose=1):\n",
    "    '''\n",
    "    Initialize sparse matrix t (vocabsize_e x vocabsize_f) in place:\n",
    "    count co-occurrences of every (e, f) id pair over the sentence pairs,\n",
    "    then normalize each column f so it sums to 1, giving an initial t(e|f).\n",
    "    corpus_e/corpus_f: vectorized parallel corpora (lists of id lists).\n",
    "    NOTE(review): len(t) as the nonzero count assumes dok_matrix dict-like\n",
    "    behavior -- confirm with the installed scipy version.\n",
    "    '''\n",
    "    t1 = time.time()\n",
    "#     vocabsize_e = len(vocab2id_e)\n",
    "#     vocabsize_f = len(vocab2id_f)\n",
    "    N = len(corpus_e)\n",
    "    for i in range(N):\n",
    "        E = corpus_e[i]\n",
    "        F = corpus_f[i]\n",
    "        for f in F:\n",
    "            for e in E:\n",
    "                t[e,f] += 1\n",
    "    # group the nonzero cells by column f\n",
    "    nt = t.nonzero()\n",
    "    lt = [a for a in zip(nt[0],nt[1])]\n",
    "    ct = [[] for a in range(vocabsize_f)]\n",
    "    for a in lt:\n",
    "        ct[a[1]].append(a)\n",
    "    # normalize each column to a probability distribution\n",
    "    for a in ct:\n",
    "        if len(a) == 0:\n",
    "            continue\n",
    "        w = 0\n",
    "        for b in a:\n",
    "            w += t[b[0],b[1]]\n",
    "        for b in a:\n",
    "            t[b[0],b[1]] = t[b[0],b[1]]/w\n",
    "    del lt\n",
    "    del ct\n",
    "    del nt\n",
    "    \n",
    "    t2 = time.time()\n",
    "    if verbose == 1:\n",
    "        print(r'Time consumption of initalizing t(e|f):',t2 - t1)\n",
    "        print('nonzero items of t(e|f): %d, total items:%d, ratio:%f'%(len(t),\n",
    "                                                                       vocabsize_e*vocabsize_f,\n",
    "                                                                       len(t)/(vocabsize_e*vocabsize_f)))\n",
    "\n",
    "# build and persist the initial probability matrix for the C++ EM program\n",
    "t = dok_matrix((vocabsize_1,vocabsize_2),dtype=np.float32)\n",
    "initial_value(vcorpus1,vcorpus2,vocabsize_1,vocabsize_2,t)\n",
    "\n",
    "savedic('data/init_t.pkl',t )\n",
    "\n",
    "# text export consumed by the C++ implementation\n",
    "t_savetoc('data/init_t.txt',t,vocabsize_1,vocabsize_2)\n",
    "\n",
    "corpus_savetoc('data/corpus_e_s.txt',vcorpus1)\n",
    "corpus_savetoc('data/corpus_f_s.txt',vcorpus2)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "811467fd",
   "metadata": {},
   "source": [
    "#### 3.3.2 矩阵初始化"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "28c85454",
   "metadata": {},
   "outputs": [],
   "source": [
    "def initial_value(corpus_e, corpus_f,vocabsize_e,vocabsize_f,t,verbose=1):\n",
    "    '''\n",
    "    Initialize sparse matrix t (vocabsize_e x vocabsize_f) in place:\n",
    "    count co-occurrences of every (e, f) id pair over the sentence pairs,\n",
    "    then normalize each column f so it sums to 1, giving an initial t(e|f).\n",
    "    corpus_e/corpus_f: vectorized parallel corpora (lists of id lists).\n",
    "    (Same definition as embedded in the preprocessing cell above.)\n",
    "    NOTE(review): len(t) as the nonzero count assumes dok_matrix dict-like\n",
    "    behavior -- confirm with the installed scipy version.\n",
    "    '''\n",
    "    t1 = time.time()\n",
    "#     vocabsize_e = len(vocab2id_e)\n",
    "#     vocabsize_f = len(vocab2id_f)\n",
    "    N = len(corpus_e)\n",
    "    for i in range(N):\n",
    "        E = corpus_e[i]\n",
    "        F = corpus_f[i]\n",
    "        for f in F:\n",
    "            for e in E:\n",
    "                t[e,f] += 1\n",
    "    # group the nonzero cells by column f\n",
    "    nt = t.nonzero()\n",
    "    lt = [a for a in zip(nt[0],nt[1])]\n",
    "    ct = [[] for a in range(vocabsize_f)]\n",
    "    for a in lt:\n",
    "        ct[a[1]].append(a)\n",
    "    # normalize each column to a probability distribution\n",
    "    for a in ct:\n",
    "        if len(a) == 0:\n",
    "            continue\n",
    "        w = 0\n",
    "        for b in a:\n",
    "            w += t[b[0],b[1]]\n",
    "        for b in a:\n",
    "            t[b[0],b[1]] = t[b[0],b[1]]/w\n",
    "    del lt\n",
    "    del ct\n",
    "    del nt\n",
    "    \n",
    "    t2 = time.time()\n",
    "    if verbose == 1:\n",
    "        print(r'Time consumption of initalizing t(e|f):',t2 - t1)\n",
    "        print('nonzero items of t(e|f): %d, total items:%d, ratio:%f'%(len(t),\n",
    "                                                                       vocabsize_e*vocabsize_f,\n",
    "                                                                       len(t)/(vocabsize_e*vocabsize_f)))"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "4132f897",
   "metadata": {},
   "source": [
    "#### 3.3.3 稀疏矩阵+增量EM算法"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b5eedad0",
   "metadata": {},
   "outputs": [],
   "source": [
    "#include <iostream>\n",
    "#include <stdio.h>\n",
    "#include <fstream>\n",
    "#include <sstream>\n",
    "#include<time.h>\n",
    "#include <string.h>\n",
    "#include <cmath>\n",
    "#include <stdlib.h>\n",
    "\n",
    "using namespace std;\n",
    "\n",
    "// Console progress indicator: prints \"nnn%,ssssssssss s\" (percent and\n",
    "// elapsed seconds) and rewrites it in place using backspaces.\n",
    "class progress{\n",
    "\tprivate:\n",
    "\tint m_nPos;    // last printed percentage (0..100)\n",
    "\tclock_t m_t0;  // start time for the elapsed-seconds display\n",
    "\n",
    "\tpublic:\n",
    "\tprogress(){\n",
    "\t\tm_nPos=0;\n",
    "\t\tm_t0=clock();\n",
    "\t};\n",
    "\t~progress(){\n",
    "\t\tprintf(\"\\n\");  // terminate the progress line\n",
    "\t};\n",
    "\t// Reset the bar to `pos` percent (out-of-range -> 0) and restart the clock.\n",
    "\tvoid reset(int pos){\n",
    "\t\tif(pos<0 || pos>100){\n",
    "\t\t\tm_nPos = 0;\n",
    "\t\t}\n",
    "\t\telse{\n",
    "\t\t\tm_nPos = pos;\n",
    "\t\t}\n",
    "\t\tm_t0=clock();\n",
    "\t\tclock_t t1 = m_t0;\n",
    "\t\t// t1 == m_t0 here, so the elapsed time printed on reset is always 0s\n",
    "\t\tdouble tnow=(double)(t1-m_t0)/CLOCKS_PER_SEC;\n",
    "\t\tprintf(\"%3d%%,%10ds\",m_nPos,(int)tnow);\n",
    "\t};\n",
    "\t// Advance the bar to `pos` percent; no-op if unchanged or out of range.\n",
    "\tvoid step(int pos){\n",
    "\t\tif(pos<0 || pos>100){\n",
    "\t\t\treturn;\n",
    "\t\t}\n",
    "\t\telse if (pos == m_nPos){\n",
    "\t\t\treturn;\n",
    "\t\t}\n",
    "\t\telse{\n",
    "\t\t\tm_nPos = pos;\n",
    "\t\t\tclock_t t1 = clock();\n",
    "\t\t\tdouble tnow=(double)(t1-m_t0)/CLOCKS_PER_SEC;\n",
    "\t\t\t// 16 backspaces erase the previously printed percent/seconds field\n",
    "\t\t\tprintf(\"\\b\\b\\b\\b\\b\\b\\b\\b\\b\\b\\b\\b\\b\\b\\b\\b%3d%%,%10ds\",m_nPos,(int)tnow);\n",
    "\t\t}\n",
    "\t\t//printf(\"\\b\\b\\b\\b%3d%%\",m_nPos);\n",
    "\t};\n",
    "};\n",
    "\n",
    "// Node of the column-linked sparse matrix: one non-zero entry\n",
    "// (row x, column y, value) plus a pointer to the next entry further\n",
    "// down the same column.\n",
    "typedef struct tagMyStruct{\n",
    "    float value;\n",
    "    int x;\n",
    "    int y;\n",
    "    struct tagMyStruct * down;\n",
    "    //struct tagMyStruct * right;\n",
    "} tSMItem, * pItem;\n",
    "\n",
    "class SMIter;  // forward declaration: iterator over SparseMatrix entries\n",
    "\n",
    "// Column-major sparse float matrix.  smcol[y] heads a singly linked list of\n",
    "// the non-zero entries of column y, kept sorted by ascending row index x.\n",
    "// Values <= epsilon are treated as zero and are not stored.\n",
    "class SparseMatrix{\n",
    "private:\n",
    "\tfriend class SMIter;\n",
    "\tstatic float epsilon;  // storage threshold; also used by norm()\n",
    "\ttSMItem ** smcol;      // one list head per column\n",
    "\tint r,c;               // logical dimensions (rows, cols)\n",
    "\tint total;             // number of stored (non-zero) entries\n",
    "\n",
    "\t// Free every node and the column-head array.\n",
    "\tvoid destroy(){\n",
    "\t\ttSMItem * p, * t;\n",
    "\t\tfor(int i = 0; i<c;i++){\n",
    "\t\t\t p = smcol[i];\n",
    "\t\t\t while(p!=NULL){\n",
    "\t\t\t\t t = p->down;\n",
    "\t\t\t\t delete p;\n",
    "\t\t\t\t p = t;\n",
    "\t\t\t }\n",
    "\t\t}\n",
    "\t\tdelete[] smcol;\n",
    "\t}\n",
    "\n",
    "\t// Allocate an empty row x col matrix (all column lists empty).\n",
    "\tvoid init(int row, int col)\n",
    "\t{\n",
    "\t    r = row;\n",
    "\t    c = col;\n",
    "\t    //smrow = new pItem[r];\n",
    "\t    smcol = new pItem[c];\n",
    "\t    /*for(int i=0; i<r; i++){\n",
    "\t        smrow[i] = NULL;\n",
    "\t    }*/\n",
    "\t    for(int j=0;j<c;j++){\n",
    "\t        smcol[j] = NULL;\n",
    "\t    }\n",
    "\t    total = 0;\n",
    "\n",
    "\t}\n",
    "\n",
    "public:\n",
    "\n",
    "\t// Return the value at (x,y), or 0.0 when no entry is stored there.\n",
    "\tfloat get(int x, int y)\n",
    "\t{\n",
    "\t    //tSMItem * row = smrow[x];\n",
    "\t    tSMItem * col = smcol[y];\n",
    "\n",
    "\t    if( col == NULL){\n",
    "\t    \treturn 0.0;\n",
    "\t    }\n",
    "\n",
    "\t    do{\n",
    "\t    \tif (col->x == x){\n",
    "\t    \t\treturn col->value;\n",
    "\t    \t}\n",
    "\t    \telse if(col->x > x){\n",
    "\t    \t\t// list is sorted by x, so we can stop early\n",
    "\t    \t\treturn 0.0;\n",
    "\t    \t}\n",
    "\t    \tcol = col->down;\n",
    "\t    }while(col != NULL);\n",
    "\t    return 0.0;\n",
    "\t}\n",
    "\n",
    "\n",
    "\n",
    "\t// Store v at (x,y), preserving the sorted column list.  A value\n",
    "\t// <= epsilon deletes an existing entry (or is simply not stored).\n",
    "\tvoid set(int x, int y, float v)\n",
    "\t{\n",
    "\t    tSMItem * col = smcol[y];\n",
    "\n",
    "\t    if( col == NULL){\n",
    "\t    \t// empty column: create the head node if v is significant\n",
    "\t    \tif(v > epsilon){\n",
    "\t    \t\tcol = new tSMItem;\n",
    "\t    \t\tcol->down = NULL;\n",
    "\t    \t\tcol->x = x;\n",
    "\t    \t\tcol->y = y;\n",
    "\t    \t\tcol->value = v;\n",
    "\t    \t\tsmcol[y] = col;\n",
    "\t    \t\ttotal++;\n",
    "\t    \t}\n",
    "\t    \treturn;\n",
    "\t    }\n",
    "\t    tSMItem ** pre = &(smcol[y]);  // link that points at `col`; used to splice\n",
    "\t    do{\n",
    "\t    \tif (col->x == x){\n",
    "\t    \t\t// entry exists: update it, or unlink it when v is ~zero\n",
    "\t    \t\tif(v <= epsilon){\n",
    "\t    \t\t\t*pre = (col->down);\n",
    "\t    \t\t\tdelete col;\n",
    "\t    \t\t\ttotal--;\n",
    "\t    \t\t}\n",
    "\t    \t\telse{\n",
    "\t    \t\t\tcol->value = v;\n",
    "\t    \t\t}\n",
    "\t    \t\treturn;\n",
    "\t    \t}\n",
    "\t    \telse if(col->x > x){\n",
    "\t    \t\t// passed the insertion point: insert a new node before `col`\n",
    "\t    \t\tif (v <= epsilon) return;\n",
    "\t    \t\ttSMItem * p = new tSMItem;\n",
    "\t    \t\tp->down = col;\n",
    "\t    \t\tp->x = x;\n",
    "\t    \t\tp->y = y;\n",
    "\t    \t\tp->value = v;\n",
    "\t    \t\t*pre = p;\n",
    "\t    \t\ttotal++;\n",
    "\t    \t\treturn;\n",
    "\t    \t}\n",
    "\t    \tpre = &(col->down);\n",
    "\t    \tcol = col->down;\n",
    "\t    \tif(col == NULL){\n",
    "\t    \t\t// reached the tail: append a new node\n",
    "\t    \t\tif (v <= epsilon) return;\n",
    "\t\t\t\ttSMItem * p = new tSMItem;\n",
    "\t\t\t\tp->down = col;\n",
    "\t\t\t\tp->x = x;\n",
    "\t\t\t\tp->y = y;\n",
    "\t\t\t\tp->value = v;\n",
    "\t\t\t\t*pre = p;\n",
    "\t\t\t\ttotal++;\n",
    "\t\t\t\treturn;\n",
    "\t    \t}\n",
    "\t    }while(col != NULL);\n",
    "\t}\n",
    "\n",
    "public:\n",
    "\tSparseMatrix(int r, int c){\n",
    "\t\tinit(r,c);\n",
    "\t}\n",
    "\t~SparseMatrix(){\n",
    "\t\tdestroy();\n",
    "\t}\n",
    "\n",
    "\tint Rows(){\n",
    "\t\treturn r;\n",
    "\t}\n",
    "\tint Cols(){\n",
    "\t\treturn c;\n",
    "\t}\n",
    "\tvoid copy(SparseMatrix & a);   // deep-copy `a` into this matrix\n",
    "\tfloat norm(SparseMatrix & a);  // Euclidean distance to `a`\n",
    "\t// Drop all entries, keeping the current dimensions.\n",
    "\tvoid reset(){\n",
    "\t\tdestroy();\n",
    "\t\tinit(r,c);\n",
    "\t}\n",
    "\tSMIter * getIterator();        // caller owns the returned iterator\n",
    "\tvoid print();\n",
    "\tvoid save(const char * filename);\n",
    "\tvoid load(const char * filename);\n",
    "\tvoid fastload(const char * filename);\n",
    "};\n",
    "\n",
    "// NOTE: no semicolon in the macro body.  The original `#define EPSILON 0.000001;`\n",
    "// pasted a stray `;` into every expansion (e.g. `temp1 = EPSILON;` expanded to\n",
    "// `temp1 = 0.000001;;`) and would break any expression use such as `EPSILON + x`.\n",
    "#define EPSILON 0.000001\n",
    "float SparseMatrix::epsilon = EPSILON;\n",
    "\n",
    "// Forward iterator over the non-zero entries of a SparseMatrix, walking\n",
    "// column by column and down each column's linked list.\n",
    "class SMIter{\n",
    "private:\n",
    "\tpItem p;          // current node; NULL before the first Next() call\n",
    "\tint y;            // index of the column currently being walked\n",
    "\tSparseMatrix *s;  // matrix being iterated (not owned)\n",
    "public:\n",
    "\tSMIter(SparseMatrix * t){\n",
    "\t\ts = t;\n",
    "\t\tp = NULL;\n",
    "\t\ty = 0;\n",
    "\t}\n",
    "\t// Copy the next entry into *h and return 1, or return 0 when exhausted.\n",
    "\tint Next(tSMItem * h){\n",
    "\t\ttSMItem * t;\n",
    "\t\tif(p == NULL){\n",
    "\t\t\t// first call: locate the first non-empty column\n",
    "\t\t\tfor(int i = y;i<s->Cols();i++){\n",
    "\t\t\t\tt = s->smcol[i];\n",
    "\t\t\t\tif(t == NULL) continue;\n",
    "\t\t\t\t*h = *t;\n",
    "\t\t\t\tp = t;\n",
    "\t\t\t\ty = i;\n",
    "\t\t\t\treturn 1;\n",
    "\t\t\t}\n",
    "\t\t\treturn 0;\n",
    "\t\t}\n",
    "\t\telse{\n",
    "\t\t\tif(p->down!=NULL){\n",
    "\t\t\t\t// continue down the current column\n",
    "\t\t\t\tp = p->down;\n",
    "\t\t\t\t*h = *p;\n",
    "\t\t\t\treturn 1;\n",
    "\t\t\t}\n",
    "\t\t\telse{\n",
    "\t\t\t\t// column exhausted: advance to the next non-empty column\n",
    "\t\t\t\tfor(int i = y+1;i<s->Cols();i++){\n",
    "\t\t\t\t\tt = s->smcol[i];\n",
    "\t\t\t\t\tif(t == NULL) continue;\n",
    "\t\t\t\t\t*h = *t;\n",
    "\t\t\t\t\tp = t;\n",
    "\t\t\t\t\ty = i;\n",
    "\t\t\t\t\treturn 1;\n",
    "\t\t\t\t}\n",
    "\t\t\t\treturn 0;\n",
    "\t\t\t}\n",
    "\t\t}\n",
    "\t}\n",
    "};\n",
    "\n",
    "// Create a fresh iterator over this matrix; the caller owns it and must\n",
    "// delete it when done.\n",
    "SMIter * SparseMatrix::getIterator()\n",
    "{\n",
    "\treturn new SMIter(this);\n",
    "}\n",
    "\n",
    "// Dump the entry count followed by every stored entry as \"(row,col) value\".\n",
    "void SparseMatrix::print()\n",
    "{\n",
    "\tprintf(\"total: %d\\n\",total);\n",
    "\ttSMItem item;\n",
    "\tSMIter * iter = getIterator();\n",
    "\tfor(; iter->Next(&item); ){\n",
    "\t\tprintf(\"(%d,%d) %f\\n\",item.x,item.y,item.value);\n",
    "\t}\n",
    "\tdelete iter;\n",
    "}\n",
    "\n",
    "// Serialize as plain text: \"rows cols nnz\" followed by \" x y value\" for\n",
    "// each stored entry, single-space separated with no trailing newline.\n",
    "// This is the format read back by load() and fastload().\n",
    "void SparseMatrix::save(const char * filename)\n",
    "{\n",
    "\tofstream ofs(filename);\n",
    "\tofs<<r<<\" \"<<c<<\" \"<<total;\n",
    "\tSMIter * it = getIterator();\n",
    "\ttSMItem as;\n",
    "\twhile(it->Next(&as)){\n",
    "\t\tofs<<\" \"<<as.x<<\" \"<<as.y<<\" \"<<as.value;\n",
    "\t}\n",
    "\tdelete it;\n",
    "\tofs.close();\n",
    "}\n",
    "\n",
    "// Parse the next space-delimited numeric token starting at `buf`.\n",
    "// Stores the parsed value into *val and returns a pointer just past the\n",
    "// consumed token (after the separating space, or at the terminating '\\0').\n",
    "// Fix: the original only wrote *val when a ' ' separator was found, so the\n",
    "// final token of a buffer (save() writes no trailing space) left *val\n",
    "// untouched and the caller consumed a stale value.  Tokens longer than the\n",
    "// 255-char scratch buffer are now skipped instead of overflowing it.\n",
    "char * tokenparse(char * buf, double * val)\n",
    "{\n",
    "\tdouble d=0.0;\n",
    "\tint n = 0;\n",
    "\tchar ds[256];\n",
    "\tchar * p = buf;\n",
    "\twhile(*p!='\\0'){\n",
    "\t\tif(*p == ' '){\n",
    "\t\t\tif(n!=0 && n<256){\n",
    "\t\t\t\tstrncpy(ds,buf,n);\n",
    "\t\t\t\tds[n] = '\\0';\n",
    "\t\t\t\td = atof(ds);\n",
    "\t\t\t}\n",
    "\t\t\t*val = d;\n",
    "\t\t\treturn p+1;\n",
    "\t\t}\n",
    "\t\tp++;\n",
    "\t\tn++;\n",
    "\t}\n",
    "\t// Hit end of string: convert the trailing token (if any) as well.\n",
    "\tif(n!=0 && n<256){\n",
    "\t\tstrncpy(ds,buf,n);\n",
    "\t\tds[n] = '\\0';\n",
    "\t\td = atof(ds);\n",
    "\t}\n",
    "\t*val = d;\n",
    "\treturn p;\n",
    "}\n",
    "\n",
    "// Load the matrix from the save() text format by slurping the whole file\n",
    "// into memory and tokenizing it with tokenparse() -- avoids per-token\n",
    "// stream extraction.  Replaces the current contents and resizes to the\n",
    "// dimensions read from the file header.\n",
    "void SparseMatrix::fastload(const char * filename)\n",
    "{\n",
    "\tprogress prg;\n",
    "\tclock_t t0,t1;\n",
    "\tFILE *fp=fopen(filename,\"r\");  \n",
    "    if(!fp){\n",
    "\t\tprintf(\"Can't open %s!\\n\",filename);\n",
    "\t\treturn;\n",
    "\t} \n",
    "    fseek(fp,0L,SEEK_END);  \n",
    "    int size=ftell(fp);  \n",
    "\tchar * buf;\n",
    "\tbuf = new char[size+1];\n",
    "\tif (buf == NULL){\n",
    "\t\tprintf(\"Can't allocate enouph memorty!\");\n",
    "\t\tfclose(fp);\n",
    "\t\treturn;\n",
    "\t}\n",
    "\tfseek(fp,0L,SEEK_SET);\n",
    "\tt0 = clock();\n",
    "\tif(fread(buf,sizeof(char), size, fp)!=size*sizeof(char)){\n",
    "\t\tprintf(\"read error!\\n\");\n",
    "\t\t// NOTE(review): buf is not freed on this early return\n",
    "\t\tfclose(fp);\n",
    "\t\treturn;\n",
    "\t}\n",
    "    fclose(fp);\n",
    "\tt1 = clock();\n",
    "\tprintf(\"Time cost for loading %s: %d\\n\",filename, (int)((t1-t0)*1.0/CLOCKS_PER_SEC));\n",
    "\tbuf[size] = '\\0';  // terminate so tokenparse() stops at end of file\n",
    "\tchar *p = buf;\n",
    "\tint r,c,t,x,y;\n",
    "\tdouble v;\n",
    "\tp=tokenparse(p,&v); //read r\n",
    "\tr = (int)v;\n",
    "\tp=tokenparse(p,&v); //read c\n",
    "\tc = (int)v;\n",
    "\tp=tokenparse(p,&v); //read t\n",
    "\tt = (int)v;\n",
    "\tdestroy();\n",
    "\tinit(r,c);\n",
    "\tprintf(\"Dimension of t is:(%d,%d), non-zero numbers is : %d\\n\",r,c,t);\n",
    "\tfloat mem_MB = sizeof(tSMItem)*t*1.0/(1024.*1024.);\n",
    "\tfloat mem_GB = mem_MB/1024.;\n",
    "\tprintf(\"Memory cost(GB/MB): %.2f/%.2f\\n\", mem_GB,mem_MB);\n",
    "\tprintf(\"Reading t from data file : \");\n",
    "\tprg.reset(0);\n",
    "\t// read t triples of (row, col, value)\n",
    "\tfor(int i = 0; i<t; i++){\n",
    "\t\tp=tokenparse(p,&v); //read x\n",
    "\t\tx = (int)v;\n",
    "\t\tp=tokenparse(p,&v); //read y\n",
    "\t\ty = (int)v;\n",
    "\t\tp=tokenparse(p,&v); //read v\n",
    "\t\tset(x,y,v);\n",
    "\t\tprg.step((int)((i+1)*100/t));\n",
    "\t}\n",
    "\tdelete[] buf;  \n",
    "}\n",
    "\n",
    "// Load the matrix from the save() text format using stream extraction.\n",
    "// Replaces the current contents and resizes to the header dimensions.\n",
    "void SparseMatrix::load(const char * filename)\n",
    "{\n",
    "\tifstream ifs(filename);\n",
    "\tint r,c,t,x,y;\n",
    "\tfloat v;\n",
    "\tprogress prg;\n",
    "\tifs>>r>>c>>t;  // header: rows, cols, number of non-zero entries\n",
    "\tdestroy();\n",
    "\tinit(r,c);\n",
    "\tprintf(\"Dimension of t is:(%d,%d), non-zero numbers is : %d\\n\",r,c,t);\n",
    "\tfloat mem_MB = sizeof(tSMItem)*t*1.0/(1024.*1024.);\n",
    "\tfloat mem_GB = mem_MB/1024.;\n",
    "\tprintf(\"Memory cost(GB/MB): %.2f/%.2f\\n\", mem_GB,mem_MB);\n",
    "\tprintf(\"Reading t from data file : \");\n",
    "\tprg.reset(0);\n",
    "\tfor(int i = 0; i<t; i++){\n",
    "\t\tifs >>x>>y>>v;\n",
    "\t\tset(x,y,v);\n",
    "\t\tprg.step((int)((i+1)*100/t));\n",
    "\t}\n",
    "\t//printf(\"finished!\\n\");\n",
    "\tifs.close();\n",
    "}\n",
    "\n",
    "// Replace this matrix's contents with a deep copy of `a` (same dimensions,\n",
    "// same non-zero entries).\n",
    "void SparseMatrix::copy(SparseMatrix & a)\n",
    "{\n",
    "\tdestroy();\n",
    "\tinit(a.Rows(),a.Cols());\n",
    "\ttSMItem entry;\n",
    "\tSMIter * src = a.getIterator();\n",
    "\tfor(; src->Next(&entry); ){\n",
    "\t\tset(entry.x,entry.y,entry.value);\n",
    "\t}\n",
    "\tdelete src;\n",
    "}\n",
    "\n",
    "// Euclidean (Frobenius) distance between this matrix and `a`, computed over\n",
    "// the union of both matrices' stored entries.\n",
    "float SparseMatrix::norm(SparseMatrix & a)\n",
    "{\n",
    "\tSMIter * me = getIterator();\n",
    "\ttSMItem as;\n",
    "\tfloat normv=0, tmp1,tmp2;\n",
    "\t// squared differences over entries stored in *this\n",
    "\twhile(me->Next(&as)){\n",
    "\t\ttmp1=get(as.x,as.y);\n",
    "\t\ttmp2=a.get(as.x,as.y);\n",
    "\t\ttmp1= tmp1-tmp2;\n",
    "\t\tnormv +=tmp1*tmp1;\n",
    "\t}\n",
    "\tdelete me;\n",
    "\n",
    "\t// add entries stored only in `a` (zero here, so contribution is a^2)\n",
    "\tSMIter * it = a.getIterator();\n",
    "\twhile(it->Next(&as)){\n",
    "\t\ttmp1 = get(as.x,as.y);\n",
    "\t\tif(tmp1!=0) continue;  // already counted in the first pass\n",
    "\t\ttmp2 = 0-a.get(as.x,as.y);\n",
    "\t\ttmp2 *= tmp2;\n",
    "\t\tnormv += tmp2;\n",
    "\t}\n",
    "\tdelete it;\n",
    "\t// NOTE(review): identical matrices thus return sqrt(epsilon), not 0\n",
    "\tif(normv == 0) normv += epsilon;\n",
    "\tnormv = sqrt(normv);\n",
    "\treturn normv;\n",
    "}\n",
    "\n",
    "// Allocate a linenum x sentnum int matrix with every cell set to -1\n",
    "// (-1 marks \"no token\" / end of sentence).\n",
    "int ** alloc_corpus(int linenum, int sentnum)\n",
    "{\n",
    "\tint **rows = new int*[linenum];\n",
    "\tfor (int r = 0; r < linenum; r++)\n",
    "\t{\n",
    "\t\tint *row = new int[sentnum];\n",
    "\t\tfor (int c = 0; c < sentnum; c++){\n",
    "\t\t\trow[c] = -1;\n",
    "\t\t}\n",
    "\t\trows[r] = row;\n",
    "\t}\n",
    "\treturn rows;\n",
    "}\n",
    "\n",
    "// Free a matrix previously created by alloc_corpus.\n",
    "void release_corpus(int ** array, int linenum)\n",
    "{\n",
    "\tfor (int i = linenum - 1; i >= 0; i--)\n",
    "\t\tdelete[] array[i];\n",
    "\tdelete[] array;\n",
    "}\n",
    "\n",
    "// Read an integer-encoded corpus file.  Line 1: \"<num_lines> <max_sent_len>\";\n",
    "// each following line: \"<len> <id0> <id1> ...\".  Returns a num_lines x\n",
    "// max_sent_len matrix padded with -1; *linenum / *sentnum receive the sizes.\n",
    "// Fix: the scan guards used `buf-p<LINE_LENGTH`, which is always true\n",
    "// (buf-p is <= 0), so a malformed line with too few spaces could run the\n",
    "// pointer past the end of the buffer.  The guard is now `p-buf<LINE_LENGTH`,\n",
    "// matching the corrected copy of this function in the dense-array cell.\n",
    "int ** read_corpus(const char * filename, int * linenum, int * sentnum)\n",
    "{\n",
    "\t//stringstream ss;\n",
    "\tifstream ifs(filename);\n",
    "\tint nlines, sentmax;\n",
    "\tint LINE_LENGTH = 4096;\n",
    "\tprogress prg;\n",
    "\tchar *buf, *p;\n",
    "\tint thislinelen;\n",
    "\tbuf = new char[LINE_LENGTH];\n",
    "\tifs.getline(buf,LINE_LENGTH,'\\n');\n",
    "\tsscanf(buf, \"%d %d\", &nlines, &sentmax);\n",
    "\tprintf(\"corpus filename: %s, lines: %d, max sentence length: %d\\n\",filename, nlines, sentmax);\n",
    "\tfloat mem_MB = sizeof(int)*nlines*sentmax*1.0/(1024.*1024.);\n",
    "\tfloat mem_GB = mem_MB/1024.;\n",
    "\tprintf(\"Memory cost(GB/MB): %.2f/%.2f\\n\",mem_GB,mem_MB);\n",
    "\tprintf(\"Loading %s file: \",filename);\n",
    "\tprg.reset(0);\n",
    "\tint **corpus1 = alloc_corpus(nlines, sentmax);\n",
    "\tfor(int i = 0; i < nlines; i++){\n",
    "\t\tifs.getline(buf,LINE_LENGTH,'\\n');\n",
    "\t\tsscanf(buf,\"%d\",&thislinelen);\n",
    "\t\tp = buf;\n",
    "\t\t// skip the leading sentence-length field\n",
    "\t\twhile((*p!=' ')&&((int)(p-buf)<LINE_LENGTH))p++;\n",
    "\t\tp++;\n",
    "\t\tfor(int j = 0; j<thislinelen; j++){\n",
    "\t\t\tsscanf(p,\"%d\",&(corpus1[i][j]));\n",
    "\t\t\t// advance past the token just read\n",
    "\t\t\twhile((*p!=' ')&&((int)(p-buf)<LINE_LENGTH))p++;\n",
    "\t\t\tp++;\n",
    "\t\t}\n",
    "\t\tprg.step((int)((i+1)*100./nlines));\n",
    "\t}\n",
    "\t\n",
    "\tifs.close();\n",
    "\tdelete[] buf;\n",
    "\t*linenum = nlines;\n",
    "\t*sentnum = sentmax;\n",
    "\treturn corpus1;\n",
    "}\n",
    "\n",
    "// Incremental EM for IBM Model 1 using the SparseMatrix representation.\n",
    "// corpus_e/corpus_f: token-id matrices (rows padded with -1); t: the\n",
    "// vocabsize_e x vocabsize_f translation table t(e|f), updated in place;\n",
    "// K: number of vocabulary blocks -- each iteration re-normalizes only one\n",
    "// (e,f) block of t; epsilon: convergence threshold on the change of t;\n",
    "// maxturns: iteration cap (-1 means unlimited); verbose==1 prints progress.\n",
    "void em_proc_sparse(int **corpus_e, int **corpus_f, int corpus_len, int sentnum1, int sentnum2,\n",
    "\t\tint vocabsize_e, int vocabsize_f,SparseMatrix & t, int K = 5,\n",
    "\t\tfloat epsilon = 0.001, int maxturns=10000, int verbose=1)\n",
    "{\n",
    "\tSparseMatrix tn(1,1);                          // previous t, for convergence test\n",
    "\tSparseMatrix count(vocabsize_e, vocabsize_f);  // expected counts c(e,f)\n",
    "\tSparseMatrix total(vocabsize_f, 1);            // per-f normalizers\n",
    "\tSparseMatrix s_total(vocabsize_e, 1);          // per-e sentence normalizers\n",
    "\t//progress prg;\n",
    "\ttn.copy(t);\n",
    "\tint turns = 0;\n",
    "\tfloat temp1,temp2,temp3,temp4;\n",
    "\tfloat normv;\n",
    "\tint g = 0;   // index of the vocabulary block updated this iteration\n",
    "\tint start_f,end_f;\n",
    "\tint start_e,end_e;\n",
    "\t//int totalc,currentc;\n",
    "\tint blocksize_f = vocabsize_f / K;\n",
    "\tint blocksize_e = vocabsize_e / K;\n",
    "\tclock_t t0 = clock();\n",
    "\tclock_t t1=t0, t2;\n",
    "\t//corpus_len*sentnum1*sentnum2+corpus_len*sentnum1*sentnum2+(end-start)*vocabsize_e\n",
    "\twhile(true){\n",
    "\t\t// pick this iteration's block; the last block absorbs the remainder\n",
    "\t\tg = (g+1)%K;\n",
    "\n",
    "        start_f = g*blocksize_f;\n",
    "        end_f = start_f + blocksize_f-1;\n",
    "        if (g == K-1) end_f = vocabsize_f-1;\n",
    "\n",
    "\t\tstart_e = g*blocksize_e;\n",
    "        end_e = start_e + blocksize_e-1;\n",
    "        if (g == K-1) end_e = vocabsize_e-1;\n",
    "\n",
    "\t\t//totalc = corpus_len*sentnum1*sentnum2+corpus_len*sentnum1*sentnum2+(end-start)*vocabsize_e;\n",
    "\t\t//printf(\"Turn %d:\",turns+1);\n",
    "\t\t//prg.reset(0);\n",
    "\t\tcount.reset();\n",
    "\t\ttotal.reset();\n",
    "\t\t// E-step part 1: s_total[e] = sum over f in the sentence of t(e|f)\n",
    "\t\t// NOTE(review): s_total[e] is overwritten across sentences, so the\n",
    "\t\t// second pass below reads the value from e's LAST occurrence, not the\n",
    "\t\t// current sentence's -- verify against the per-sentence Model 1 E-step.\n",
    "\t\tfor(int i = 0; i < corpus_len; i++){\n",
    "\t\t\tfor (int j = 0; j < sentnum1; j++){\n",
    "\t\t\t\tif(corpus_e[i][j]==-1)break;\n",
    "\t\t\t\t//s_total.set(corpus_e[i][j],0,0);\n",
    "\t\t\t\ttemp1 = 0;\n",
    "\t\t\t\tfor(int k = 0; k < sentnum2; k++){\n",
    "\t\t\t\t\tif(corpus_f[i][k]==-1)break;\n",
    "\t\t\t\t\t//temp1 = s_total.get(corpus_e[i][j],0);\n",
    "\t\t\t\t\ttemp2 = t.get(corpus_e[i][j],corpus_f[i][k]);\n",
    "\t\t\t\t\ttemp1 += temp2;\n",
    "\t\t\t\t\t//currentc = i*sentnum1*sentnum2\n",
    "\t\t\t\t}\n",
    "\t\t\t\ts_total.set(corpus_e[i][j],0,temp1);\n",
    "\t\t\t}\n",
    "\t\t}\n",
    "\n",
    "\t\t// E-step part 2: accumulate expected counts and per-f normalizers\n",
    "\t\tfor(int i = 0; i < corpus_len; i++){\n",
    "\t\t\tfor (int j = 0; j < sentnum1; j++){\n",
    "\t\t\t\tif(corpus_e[i][j]==-1)break;\n",
    "\t\t\t\ttemp1 = s_total.get(corpus_e[i][j],0);\n",
    "\t\t\t\tif(temp1==0.0) temp1 = EPSILON;  // avoid division by zero\n",
    "\t\t\t\tfor(int k = 0; k < sentnum2; k++){\n",
    "\t\t\t\t\tif(corpus_f[i][k]==-1)break;\n",
    "\t\t\t\t\ttemp2 = t.get(corpus_e[i][j],corpus_f[i][k]);\n",
    "\t\t\t\t\ttemp3 = total.get(corpus_f[i][k],0);\n",
    "\t\t\t\t\ttemp4 = count.get(corpus_e[i][j],corpus_f[i][k]);\n",
    "\t\t\t\t\tcount.set(corpus_e[i][j],corpus_f[i][k],temp4+temp2/temp1);\n",
    "\t\t\t\t\ttotal.set(corpus_f[i][k],0,temp3+temp2/temp1);\n",
    "\n",
    "\t\t\t\t}\n",
    "\t\t\t}\n",
    "\t\t}\n",
    "\n",
    "\t\t// M-step (incremental): re-estimate t(e|f) only for this block\n",
    "\t\t//for(int f = 0; f < vocabsize_f; f++){\n",
    "\t\tfor(int f = start_f; f < end_f+1; f++){\n",
    "\t\t\ttemp2 = total.get(f,0);\n",
    "\t\t\tif(temp2 ==0.0) temp2= EPSILON;\n",
    "\t\t\t//for (int e = 0; e < vocabsize_e; e++){\n",
    "\t\t\tfor (int e = start_e; e < end_e+1; e++){\n",
    "\t\t\t\ttemp1 = count.get(e,f);\n",
    "\t\t\t\tt.set(e,f, temp1/temp2);\n",
    "\t\t\t}\n",
    "\t\t}\n",
    "\n",
    "\t\tturns ++;\n",
    "\t\tnormv = t.norm(tn);  // distance to previous iterate\n",
    "\t\tt2 = clock();\n",
    "\n",
    "\t\tif (normv <= epsilon){\n",
    "\t\t\tif(verbose==1){\n",
    "\t\t\t\tprintf(\"after %d turns, calculation converged! Total time consumption: %f\\n\",turns, (double)(t2-t0)/CLOCKS_PER_SEC);\n",
    "\t\t\t}\n",
    "\t\t\tbreak;\n",
    "\t\t}\n",
    "\t\telse{\n",
    "\t\t\ttn.copy(t);\n",
    "\t\t}\n",
    "\t\tif(verbose == 1){\n",
    "\t\t\tprintf(\"reach %d turns. total: %f, current: %f, norm:%f\\n\",\n",
    "\t\t\t\t\tturns,(double)(t2-t0)/CLOCKS_PER_SEC,\n",
    "\t\t\t\t\t(double)(t2-t1)/CLOCKS_PER_SEC, normv);\n",
    "\t\t}\n",
    "\t\tt1 = t2;\n",
    "\t\tif(maxturns == -1) continue;  // -1 means iterate until convergence\n",
    "\t\tif(turns >= maxturns){\n",
    "\t\t\tif (verbose==1){\n",
    "\t\t\t\tprintf(\"reach max turns(%d)! Total time consumption: %f\\n\",maxturns, (double)(t2-t0)/CLOCKS_PER_SEC);\n",
    "\t\t\t}\n",
    "\t\t\tbreak;\n",
    "\t\t}\n",
    "\t}\n",
    "}\n",
    "\n",
    "// Entry point: argv[1]=initial t(e|f) file, argv[2]/argv[3]=encoded corpora,\n",
    "// argv[4]=max EM iterations, argv[5]=number of incremental-EM groups (K).\n",
    "int main(int argc, char * argv[])\n",
    "{\n",
    "\tif(argc < 6){\n",
    "\t\tcout << \"not enough parameters!\"<<endl;\n",
    "\t\treturn 0;\n",
    "\t}\n",
    "\tint maxturn = atoi(argv[4]);\n",
    "\tint group = atoi(argv[5]);\n",
    "\tSparseMatrix t(5,5);\n",
    "\t//load the initial t(e|f) estimates (load() resizes the matrix)\n",
    "\tt.load(argv[1]);\n",
    "\t// Fix: a leftover debug `printf(\"finished!\\n\"); return 0;` sat here and\n",
    "\t// made everything below (corpus loading, EM, saving) unreachable.\n",
    "\tint vocabsize_1 = t.Rows();\n",
    "\tint vocabsize_2 = t.Cols();\n",
    "\t//load corpus_e token ids\n",
    "\tint ** corpus1, linenum1, sentnum1;\n",
    "\tcorpus1 = read_corpus(argv[2],&linenum1, &sentnum1);\n",
    "\t//load corpus_f token ids\n",
    "\tint ** corpus2, linenum2, sentnum2;\n",
    "\tcorpus2 = read_corpus(argv[3],&linenum2, &sentnum2);\n",
    "\t//sanity check: every token id must be a valid index into t\n",
    "\tprogress prg;\n",
    "\tprintf(\"Error check for %s :\",argv[2]);\n",
    "\tprg.reset(0);\n",
    "\tint errcnt = 0;\n",
    "\tint firstdisplay = 10;  // print at most the first 10 offending ids\n",
    "\tfor(int i = 0; i<linenum1;i++){\n",
    "\t\tfor (int j = 0; j < sentnum1; j++){\n",
    "\t\t\tif(corpus1[i][j] == -1) break;\n",
    "\t\t\tif(corpus1[i][j]<0 || corpus1[i][j] >= t.Rows()){\n",
    "\t\t\t\terrcnt++;\n",
    "\t\t\t\tif(firstdisplay){\n",
    "\t\t\t\t\tfirstdisplay --;\n",
    "\t\t\t\t\tprintf(\"(%d,%d):%d\\n\",i,j,corpus1[i][j]);\n",
    "\t\t\t\t}\n",
    "\t\t\t}\n",
    "\t\t}\n",
    "\t\tprg.step((int)((i+1)*100./linenum1));\n",
    "\t}\n",
    "\tprintf(\"\\ncorpus1 errcnt: %d\\n\",errcnt);\n",
    "\tprintf(\"Error check for %s :\",argv[3]);\n",
    "\tprg.reset(0);\n",
    "\terrcnt = 0;\n",
    "\tfor(int i = 0; i<linenum2;i++){\n",
    "\t\tfor (int j = 0; j < sentnum2; j++){\n",
    "\t\t\tif(corpus2[i][j] == -1) break;\n",
    "\t\t\tif(corpus2[i][j]<0 || corpus2[i][j] >= t.Cols()){\n",
    "\t\t\t\terrcnt++;\n",
    "\t\t\t}\n",
    "\t\t}\n",
    "\t\tprg.step((int)((i+1)*100./linenum2));\n",
    "\t}\n",
    "\tprintf(\"\\ncorpus2 errcnt: %d\\n\",errcnt);\n",
    "\t//run incremental EM and save the resulting t(e|f)\n",
    "\tem_proc_sparse(corpus1,corpus2,linenum1,sentnum1,sentnum2,vocabsize_1,vocabsize_2,t,group,0.001,maxturn,1);\n",
    "\tt.save(\"em_result.txt\");\n",
    "\trelease_corpus(corpus1, linenum1);\n",
    "\trelease_corpus(corpus2, linenum2);\n",
    "\n",
    "    return 0;\n",
    "}"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "b41691c6",
   "metadata": {},
   "source": [
    "#### 3.3.4 二维数组+增量EM算法"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1a3fc63d",
   "metadata": {},
   "outputs": [],
   "source": [
    "#include <iostream>\n",
    "#include <stdio.h>\n",
    "#include <fstream>\n",
    "#include <sstream>\n",
    "#include<time.h>\n",
    "#include <string.h>\n",
    "#include <cmath>\n",
    "#include <stdlib.h>\n",
    "\n",
    "using namespace std;\n",
    "\n",
    "// Console progress indicator (duplicate of the one in the sparse-matrix\n",
    "// cell): prints \"NNN%,   elapsed s\" and rewrites it in place.\n",
    "class progress{\n",
    "\tprivate:\n",
    "\tint m_nPos;   // last printed percentage (0..100)\n",
    "\tclock_t m_t0; // start tick used for the elapsed-time display\n",
    "\n",
    "\tpublic:\n",
    "\tprogress(){\n",
    "\t\tm_nPos=0;\n",
    "\t\tm_t0=clock();\n",
    "\t};\n",
    "\t~progress(){\n",
    "\t\tprintf(\"\\n\");  // terminate the progress line on destruction\n",
    "\t};\n",
    "\t// Restart at `pos` percent (out-of-range values reset to 0).\n",
    "\tvoid reset(int pos){\n",
    "\t\tif(pos<0 || pos>100){\n",
    "\t\t\tm_nPos = 0;\n",
    "\t\t}\n",
    "\t\telse{\n",
    "\t\t\tm_nPos = pos;\n",
    "\t\t}\n",
    "\t\tm_t0=clock();\n",
    "\t\tclock_t t1 = m_t0;\n",
    "\t\tdouble tnow=(double)(t1-m_t0)/CLOCKS_PER_SEC;  // always 0 here; mirrors step()\n",
    "\t\tprintf(\"%3d%%,%10ds\",m_nPos,(int)tnow);\n",
    "\t};\n",
    "\t// Advance to `pos` percent; no-op when out of range or unchanged.\n",
    "\tvoid step(int pos){\n",
    "\t\tif(pos<0 || pos>100){\n",
    "\t\t\treturn;\n",
    "\t\t}\n",
    "\t\telse if (pos == m_nPos){\n",
    "\t\t\treturn;\n",
    "\t\t}\n",
    "\t\telse{\n",
    "\t\t\tm_nPos = pos;\n",
    "\t\t\tclock_t t1 = clock();\n",
    "\t\t\tdouble tnow=(double)(t1-m_t0)/CLOCKS_PER_SEC;\n",
    "\t\t\t// 16 backspaces erase the previously printed 16-character field\n",
    "\t\t\tprintf(\"\\b\\b\\b\\b\\b\\b\\b\\b\\b\\b\\b\\b\\b\\b\\b\\b%3d%%,%10ds\",m_nPos,(int)tnow);\n",
    "\t\t}\n",
    "\t\t//printf(\"\\b\\b\\b\\b%3d%%\",m_nPos);\n",
    "\t};\n",
    "};\n",
    "\n",
    "// Write matrix `tm` (r x c) in the sparse text format: a header\n",
    "// \"rows cols nnz\" followed by one \" row col value\" triple per non-zero\n",
    "// entry (single-space separated, no trailing newline).\n",
    "void savet(const char * filename, float **tm, int r, int c)\n",
    "{\n",
    "\t// first pass: count the non-zero entries for the header\n",
    "\tint nonzeros = 0;\n",
    "\tfor(int i = 0; i < r; i++){\n",
    "\t\tfor(int j = 0; j < c; j++){\n",
    "\t\t\tif(tm[i][j] != 0) nonzeros++;\n",
    "\t\t}\n",
    "\t}\n",
    "\tofstream ofs(filename);\n",
    "\tofs<<r<<\" \"<<c<<\" \"<<nonzeros;\n",
    "\t// second pass: emit the entries themselves\n",
    "\tfor(int i = 0; i < r; i++){\n",
    "\t\tfor(int j = 0; j < c; j++){\n",
    "\t\t\tif(tm[i][j] != 0){\n",
    "\t\t\t\tofs<<\" \"<<i<<\" \"<<j<<\" \"<<tm[i][j];\n",
    "\t\t\t}\n",
    "\t\t}\n",
    "\t}\n",
    "\tofs.close();\n",
    "}\n",
    "\n",
    "// Allocate a linenum x sentnum int matrix filled with -1\n",
    "// (-1 marks \"no token\" / end of sentence).\n",
    "int ** alloc_corpus(int linenum, int sentnum)\n",
    "{\n",
    "\tint **rows = new int*[linenum];\n",
    "\tfor (int r = 0; r < linenum; r++)\n",
    "\t{\n",
    "\t\trows[r] = new int[sentnum];\n",
    "\t\tfor (int c = 0; c < sentnum; c++)\n",
    "\t\t\trows[r][c] = -1;\n",
    "\t}\n",
    "\treturn rows;\n",
    "}\n",
    "\n",
    "// Free a matrix previously created by alloc_corpus.\n",
    "void release_corpus(int ** array, int linenum)\n",
    "{\n",
    "\tfor (int r = linenum - 1; r >= 0; r--)\n",
    "\t\tdelete[] array[r];\n",
    "\tdelete[] array;\n",
    "}\n",
    "\n",
    "// Allocate a rows x cols float matrix initialized to all zeros.\n",
    "float ** alloc_tm(int rows, int cols)\n",
    "{\n",
    "\tfloat **m = new float*[rows];\n",
    "\tfor (int r = 0; r < rows; r++)\n",
    "\t{\n",
    "\t\tm[r] = new float[cols];\n",
    "\t\tmemset(m[r], 0, sizeof(float)*cols);\n",
    "\t}\n",
    "\treturn m;\n",
    "}\n",
    "\n",
    "// Free a matrix created by alloc_tm / build_tm.\n",
    "void release_tm(float ** array, int rows)\n",
    "{\n",
    "\tfor (int r = rows - 1; r >= 0; r--)\n",
    "\t\tdelete[] array[r];\n",
    "\tdelete[] array;\n",
    "}\n",
    "\n",
    "// Build a dense r x c matrix from the sparse text format written by savet()\n",
    "// (or SparseMatrix::save()): header \"r c nnz\", then nnz (x, y, v) triples.\n",
    "// *row / *col receive the dimensions.\n",
    "// NOTE(review): the (x, y) indices from the file are not bounds-checked.\n",
    "float ** build_tm(const char * filename,int * row, int * col)\n",
    "{\n",
    "\tifstream ifs(filename);\n",
    "\tint r,c,t,x,y;\n",
    "\tfloat v;\n",
    "\tprogress prg;\n",
    "\tifs>>r>>c>>t;\n",
    "\t*row = r;\n",
    "\t*col = c;\n",
    "\tprintf(\"Dimension of t is:(%d,%d), non-zero numbers is : %d\\n\",r,c,t);\n",
    "\tfloat mem_MB = sizeof(float)*r*c*1.0/(1024.*1024.);\n",
    "\tfloat mem_GB = mem_MB/1024.;\n",
    "\tprintf(\"Memory cost(GB/MB): %.2f/%.2f\\n\", mem_GB,mem_MB);\n",
    "\tprintf(\"Reading t from data file : \");\n",
    "\tprg.reset(0);\n",
    "\tfloat ** tm = alloc_tm(r,c);\n",
    "\tfor(int i = 0; i<t; i++){\n",
    "\t\tifs >>x>>y>>v;\n",
    "\t\ttm[x][y] = v;\n",
    "\t\tprg.step((int)((i+1)*100/t));\n",
    "\t}\n",
    "\t//printf(\"finished!\\n\");\n",
    "\tifs.close();\n",
    "    return tm;\n",
    "}\n",
    "\n",
    "// Build a new r x c matrix that is a deep copy of `t`.\n",
    "float ** build_tm(float ** t, int r, int c)\n",
    "{\n",
    "\tfloat ** dup = alloc_tm(r,c);\n",
    "\tfor(int row = 0; row < r; row++)\n",
    "\t\tmemcpy(dup[row],t[row],sizeof(float)*c);\n",
    "    return dup;\n",
    "}\n",
    "\n",
    "// Build a zero-initialized r x c matrix.\n",
    "// Note: alloc_tm() already memset()s every row to zero, so the second\n",
    "// zeroing pass the original performed here was redundant and is removed.\n",
    "float ** build_tm(int r, int c)\n",
    "{\n",
    "    return alloc_tm(r,c);\n",
    "}\n",
    "\n",
    "// Zero every element of an r x c matrix in place.\n",
    "void reset_tm(float **t, int r,int c)\n",
    "{\n",
    "\tfor(int row = 0; row < r; row++)\n",
    "\t\tmemset(t[row],0,sizeof(float)*c);\n",
    "}\n",
    "\n",
    "// Sum of absolute element-wise differences (L1 distance) between two\n",
    "// r x c matrices.  Unlike SparseMatrix::norm() in the previous cell this\n",
    "// does not square the differences or take a square root.\n",
    "float norm_tm(float **t1, float **t2, int r,int c)\n",
    "{\n",
    "\tfloat norm = 0.0;\n",
    "\tfor(int i = 0; i<r; i++){\n",
    "\t\tfor (int j = 0; j < c; j++)\n",
    "\t\t{\n",
    "\t\t\tnorm += fabsf(t1[i][j]-t2[i][j]);\n",
    "\t\t}\n",
    "\t}\n",
    "\treturn norm;\n",
    "}\n",
    "\n",
    "// Copy matrix t2 into t1 element-wise (both are r x c).\n",
    "void copy_tm(float **t1, float **t2, int r,int c)\n",
    "{\n",
    "\tfor(int row = 0; row < r; row++)\n",
    "\t\tmemcpy(t1[row],t2[row],sizeof(float)*c);\n",
    "}\n",
    "\n",
    "// Zero the first n elements of vector v.\n",
    "void reset_v(float *v, int n)\n",
    "{\n",
    "\tfor(int i = 0; i < n; i++) v[i] = 0.0f;\n",
    "}\n",
    "\n",
    "// Read an integer-encoded corpus file.  Line 1: \"<num_lines> <max_sent_len>\";\n",
    "// each following line: \"<len> <id0> <id1> ...\".  Returns a num_lines x\n",
    "// max_sent_len matrix padded with -1; *linenum / *sentnum receive the sizes.\n",
    "// (This copy already carries the corrected `p-buf` scan guard.)\n",
    "int ** read_corpus(const char * filename, int * linenum, int * sentnum)\n",
    "{\n",
    "\t//stringstream ss;\n",
    "\tifstream ifs(filename);\n",
    "\tint nlines, sentmax;\n",
    "\tint LINE_LENGTH = 4096;\n",
    "\tprogress prg;\n",
    "\tchar *buf, *p;\n",
    "\tint thislinelen;\n",
    "\tbuf = new char[LINE_LENGTH];\n",
    "\tifs.getline(buf,LINE_LENGTH,'\\n');\n",
    "\tsscanf(buf, \"%d %d\", &nlines, &sentmax);\n",
    "\tprintf(\"corpus filename: %s, lines: %d, max sentence length: %d\\n\",filename, nlines, sentmax);\n",
    "\tfloat mem_MB = sizeof(int)*nlines*sentmax*1.0/(1024.*1024.);\n",
    "\tfloat mem_GB = mem_MB/1024.;\n",
    "\tprintf(\"Memory cost(GB/MB): %.2f/%.2f\\n\",mem_GB,mem_MB);\n",
    "\tprintf(\"Loading %s file: \",filename);\n",
    "\tprg.reset(0);\n",
    "\tint **corpus1 = alloc_corpus(nlines, sentmax);\n",
    "\tfor(int i = 0; i < nlines; i++){\n",
    "\t\tifs.getline(buf,LINE_LENGTH,'\\n');\n",
    "\t\tsscanf(buf,\"%d\",&thislinelen);\n",
    "\t\tp = buf;\n",
    "\t\t// skip the leading sentence-length field\n",
    "\t\twhile((*p!=' ')&&((int)(p-buf)<LINE_LENGTH))p++;\n",
    "\t\tp++;\n",
    "\t\tfor(int j = 0; j<thislinelen; j++){\n",
    "\t\t\tsscanf(p,\"%d\",&(corpus1[i][j]));\n",
    "\t\t\t/*if(corpus1[i][j]>4518){\n",
    "\t\t\t\tprintf(\"%s:(%d,%d):%d\\n\",filename,i,j,corpus1[i][j]);\n",
    "\t\t\t}*/\n",
    "\t\t\t// advance past the token just read\n",
    "\t\t\twhile((*p!=' ')&&((int)(p-buf)<LINE_LENGTH))p++;\n",
    "\t\t\tp++;\n",
    "\t\t}\n",
    "\t\tprg.step((int)((i+1)*100./nlines));\n",
    "\t}\n",
    "\t\n",
    "\tifs.close();\n",
    "\tdelete[] buf;\n",
    "\t*linenum = nlines;\n",
    "\t*sentnum = sentmax;\n",
    "\treturn corpus1;\n",
    "}\n",
    "\n",
    "// Allocate a rows x cols matrix of shorts, zero-initialized.\n",
    "short ** alloc_count(int rows, int cols)\n",
    "{\n",
    "\tshort **m = new short*[rows];\n",
    "\tfor (int r = 0; r < rows; r++)\n",
    "\t{\n",
    "\t\tm[r] = new short[cols];\n",
    "\t\tmemset(m[r], 0, sizeof(short)*cols);\n",
    "\t}\n",
    "\treturn m;\n",
    "}\n",
    "\n",
    "// Free a matrix created by alloc_count.\n",
    "void release_count(short ** array, int rows)\n",
    "{\n",
    "\tfor (int r = rows - 1; r >= 0; r--)\n",
    "\t\tdelete[] array[r];\n",
    "\tdelete[] array;\n",
    "}\n",
    "\n",
    "// Zero every element of a rows x cols short matrix in place.\n",
    "void reset_count(short **array, int rows, int cols)\n",
    "{\n",
    "\tfor (int r = 0; r < rows; r++)\n",
    "\t\tmemset(array[r], 0, sizeof(short)*cols);\n",
    "}\n",
    "\n",
    "#ifndef EPSILON\n",
    "// Fix: this cell uses EPSILON below as a floor to avoid division by zero,\n",
    "// but never defined it (only the sparse-matrix cell did), so the cell did\n",
    "// not compile stand-alone.  Guarded so a command-line -DEPSILON still wins.\n",
    "#define EPSILON 0.000001f\n",
    "#endif\n",
    "\n",
    "// Incremental EM for IBM Model 1 using dense 2-D float arrays.\n",
    "// corpus_e/corpus_f: token-id matrices (rows padded with -1); t: the\n",
    "// vocabsize_e x vocabsize_f translation table t(e|f), updated in place;\n",
    "// K: number of vocabulary blocks -- each iteration re-normalizes only one\n",
    "// (e,f) block of t; epsilon: convergence threshold on the L1 change of t;\n",
    "// startturn/maxturns bound the iteration counter (maxturns == -1 means\n",
    "// unlimited); verbose==1 prints per-iteration progress.  Every 50 turns the\n",
    "// current t is checkpointed to em_result_<turn>(<norm>).txt via savet().\n",
    "void em_proc_sparse(int **corpus_e, int **corpus_f, int corpus_len, int sentnum1, int sentnum2,\n",
    "\t\tint vocabsize_e, int vocabsize_f,float ** t, int K = 5,\n",
    "\t\tfloat epsilon = 0.001, int startturn=0, int maxturns=10000, int verbose=1)\n",
    "{\n",
    "\t// NOTE(review): plain `new` throws std::bad_alloc rather than returning\n",
    "\t// NULL, so the allocation-failure checks below never fire.\n",
    "\tfloat ** tn = build_tm(t,vocabsize_e,vocabsize_f);  // previous t, for convergence test\n",
    "\tif(tn==NULL){\n",
    "\t\tprintf(\"Can't allocate memory for tn.Exit!\\n\");\n",
    "\t\treturn;\n",
    "\t}\n",
    "\tfloat ** count = build_tm(vocabsize_e,vocabsize_f);  // expected counts c(e,f)\n",
    "\tif(count==NULL){\n",
    "\t\tprintf(\"Can't allocate memory for count.Exit!\\n\");\n",
    "\t\trelease_tm(tn,vocabsize_e);\n",
    "\t\treturn;\n",
    "\t}\n",
    "\tfloat * total = new float[vocabsize_f];  // per-f normalizers\n",
    "\tif(total==NULL){\n",
    "\t\tprintf(\"Can't allocate memory for total.Exit!\\n\");\n",
    "\t\trelease_tm(tn,vocabsize_e);\n",
    "\t\trelease_tm(count,vocabsize_e);\n",
    "\t\treturn;\n",
    "\t}\n",
    "\tfloat * s_total = new float[vocabsize_e];  // per-e sentence normalizers\n",
    "\tif(s_total==NULL){\n",
    "\t\tprintf(\"Can't allocate memory for s_total.Exit!\\n\");\n",
    "\t\trelease_tm(tn,vocabsize_e);\n",
    "\t\trelease_tm(count,vocabsize_e);\n",
    "\t\tdelete[] total;\n",
    "\t\treturn;\n",
    "\t}\n",
    "\tmemset(total, 0, sizeof(float)*vocabsize_f);\n",
    "\tmemset(s_total, 0, sizeof(float)*vocabsize_e);\n",
    "\n",
    "\tint turns = startturn;\n",
    "\tfloat temp1,temp2,temp3,temp4;\n",
    "\tfloat normv;\n",
    "\tint g = 0;   // index of the vocabulary block updated this iteration\n",
    "\tint start_f,end_f;\n",
    "\tint start_e,end_e;\n",
    "\t//int totalc,currentc;\n",
    "\tint blocksize_f = vocabsize_f / K;\n",
    "\tint blocksize_e = vocabsize_e / K;\n",
    "\tclock_t t0 = clock();\n",
    "\tclock_t t1=t0, t2;\n",
    "\twhile(true){\n",
    "\t\t// pick this iteration's block; the last block absorbs the remainder\n",
    "\t\tg = (g+1)%K;\n",
    "\n",
    "        start_f = g*blocksize_f;\n",
    "        end_f = start_f + blocksize_f-1;\n",
    "        if (g == K-1) end_f = vocabsize_f-1;\n",
    "\n",
    "\t\tstart_e = g*blocksize_e;\n",
    "        end_e = start_e + blocksize_e-1;\n",
    "        if (g == K-1) end_e = vocabsize_e-1;\n",
    "\n",
    "\t\treset_tm(count,vocabsize_e,vocabsize_f);//count.reset();\n",
    "\t\treset_v(total, vocabsize_f);//total.reset();\n",
    "\t\t// E-step part 1: s_total[e] = sum over f in the sentence of t(e|f)\n",
    "\t\t// NOTE(review): s_total[e] is overwritten across sentences, so the\n",
    "\t\t// second pass below reads the value from e's LAST occurrence, not the\n",
    "\t\t// current sentence's -- verify against the per-sentence Model 1 E-step.\n",
    "\t\tfor(int i = 0; i < corpus_len; i++){\n",
    "\t\t\tfor (int j = 0; j < sentnum1; j++){\n",
    "\t\t\t\tif(corpus_e[i][j]==-1)break;\n",
    "\t\t\t\ttemp1 = 0;\n",
    "\t\t\t\tfor(int k = 0; k < sentnum2; k++){\n",
    "\t\t\t\t\tif(corpus_f[i][k]==-1)break;\n",
    "\t\t\t\t\ttemp2 = t[corpus_e[i][j]][corpus_f[i][k]];\n",
    "\t\t\t\t\ttemp1 += temp2;\n",
    "\t\t\t\t}\n",
    "\t\t\t\ts_total[corpus_e[i][j]]=temp1;\n",
    "\t\t\t}\n",
    "\t\t}\n",
    "\n",
    "\t\t// E-step part 2: accumulate expected counts and per-f normalizers\n",
    "\t\tfor(int i = 0; i < corpus_len; i++){\n",
    "\t\t\tfor (int j = 0; j < sentnum1; j++){\n",
    "\t\t\t\tif(corpus_e[i][j]==-1)break;\n",
    "\t\t\t\ttemp1 = s_total[corpus_e[i][j]];\n",
    "\t\t\t\tif(temp1==0.0) temp1 = EPSILON;  // avoid division by zero\n",
    "\t\t\t\tfor(int k = 0; k < sentnum2; k++){\n",
    "\t\t\t\t\tif(corpus_f[i][k]==-1)break;\n",
    "\t\t\t\t\ttemp2 = t[corpus_e[i][j]][corpus_f[i][k]];\n",
    "\t\t\t\t\ttemp3 = total[corpus_f[i][k]];\n",
    "\t\t\t\t\ttemp4 = count[corpus_e[i][j]][corpus_f[i][k]];\n",
    "\t\t\t\t\tcount[corpus_e[i][j]][corpus_f[i][k]]=temp4+temp2/temp1;\n",
    "\t\t\t\t\ttotal[corpus_f[i][k]]=temp3+temp2/temp1;\n",
    "\n",
    "\t\t\t\t}\n",
    "\t\t\t}\n",
    "\t\t}\n",
    "\n",
    "\t\t// M-step (incremental): re-estimate t(e|f) only for this block\n",
    "\t\tfor(int f = start_f; f < end_f+1; f++){\n",
    "\t\t\ttemp2 = total[f];\n",
    "\t\t\tif(temp2 ==0.0) temp2= EPSILON;\n",
    "\t\t\tfor (int e = start_e; e < end_e+1; e++){\n",
    "\t\t\t\ttemp1 = count[e][f];\n",
    "\t\t\t\tt[e][f]=temp1/temp2;\n",
    "\t\t\t}\n",
    "\t\t}\n",
    "\n",
    "\t\tturns ++;\n",
    "\t\tnormv = norm_tm(t,tn,vocabsize_e,vocabsize_f);  // L1 distance to previous iterate\n",
    "\t\tt2 = clock();\n",
    "\n",
    "\t\tif (normv <= epsilon){\n",
    "\t\t\tif(verbose==1){\n",
    "\t\t\t\tprintf(\"after %d turns, calculation converged! Total time consumption: %f\\n\",turns, (double)(t2-t0)/CLOCKS_PER_SEC);\n",
    "\t\t\t}\n",
    "\t\t\tbreak;\n",
    "\t\t}\n",
    "\t\telse{\n",
    "\t\t\tcopy_tm(tn,t,vocabsize_e,vocabsize_f);  // remember current iterate\n",
    "\t\t}\n",
    "\t\tif(verbose == 1){\n",
    "\t\t\tprintf(\"reach %d turns. total: %f, current: %f, norm:%f\\n\",\n",
    "\t\t\t\t\tturns,(double)(t2-t0)/CLOCKS_PER_SEC,\n",
    "\t\t\t\t\t(double)(t2-t1)/CLOCKS_PER_SEC, normv);\n",
    "\t\t}\n",
    "\t\t// checkpoint the model every 50 turns so long runs can be resumed\n",
    "\t\tif(turns % 50 == 0){\n",
    "\t\t\tchar fn[40];\n",
    "\t\t\tsprintf(fn,\"em_result_%d(%f).txt\",turns,normv);\n",
    "\t\t\tsavet(fn,t,vocabsize_e,vocabsize_f);\n",
    "\t\t}\n",
    "\t\tt1 = t2;\n",
    "\t\tif(maxturns == -1) continue;  // -1 means iterate until convergence\n",
    "\t\tif(turns >= maxturns){\n",
    "\t\t\tif (verbose==1){\n",
    "\t\t\t\tprintf(\"reach max turns(%d)! Total time consumption: %f\\n\",maxturns, (double)(t2-t0)/CLOCKS_PER_SEC);\n",
    "\t\t\t}\n",
    "\t\t\tbreak;\n",
    "\t\t}\n",
    "\t}\n",
    "\tdelete[] total;\n",
    "\tdelete[] s_total;\n",
    "\trelease_tm(count,vocabsize_e);\n",
    "\trelease_tm(tn,vocabsize_e);\n",
    "}\n",
    "\n",
    "// Validate that every token id in `corpus` lies in [0, vocabsize).\n",
    "// Prints at most the first 10 offending (line,pos):id entries and\n",
    "// returns the total number of out-of-range ids found.\n",
    "static int check_corpus(const char * fname, int ** corpus, int linenum, int sentnum, int vocabsize)\n",
    "{\n",
    "\tprogress prg;\n",
    "\tprintf(\"Error check for %s :\",fname);\n",
    "\tprg.reset(0);\n",
    "\tint errcnt = 0;\n",
    "\tint firstdisplay = 10; // cap diagnostic output at the first 10 errors\n",
    "\tfor(int i = 0; i < linenum; i++){\n",
    "\t\tfor (int j = 0; j < sentnum; j++){\n",
    "\t\t\tif(corpus[i][j] == -1) break; // -1 marks end of sentence\n",
    "\t\t\tif(corpus[i][j] < 0 || corpus[i][j] >= vocabsize){\n",
    "\t\t\t\terrcnt++;\n",
    "\t\t\t\tif(firstdisplay){\n",
    "\t\t\t\t\tfirstdisplay--;\n",
    "\t\t\t\t\tprintf(\"(%d,%d):%d\\n\",i,j,corpus[i][j]);\n",
    "\t\t\t\t}\n",
    "\t\t\t}\n",
    "\t\t}\n",
    "\t\tprg.step((int)((i+1)*100./linenum));\n",
    "\t}\n",
    "\treturn errcnt;\n",
    "}\n",
    "\n",
    "// Entry point. Expected arguments:\n",
    "//   argv[1] t(e|f) initialisation file, argv[2] e-side corpus,\n",
    "//   argv[3] f-side corpus, argv[4] max EM iterations,\n",
    "//   argv[5] group count, argv[6] starting iteration number.\n",
    "int main(int argc, char * argv[])\n",
    "{\n",
    "\tint vocabsize_1, vocabsize_2;\n",
    "\tif(argc < 7){\n",
    "\t\tcout << \"not enough parameters!\"<<endl;\n",
    "\t\treturn 1; // was 0 — a bad invocation must signal failure to the shell\n",
    "\t}\n",
    "\tint maxturn = atoi(argv[4]);\n",
    "\tint group = atoi(argv[5]);\n",
    "\tint startturn = atoi(argv[6]);\n",
    "\t// Read the t(e|f) initial values and build a dense t matrix tm\n",
    "\tfloat ** tm = build_tm(argv[1],&vocabsize_1, &vocabsize_2);\n",
    "\t// Read the numeric (tokenized) e-side corpus.\n",
    "\t// Note: declared separately — only corpus1 is int**, the sizes are int.\n",
    "\tint ** corpus1;\n",
    "\tint linenum1, sentnum1;\n",
    "\tcorpus1 = read_corpus(argv[2],&linenum1, &sentnum1);\n",
    "\t// Read the numeric (tokenized) f-side corpus\n",
    "\tint ** corpus2;\n",
    "\tint linenum2, sentnum2;\n",
    "\tcorpus2 = read_corpus(argv[3],&linenum2, &sentnum2);\n",
    "\t// Sanity-check both corpora against their vocabulary sizes\n",
    "\tprintf(\"\\ncorpus1 errcnt: %d\\n\", check_corpus(argv[2],corpus1,linenum1,sentnum1,vocabsize_1));\n",
    "\tprintf(\"\\ncorpus2 errcnt: %d\\n\", check_corpus(argv[3],corpus2,linenum2,sentnum2,vocabsize_2));\n",
    "\t// Run the EM iterations\n",
    "\tem_proc_sparse(corpus1,corpus2,linenum1,sentnum1,sentnum2,vocabsize_1,vocabsize_2,tm,group,0.001,startturn,maxturn,1);\n",
    "\t// Release all heap-allocated structures before exit\n",
    "\trelease_tm(tm,vocabsize_1);\n",
    "\trelease_corpus(corpus1, linenum1);\n",
    "\trelease_corpus(corpus2, linenum2);\n",
    "\n",
    "\treturn 0;\n",
    "}"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.8"
  },
  "varInspector": {
   "cols": {
    "lenName": 16,
    "lenType": 16,
    "lenVar": 40
   },
   "kernels_config": {
    "python": {
     "delete_cmd_postfix": "",
     "delete_cmd_prefix": "del ",
     "library": "var_list.py",
     "varRefreshCmd": "print(var_dic_list())"
    },
    "r": {
     "delete_cmd_postfix": ") ",
     "delete_cmd_prefix": "rm(",
     "library": "var_list.r",
     "varRefreshCmd": "cat(var_dic_list()) "
    }
   },
   "types_to_exclude": [
    "module",
    "function",
    "builtin_function_or_method",
    "instance",
    "_Feature"
   ],
   "window_display": false
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
