{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Naive Bayes\n",
    "这个 ipynb 使用 Naive Bayes 完成 Spam Detection 任务。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Data loading & initialisation\n",
    "import os\n",
    "import re\n",
    "import random\n",
    "import numpy as np\n",
    "\n",
    "# Dataset: a subset of the Ling-Spam corpus with 867 training emails and\n",
    "# 291 testing emails.  Spam files are uniformly named 'spmsg*.txt'.\n",
    "# The first two lines of each file are headers; the body starts on line 3.\n",
    "\n",
    "test_data = []\n",
    "train_data = []\n",
    "\n",
    "def DataLoader(path, L):\n",
    "    '''Read every mail under `path`, appending (text, label) pairs to L.\n",
    "\n",
    "    label is 1 for spam and 0 for ham; spam is recognised purely by the\n",
    "    'spmsg*.txt' file-name convention of the Ling-Spam dataset.\n",
    "    '''\n",
    "    for name in os.listdir(path):\n",
    "        # Classify on the base name only, so a path component that happens\n",
    "        # to contain 'spmsg' cannot mislabel every file; the dot before\n",
    "        # 'txt' is escaped so '.txt' must appear literally.\n",
    "        label = 1 if re.match(r'spmsg.*\\.txt$', name) else 0\n",
    "        with open(os.path.join(path, name), 'r') as f:\n",
    "            L.append((f.read(), label))\n",
    "\n",
    "test_path = 'hw4_nb/test-mails'\n",
    "train_path = 'hw4_nb/train-mails'\n",
    "DataLoader(train_path, train_data)\n",
    "DataLoader(test_path, test_data)\n",
    "# Shuffle so any later order-sensitive inspection sees a random order.\n",
    "random.shuffle(train_data)\n",
    "random.shuffle(test_data)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "label:\t0\n",
      "\n",
      "mail:\tSubject: references in slavic syntax\n",
      "\n",
      "dear linguists , i am going to write a coursework in syntax on government and binding grammar basis . i have preliminary chosen two topics : \" formation of yes - no questions in russian \" or \" double negation in russian \" ( like \" he does not eat nothing \" ) . could you send me some references on these two topics so that i could read the relevant literature and choose one of the topics . i would prefer references on yes-no question formation and double negation in slavic languages but any references on these topics will do , preferred are references in gb framework . thank you , elena rudnitskaya .\n",
      "\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# Inspect one parsed e-mail to verify the raw text and its label.\n",
    "print('label:\\t{}\\n'.format(train_data[24][1]))\n",
    "print('mail:\\t{}\\n'.format(train_data[24][0]))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 移除标点符号\n",
    "算法第一步，移除所有的标点符号。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "from functools import reduce\n",
    "def RemoveTokens(data):\n",
    "    L = []\n",
    "    for mail, label in data:\n",
    "        L.append([list(filter(None,re.split('[^a-zA-Z]+',mail))),label])\n",
    "    return L\n",
    "\n",
    "# Tokenise both splits (mail text -> list of alphabetic words).\n",
    "train_data = RemoveTokens(train_data)\n",
    "test_data = RemoveTokens(test_data)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[['Subject', 're', 'sum', 'walloon', 'several', 'weeks', 'ago', 'i', 'posted', 'a', 'passage', 'in', 'the', 'local', 'walloon', 'of', 'the', 'bastogne', 'area', 'of', 'belgium', 'and', 'asked', 'for', 'a', 'translation', 'i', 'had', 'quite', 'a', 'number', 'of', 'responses', 'and', 'it', 'appears', 'that', 'the', 'responders', 'had', 'a', 'good', 'time', 'wrestling', 'with', 'the', 'passage', 'here', 'is', 'the', 'passage', 'i', 'gn', 'e', 'a', 'po', 'pre', 's', 'kinze', 'ans', 'du', 'd', 'ci', 'dj', 'asto', 'amon', 'albe', 'rt', 'le', 'yona', 'rd', 'e', 't', 'dj', 'rawa', 'rdo', 'pace', 'k', 'on', 'm', 'avot', 'dit', 'k', 'ou', 'profe', 'sseu', 'r', 'do', 'se', 'mine', 're', 'vlot', 'nos', 've', 'y', 'po', 'pa', 'rler', 'do', 'walon', 'dju', 'm', 'sovin', 'co', 'k', 'dj', 'e', 'dmande', 'a', 'c', 'momint', 'la', 'kin', 'a', 'dje', 'k', 'il', 'e', 'don', 'c', 'cure', 'la', 'dj', 'e', 'vite', 'avou', 'compris', 'k', 'i', 'n', 'astot', 'nin', 'pus', 'cure', 'k', 'mi', 'surtout', 'cand', 'dj', 'l', 'e', 've', 'you', 'avou', 'oune', 'be', 'le', 'djon', 'ne', 'bwe', 'ce', 'le', 'ki', 'n', 'compurdot', 'we', 're', 'lu', 'walon', 'me', 's', 'k', 'astot', 'bin', 'de', 'cide', 'a', 'l', 'aprinde', 'avou', 'de', 's', 'profe', 'sseu', 'rs', 'come', 'pierrot', 'come', 'jeannot', 'come', 'roger', 'ou', 'come', 'mi', 'di', 'st', 'i', 'l', 'fou', 'and', 'here', 'is', 'the', 'consensus', 'translation', 'il', 'y', 'a', 'a', 'peu', 'pre', 's', 'quinze', 'ans', 'd', 'ici', 'j', 'e', 'tais', 'chez', 'albert', 'le', 'onard', 'et', 'j', 'attendais', 'parce', 'qu', 'on', 'm', 'avait', 'dit', 'qu', 'un', 'professeur', 'du', 'se', 'minaire', 'voulait', 'nous', 'voir', 'pour', 'parler', 'du', 'wallon', 'je', 'me', 'souviens', 'ce', 'que', 'j', 'ai', 'demande', 'a', 'ce', 'moment', 'la', 'quel', 'a', 'ge', 'a', 't', 'il', 'donc', 'ce', 'cure', 'la', 'j', 'avais', 'vite', 'compris', 'qu', 'il', 'n', 'e', 'tais', 'pas', 'plus', 'cure', 'que', 'moi', 'surtout', 'quand', 'je', 'l', 'ai', 
'vu', 'avec', 'une', 'belle', 'jeune', 'demoiselle', 'qui', 'ne', 'comprenait', 'gue', 're', 'le', 'wallon', 'mais', 'qui', 'e', 'tait', 'bien', 'de', 'cide', 'e', 'a', 'l', 'apprendre', 'avec', 'des', 'professeurs', 'comme', 'pierrot', 'comme', 'jeannot', 'comme', 'roger', 'ou', 'comme', 'moi', 'dit', 'il', 'le', 'fou', 'there', 'were', 'some', 'uncertainties', 'and', 'disagreements', 'i', 've', 'gone', 'with', 'the', 'majority', 'view', 'in', 'each', 'case', 'the', 'biggest', 'problem', 'was', 'the', 'very', 'last', 'bit', 'which', 'caused', 'havoc', 'though', 'the', 'gist', 'of', 'it', 'seems', 'generally', 'clear', 'there', 'was', 'some', 'disagreement', 'as', 'to', 'whether', 'it', 'was', 'the', 'non', 'priest', 'or', 'the', 'young', 'girl', 'who', 'wanted', 'to', 'learn', 'walloon', 'most', 'people', 'went', 'for', 'the', 'woman', 'which', 'is', 'what', 'the', 'syntax', 'appears', 'to', 'say', 'the', 'strange', 'form', 'rawa', 'rdo', 'in', 'the', 'second', 'line', 'appears', 'to', 'be', 'the', 'same', 'word', 'as', 'french', 'regarder', 'but', 'i', 'am', 'told', 'that', 'in', 'northern', 'dialects', 'this', 'word', 'is', 'widely', 'used', 'in', 'the', 'sense', 'of', 'wait', 'wait', 'for', 'watch', 'for', 'another', 'headache', 'was', 'the', 'word', 'bwe', 'ce', 'le', 'in', 'line', 'this', 'clearly', 'means', 'something', 'like', 'demoiselle', 'and', 'it', 'is', 'possibly', 'the', 'same', 'word', 'as', 'french', 'pucelle', 'virgin', 'maiden', 'which', 'is', 'unlikely', 'to', 'be', 'the', 'sense', 'intended', 'i', 'am', 'told', 'that', 'speakers', 'of', 'walloon', 'are', 'now', 'generally', 'elderly', 'apart', 'from', 'some', 'younger', 'people', 'who', 'are', 'enthusiasts', 'though', 'the', 'belgian', 'student', 'i', 'consulted', 'here', 'told', 'me', 'she', 'had', 'a', 'friend', 'who', 'spoke', 'walloon', 'some', 'younger', 'people', 'of', 'limited', 'education', 'are', 'also', 'said', 'to', 'have', 'a', 'grasp', 'of', 'the', 'variety', 'and', 'engagingly', 
'i', 'am', 'told', 'that', 'obscenities', 'and', 'insults', 'tend', 'to', 'persist', 'especially', 'well', 'one', 'respondent', 'objected', 'to', 'my', 'speaking', 'of', 'walloon', 'as', 'a', 'dialect', 'of', 'french', 'preferring', 'to', 'see', 'it', 'as', 'a', 'distinct', 'language', 'here', 's', 'an', 'english', 'version', 'it', 's', 'fifteen', 'years', 'ago', 'now', 'i', 'was', 'at', 'the', 'albert', 'leonard', 'institute', 'and', 'i', 'was', 'curious', 'because', 'i', 'had', 'been', 'told', 'that', 'a', 'teacher', 'from', 'the', 'seminary', 'wanted', 'to', 'see', 'us', 'to', 'talk', 'about', 'walloon', 'i', 'still', 'remember', 'what', 'i', 'was', 'wondering', 'at', 'that', 'moment', 'so', 'how', 'old', 'is', 'that', 'priest', 'i', 'quickly', 'understood', 'that', 'he', 'was', 'no', 'more', 'a', 'priest', 'than', 'i', 'was', 'particularly', 'when', 'i', 'saw', 'him', 'with', 'a', 'beautiful', 'young', 'girl', 'who', 'scarcely', 'knew', 'any', 'walloon', 'but', 'who', 'was', 'determined', 'to', 'learn', 'it', 'from', 'teachers', 'like', 'pierrot', 'like', 'jannot', 'like', 'roger', 'or', 'like', 'me', 'that', 's', 'what', 'he', 'said', 'the', 'fool', 'the', 'passage', 'was', 'written', 'by', 'm', 'georges', 'pasau', 'pre', 's', 'ident', 'du', 'muse', 'e', 'de', 'la', 'parole', 'au', 'pays', 'de', 'bastogne', 'it', 'appeared', 'in', 'issue', 'of', 'the', 'magazine', 'singuliers', 'which', 'is', 'devoted', 'to', 'walloon', 'this', 'was', 'a', 'special', 'number', 'given', 'over', 'to', 'the', 'publication', 'of', 'a', 'new', 'dictionary', 'of', 'walloon', 'the', 'dictionary', 'is', 'michel', 'francard', 'dictionnaire', 'des', 'parlers', 'wallons', 'du', 'pays', 'de', 'bastogne', 'brussels', 'deboeck', 'universite', 'isbn', 'francard', 'is', 'professor', 'of', 'romance', 'linguistics', 'at', 'the', 'university', 'of', 'louvain', 'in', 'belgium', 'his', 'address', 'is', 'faculte', 'des', 'lettres', 'universite', 'catholique', 'de', 'louvain', 'louvain', 'le', 
'nauve', 'belgium', 'and', 'his', 'e', 'mail', 'is', 'francard', 'frwa', 'ucl', 'ac', 'be', 'i', 'have', 'n', 't', 'consulted', 'him', 'yet', 'but', 'plan', 'to', 'do', 'so', 'the', 'orthography', 'used', 'here', 'for', 'walloon', 'was', 'i', 'believe', 'invented', 'by', 'francard', 'specifically', 'for', 'the', 'dictionary', 'the', 'dictionary', 'contains', 'some', 'nice', 'dialect', 'maps', 'and', 'some', 'linguistic', 'information', 'about', 'walloon', 'my', 'thanks', 'to', 'jean', 'francois', 'carrasco', 'richard', 'coates', 'jean', 'francois', 'delannoy', 'roger', 'feron', 'frederik', 'fouvry', 'vincent', 'granville', 'ted', 'harding', 'yolande', 'meessen', 'philippe', 'mennecier', 'bernd', 'moebius', 'and', 'his', 'mother', 'joseph', 'reisdoerfer', 'thierry', 'j', 'van', 'steenberghe', 'guido', 'vanden', 'wyngaerd', 'and', 'max', 'wheeler', 'larry', 'trask', 'cogs', 'university', 'of', 'sussex', 'brighton', 'bn', 'qh', 'england', 'larryt', 'cogs', 'susx', 'ac', 'uk'], 0]\n",
      "[['Subject', 'coling', 'acl', 'registration', 'deadline', 'early', 'registration', 'for', 'coling', 'acl', 'ends', 'july', 'to', 'benefit', 'from', 'reduced', 'rates', 'your', 'registration', 'form', 'and', 'your', 'payment', 'must', 'be', 'received', 'by', 'that', 'date', 'for', 'further', 'information', 'on', 'the', 'conference', 'and', 'on', 'how', 'to', 'register', 'consult', 'the', 'conference', 'web', 'site', 'at', 'http', 'coling', 'acl', 'iro', 'umontreal', 'ca'], 0]\n"
     ]
    }
   ],
   "source": [
    "# Sanity check: punctuation and digits should be gone from both splits.\n",
    "print(train_data[0])\n",
    "print(test_data[0])"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 实现 Spam Detector"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "from functools import reduce\n",
    "def Symbols(data):\n",
    "    # 提取字母表\n",
    "    V = []\n",
    "    for e in data:\n",
    "        mail = e[0]\n",
    "        label_ = e[1]\n",
    "        for symbol in mail:\n",
    "            if symbol in V:\n",
    "                pass\n",
    "            else:\n",
    "                V.append(symbol)\n",
    "    return V\n",
    "\n",
    "def Count(data,label):\n",
    "    # 特定label的元素\n",
    "    D = []\n",
    "    for e in data:\n",
    "        data_ = e[0]\n",
    "        label_ = e[1]\n",
    "        if label == label_:\n",
    "            D.append(data_)\n",
    "    return D\n",
    "        \n",
    "class SpamDetector(object):\n",
    "    def __init__(self):\n",
    "        # 先验概率\n",
    "        self.P0 = None\n",
    "        self.P1 = None\n",
    "        # 后验概率\n",
    "        self.P = {}\n",
    "        # 词汇表 \n",
    "        self.V = []\n",
    "    \n",
    "    def train(self, train_data):\n",
    "        # (1) 计算单词空间\n",
    "        print('On computing alphabet...')\n",
    "        self.V = Symbols(train_data)\n",
    "        # (2) 计算先验概率\n",
    "        print('On computing prior prob...')\n",
    "        D0 = Count(train_data,0)\n",
    "        D1 = Count(train_data,1)\n",
    "        self.P0 = len(D0) / len(train_data)\n",
    "        self.P1 = len(D1) / len(train_data)\n",
    "        # (3) 计算 Tc\n",
    "        T0 = np.array(reduce(lambda x,y:x+y, D0))\n",
    "        T1 = np.array(reduce(lambda x,y:x+y, D1))\n",
    "        n0 = len(T0)\n",
    "        n1 = len(T1)\n",
    "        # (4) 计算后验概率\n",
    "        print('On computing posterior prob...')\n",
    "        for word in self.V:\n",
    "            n0k = np.sum((T0==word) + 0)\n",
    "            n1k = np.sum((T1==word) + 0)\n",
    "            self.P[word+'0'] = (n0k + 1) / (n0 + len(self.V))\n",
    "            self.P[word+'1'] = (n1k + 1) / (n1 + len(self.V))\n",
    "    \n",
    "    def test(self, test_datas):\n",
    "        pred_y = []\n",
    "        print('On computing test...')\n",
    "        for e in test_datas:\n",
    "            data = e[0]\n",
    "            L = []\n",
    "            for word in data:\n",
    "                if word in self.V:\n",
    "                    L.append(word)\n",
    "            # 使用对数，防止过小造成数据溢出\n",
    "            y0 = y1 = 0\n",
    "            for word in L:\n",
    "                y0 += np.log(self.P[word+'0'])\n",
    "                y1 += np.log(self.P[word+'1'])\n",
    "            y0 += np.log(self.P0)\n",
    "            y1 += np.log(self.P1)\n",
    "            # print(y0, y1)\n",
    "            pred_y.append(0 if y0 > y1 else 1)\n",
    "        return pred_y"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "On computing alphabet...\n",
      "On computing prior prob...\n",
      "On computing posterior prob...\n",
      "On computing test...\n"
     ]
    }
   ],
   "source": [
    "# Train the naive Bayes detector, then predict on the held-out mails.\n",
    "detector = SpamDetector()\n",
    "detector.train(train_data)\n",
    "pred_y = detector.test(test_data)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>Value</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>accuracy</th>\n",
       "      <td>0.993127</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>precision</th>\n",
       "      <td>1.000000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>recall</th>\n",
       "      <td>0.959184</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>F-score</th>\n",
       "      <td>0.979167</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "              Value\n",
       "accuracy   0.993127\n",
       "precision  1.000000\n",
       "recall     0.959184\n",
       "F-score    0.979167"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Evaluate the predictions: confusion counts and the usual metrics.\n",
    "import pandas as pd\n",
    "\n",
    "# Confusion-matrix counts, treating spam (label 1) as the positive class.\n",
    "tp = tn = fp = fn = 0\n",
    "for i in range(len(test_data)):\n",
    "    truth = test_data[i][1]\n",
    "    pred = pred_y[i]\n",
    "    if truth == 1 and pred == 1:\n",
    "        tp += 1\n",
    "    elif truth == 1 and pred == 0:\n",
    "        fn += 1\n",
    "    elif truth == 0 and pred == 1:\n",
    "        fp += 1\n",
    "    elif truth == 0 and pred == 0:\n",
    "        tn += 1\n",
    "\n",
    "accuracy = (tp + tn) / (tp + tn + fp + fn)\n",
    "# Guard the degenerate cases (no predicted / no actual positives) so this\n",
    "# cell cannot die with ZeroDivisionError on an unlucky split.\n",
    "precision = tp / (tp + fp) if tp + fp else 0.0\n",
    "recall = tp / (tp + fn) if tp + fn else 0.0\n",
    "fscore = 2 / (1 / precision + 1 / recall) if precision and recall else 0.0\n",
    "\n",
    "df = pd.DataFrame(\n",
    "    {'Value': [accuracy, precision, recall, fscore]},\n",
    "    index=['accuracy', 'precision', 'recall', 'F-score']\n",
    ")\n",
    "df.to_csv('Result.csv')\n",
    "df  # rich display of the metrics table"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "torch",
   "language": "python",
   "name": "torch"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
