{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "#读取邮件\n",
    "import os\n",
    "EMAIL_PATH = os.path.join('datasets','spam')#./datasets/spam\n",
    "easy_ham = os.path.join(EMAIL_PATH,'easy_ham')\n",
    "spam = os.path.join(EMAIL_PATH,'spam')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2500\n",
      "500\n"
     ]
    }
   ],
   "source": [
    "ham_filename = [name for name in os.listdir(easy_ham) if len(name)>20]\n",
    "spam_filename = [name for name in os.listdir(spam) if len(name)>20]\n",
    "print(len(ham_filename))\n",
    "print(len(spam_filename))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "import email\n",
    "import email.policy\n",
    "def load_email(filename,is_spam):\n",
    "    spam_name = spam if is_spam else easy_ham\n",
    "    with open(os.path.join(spam_name,filename),'rb') as f:\n",
    "        return email.parser.BytesParser(policy=email.policy.default).parse(f)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "ham_emails = [load_email(name,is_spam=False) for name in ham_filename]\n",
    "spam_emails = [load_email(name,is_spam=True) for name in spam_filename]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Martin A posted:\n",
      "Tassos Papadopoulos, the Greek sculptor behind the plan, judged that the\n",
      " limestone of Mount Kerdylio, 70 miles east of Salonika and not far from the\n",
      " Mount Athos monastic community, was ideal for the patriotic sculpture. \n",
      " \n",
      " As well as Alexander's granite features, 240 ft high and 170 ft wide, a\n",
      " museum, a restored amphitheatre and car park for admiring crowds are\n",
      "planned\n",
      "---------------------\n",
      "So is this mountain limestone or granite?\n",
      "If it's limestone, it'll weather pretty fast.\n",
      "\n",
      "------------------------ Yahoo! Groups Sponsor ---------------------~-->\n",
      "4 DVDs Free +s&p Join Now\n",
      "http://us.click.yahoo.com/pt6YBB/NXiEAA/mG3HAA/7gSolB/TM\n",
      "---------------------------------------------------------------------~->\n",
      "\n",
      "To unsubscribe from this group, send an email to:\n",
      "forteana-unsubscribe@egroups.com\n",
      "\n",
      " \n",
      "\n",
      "Your use of Yahoo! Groups is subject to http://docs.yahoo.com/info/terms/\n"
     ]
    }
   ],
   "source": [
    "# ham_emails[1].get_content().strip()\n",
    "print(ham_emails[1].get_content().strip())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "#查看邮件的各种类型结构\n",
    "def get_email_struct(email):\n",
    "    if isinstance(email,str):\n",
    "        #如果是字符就返回该信息\n",
    "        return email\n",
    "    payload = email.get_payload()\n",
    "    if isinstance(payload,list):\n",
    "        #如果邮件内容是list，遍历list，递归\n",
    "        return 'multipart({})'.format(', '.join([get_email_struct(sub_email)\\\n",
    "                                              for sub_email in payload]))\n",
    "    else:\n",
    "        #否则返回内容的类型\n",
    "        return email.get_content_type()\n",
    "    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "from collections import Counter\n",
    "#计数邮件的数据类型，并存入列表\n",
    "# get_email_struct(ham_emails[1].get_content())\n",
    "def count_email_type(emails):\n",
    "    struct = [get_email_struct(email) for email in emails]\n",
    "    return Counter(struct)\n",
    "    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[('text/plain', 218),\n",
       " ('text/html', 183),\n",
       " ('multipart(text/plain, text/html)', 45),\n",
       " ('multipart(text/html)', 20),\n",
       " ('multipart(text/plain)', 19),\n",
       " ('multipart(multipart(text/html))', 5),\n",
       " ('multipart(text/plain, image/jpeg)', 3),\n",
       " ('multipart(text/html, application/octet-stream)', 2),\n",
       " ('multipart(text/plain, application/octet-stream)', 1),\n",
       " ('multipart(text/html, text/plain)', 1),\n",
       " ('multipart(multipart(text/html), application/octet-stream, image/jpeg)', 1),\n",
       " ('multipart(multipart(text/plain, text/html), image/gif)', 1),\n",
       " ('multipart/alternative', 1)]"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "#垃圾邮件:多带有HTML\n",
    "count_email_type(spam_emails).most_common()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[('text/plain', 2408),\n",
       " ('multipart(text/plain, application/pgp-signature)', 66),\n",
       " ('multipart(text/plain, text/html)', 8),\n",
       " ('multipart(text/plain, text/plain)', 4),\n",
       " ('multipart(text/plain)', 3),\n",
       " ('multipart(text/plain, application/octet-stream)', 2),\n",
       " ('multipart(text/plain, text/enriched)', 1),\n",
       " ('multipart(text/plain, application/ms-tnef, text/plain)', 1),\n",
       " ('multipart(multipart(text/plain, text/plain, text/plain), application/pgp-signature)',\n",
       "  1),\n",
       " ('multipart(text/plain, video/mng)', 1),\n",
       " ('multipart(text/plain, multipart(text/plain))', 1),\n",
       " ('multipart(text/plain, application/x-pkcs7-signature)', 1),\n",
       " ('multipart(text/plain, multipart(text/plain, text/plain), text/rfc822-headers)',\n",
       "  1),\n",
       " ('multipart(text/plain, multipart(text/plain, text/plain), multipart(multipart(text/plain, application/x-pkcs7-signature)))',\n",
       "  1),\n",
       " ('multipart(text/plain, application/x-java-applet)', 1)]"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "#正常邮件\n",
    "count_email_type(ham_emails).most_common()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Return-Path : <12a1mailbot1@web.de>\n",
      "Delivered-To : zzzz@localhost.spamassassin.taint.org\n",
      "Received : from localhost (localhost [127.0.0.1])\tby phobos.labs.spamassassin.taint.org (Postfix) with ESMTP id 136B943C32\tfor <zzzz@localhost>; Thu, 22 Aug 2002 08:17:21 -0400 (EDT)\n",
      "Received : from mail.webnote.net [193.120.211.219]\tby localhost with POP3 (fetchmail-5.9.0)\tfor zzzz@localhost (single-drop); Thu, 22 Aug 2002 13:17:21 +0100 (IST)\n",
      "Received : from dd_it7 ([210.97.77.167])\tby webnote.net (8.9.3/8.9.3) with ESMTP id NAA04623\tfor <zzzz@spamassassin.taint.org>; Thu, 22 Aug 2002 13:09:41 +0100\n",
      "From : 12a1mailbot1@web.de\n",
      "Received : from r-smtp.korea.com - 203.122.2.197 by dd_it7  with Microsoft SMTPSVC(5.5.1775.675.6);\t Sat, 24 Aug 2002 09:42:10 +0900\n",
      "To : dcek1a1@netsgo.com\n",
      "Subject : Life Insurance - Why Pay More?\n",
      "Date : Wed, 21 Aug 2002 20:31:57 -1600\n",
      "MIME-Version : 1.0\n",
      "Message-ID : <0103c1042001882DD_IT7@dd_it7>\n",
      "Content-Type : text/html; charset=\"iso-8859-1\"\n",
      "Content-Transfer-Encoding : quoted-printable\n"
     ]
    }
   ],
   "source": [
    "#收集可用信息，查看邮件头\n",
    "for header,value in spam_emails[0].items():\n",
    "    print(header,':',value)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "from sklearn.model_selection import train_test_split\n",
    "#拆分训练集和测试集，x为邮件，y使用0,1表示是否是垃圾邮件\n",
    "x = np.array(spam_emails+ham_emails)\n",
    "y = np.array([1]*len(spam_emails)+[0]*len(ham_emails))\n",
    "x_train,x_test,y_train,y_test = train_test_split(x,y,test_size = 0.2,random_state = 42)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0    2500\n",
       "1     500\n",
       "dtype: int64"
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "import pandas as pd\n",
    "pd.Series(y).value_counts()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "numpy.ndarray"
      ]
     },
     "execution_count": 13,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "type(x_train)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "# • email.message_from_string()：用来解析消息。\n",
    "# • msg.walk()：遍历消息的附件。\n",
    "# • part.get_content_type()：获得正确 MIME 类型。\n",
    "# • msg.get_payload()：从消息正文中获取特定的部分。通常 decode 标记会设为 True，\n",
    "# 即邮件正文根据每个 Content-Transfer-Encoding 头解码。\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Martin A posted:\n",
      "Tassos Papadopoulos, the Greek sculptor behind the plan, judged that the\n",
      " limestone of Mount Kerdylio, 70 miles east of Salonika and not far from the\n",
      " Mount Athos monastic community, was ideal for the patriotic sculpture. \n",
      " \n",
      " As well as Alexander's granite features, 240 ft high and 170 ft wide, a\n",
      " museum, a restored amphitheatre and car park for admiring crowds are\n",
      "planned\n",
      "---------------------\n",
      "So is this mountain limestone or granite?\n",
      "If it's limestone, it'll weather pretty fast.\n",
      "\n",
      "------------------------ Yahoo! Groups Sponsor ---------------------~-->\n",
      "4 DVDs Free +s&p Join Now\n",
      "http://us.click.yahoo.com/pt6YBB/NXiEAA/mG3HAA/7gSolB/TM\n",
      "---------------------------------------------------------------------~->\n",
      "\n",
      "To unsubscribe from this group, send an email to:\n",
      "forteana-unsubscribe@egroups.com\n",
      "\n",
      " \n",
      "\n",
      "Your use of Yahoo! Groups is subject to http://docs.yahoo.com/info/terms/\n"
     ]
    }
   ],
   "source": [
    "print(ham_emails[1].get_content().strip())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "import re\n",
    "from html import unescape\n",
    "def html_to_plain_text(html):\n",
    "    #首先提取以head开的头的标签,非贪婪模式\n",
    "    #将其中的内容置为空，忽略大小写，多行匹配，.匹配包含换行符内的所有字符\n",
    "    text = re.sub('<head.*?>.*?</head>','',html,flags = re.M|re.S|re.I)\n",
    "    # replace each <a ...> opening tag with the word HYPERLINK followed by a newline\n",
    "    text = re.sub('<a\\s.*?>','HYPERLINK\\n',text,flags = re.M|re.S|re.I)\n",
    "    #对小写敏感，将<>里的所有字符替换成空\n",
    "    text = re.sub('<.*?>','',text,flags=re.M | re.S)\n",
    "    # collapse runs of whitespace that contain newlines into a single newline\n",
    "    text = re.sub(r'(\\s*\\n)+','\\n',text,flags=re.M | re.S)\n",
    "    return unescape(text)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [],
   "source": [
    "# print(html_to_plain_text(sample_html_spam.get_content().strip()))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<HTML><HEAD><TITLE>Hi i'm Rita !!!</TITLE>\n",
      "<META http-equiv=Content-Type content=\"text/html; charset=windows-1252\">\n",
      "<META content=\"Microsoft FrontPage 4.0\" name=GENERATOR></HEAD>\n",
      "<BODY bgcolor=\"#FF00FF\" link=\"#800000\" vlink=\"#800000\" alink=\"#800000\" text=\"#FF0000\"><LEFT>\n",
      "<TABLE width=427 height=\"60\">\n",
      "  <TBODY>\n",
      "  <TR>\n",
      "    <TD align=center bgcolor=\"#FF00FF\" width=\"419\" height=\"56\"><b><a href=\"http://www.amsterdamcash.com/click.cfm?siteid=0017&amp;companyid=33043\"><font color=\"#800000\"><span style=\"background-color: #FFFFFF\"><font face=\"Times New Roman\" size=\"7\"><i>R</i></font></span><font face=\"verdana\" size=\"6\"><span style=\"background-color: #00FFFF\">E</span><span style=\"background-color: #FFFF00\">A</span></font><font face=\"Times New Roman\" size=\"6\"><span style=\"background-color: #00FF00\">D</span></font><font face=\"verdana\" size=\"6\">\n",
      "      </font><span style=\"background-color: #FFFFFF\"><font face=\"Tahoma\" size=\"7\"><i>M</i></font></span><font face=\"Lucida Console\" size=\"6\"><span style=\"background-color: #00FF00\">Y</span></font><font face=\"verdana\" size=\"6\">\n",
      "      </font><span style=\"background-color: #FFFF00\"><font face=\"Comic Sans MS\" size=\"7\"><i>L</i></font></span><font face=\"verdana\" size=\"6\"><span style=\"background-color: #00FFFF\">I</span></font><font face=\"Georgia\" size=\"6\"><span style=\"background-color: #00FF00\">P</span></font><font face=\"Tahoma\" size=\"6\"><span style=\"background-color: #FFFF00\">S</span></font><font face=\"verdana\" size=\"5\"><span style=\"background-color: #FFFFFF\">tick</span><span style=\"background-color: #FF00FF\">\n",
      "      </span> </font><span style=\"background-color: #00FFFF\"><font face=\"verdana\" size=\"6\">!</font></span></font></a></b>\n",
      "      <p><a href=\"http://www.amsterdamcash.com/click.cfm?siteid=0017&amp;companyid=33043\" target=\"_blank\"><img alt=\"Nobody knows, I love to smoke....\" src=\"http://privategirlfriend.5u.com/images_123/big.jpg\" border=\"0\"></a></p>\n",
      "    </TD></TR></TBODY></TABLE>\n",
      "<TABLE width=428>\n",
      "  <TR>\n",
      "    <TD align=left bgcolor=\"#FF00FF\" width=\"420\">\n",
      "      <p align=\"center\"><b><a href=\"http://www.amsterdamcash.com/click.cfm?siteid=0017&amp;companyid=33043\"><font face=\"Tahoma\" color=\"#000080\" size=\"7\"><span style=\"background-color: #00FF00\">L</span></font><font color=\"#000080\" size=\"6\"><font face=\"Comic Sans MS\"><span style=\"background-color: #FFFFFF\">I</span><span style=\"background-color: #00FFFF\">V</span></font><font face=\"Times New Roman\"><span style=\"background-color: #FFFF00\">E</span><span style=\"background-color: #FF00FF\">\n",
      "      </span></font></font><font face=\"Microsoft Sans Serif\" color=\"#000080\" size=\"7\"><span style=\"background-color: #C0C0C0\">F</span></font><span style=\"background-color: #FF0000\"><font color=\"#000080\" size=\"6\" face=\"Microsoft Sans Serif\">rom</font></span><font color=\"#000080\" size=\"6\" face=\"Microsoft Sans Serif\"><span style=\"background-color: #FF00FF\">\n",
      "      </span></font><span style=\"background-color: #FFFFFF\"><font color=\"#000080\" face=\"Microsoft Sans Serif\" size=\"7\">A</font></span><font color=\"#000080\" size=\"6\" face=\"Microsoft Sans Serif\"><span style=\"background-color: #FFFFFF\">ms</span><span style=\"background-color: #00FF00\">ter</span><span style=\"background-color: #FFFF00\">dam</span><span style=\"background-color: #FF00FF\">\n",
      "      </span></font><font color=\"#000080\" size=\"7\" face=\"Lucida Console\"><span style=\"background-color: #00FFFF\">!</span></font></a></b></p>\n",
      "    </TD></TR></TABLE><BR><BR>\n",
      "<p>&nbsp;</p>\n",
      "<p>&nbsp;</p>\n",
      "<p>&nbsp;</p>\n",
      "<p>&nbsp;</p>\n",
      "<p>&nbsp;</p>\n",
      "<p><BR><BR><BR><BR><BR><FONT \n",
      "face=Verdana color=#808080 size=1>This mail is NEVER sent unsolicited, Got it by \n",
      "error ?<BR>[ <A href=\"http://www.redlightemail.com/remove.cfm?email=<email>\" \n",
      "target=_blank><FONT color=#808080>CLICK HERE</FONT></A> ] to be removed from our \n",
      "subscribers List !</FONT> \n",
      "<br>\n",
      "<br>\n",
      "<br>\n",
      "<br>\n",
      "<br>\n",
      "<br>\n",
      "<br>\n",
      "<br>\n",
      "<br>\n",
      "<br>\n",
      "<br>\n",
      "<br>\n",
      "\n",
      "\n",
      "</p>\n",
      "\n",
      "\n",
      "</BODY></HTML>\n",
      "\n",
      "fuclcxlequtkbfuoeseysgfu\n",
      "\n",
      "\n",
      "-------------------------------------------------------\n",
      "This sf.net email is sponsored by:ThinkGeek\n",
      "Welcome to geek heaven.\n",
      "http://thinkgeek.com/sf\n",
      "_______________________________________________\n",
      "Sitescooper-talk mailing list\n",
      "Sitescooper-talk@lists.sourceforge.net\n",
      "https://lists.sourceforge.net/lists/listinfo/sitescooper-talk ...\n"
     ]
    }
   ],
   "source": [
    "html_spam_emails = [email for email in x_train[y_train==1]\n",
    "                    if get_email_struct(email) == \"text/html\"]\n",
    "sample_html_spam = html_spam_emails[7]\n",
    "print(sample_html_spam.get_content().strip(), \"...\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Return a message's content as plain text, whatever its format:\n",
    "# prefers the first text/plain part, falls back to stripped text/html.\n",
    "def email_to_text(msg):\n",
    "    html = None\n",
    "    for part in msg.walk():\n",
    "        ctype = part.get_content_type()\n",
    "        if ctype not in (\"text/plain\", \"text/html\"):\n",
    "            continue\n",
    "        try:\n",
    "            content = part.get_content()\n",
    "        except Exception:  # fall back when get_content() fails (e.g. unknown charset)\n",
    "            content = str(part.get_payload())\n",
    "        if ctype == \"text/plain\":\n",
    "            return content\n",
    "        else:\n",
    "            html = content\n",
    "    if html:\n",
    "        return html_to_plain_text(html)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "    HYPERLINK\n",
      "READ\n",
      "      MY\n",
      "      LIPStick\n",
      "       !\n",
      "      HYPERLINK\n",
      "      HYPERLINK\n",
      "LIVE\n",
      "      From\n",
      "      Amsterdam\n",
      "      !\n",
      " \n",
      " \n",
      " \n",
      " \n",
      " \n",
      "This mail is NEVER sent unsolicited, Got it by\n",
      "error ?[ HYPERLINK\n",
      "\"\n",
      "target=_blank>CLICK HERE ] to be removed from our\n",
      "subscribers List !\n",
      "fuclcxlequtkbfuoeseysgfu\n",
      "-------------------------------------------------------\n",
      "This sf.net email is sponsored by:ThinkGeek\n",
      "Welcome to geek heaven.\n",
      "http://thinkgeek.com/sf\n",
      "_______________________________________________\n",
      "Sitescooper-talk mailing list\n",
      "Sitescooper-talk@lists.sourceforge.net\n",
      "https://lists.sourceforge.net/lists/listinfo/sitescooper-talk\n",
      " ...\n"
     ]
    }
   ],
   "source": [
    "print(email_to_text(sample_html_spam), \"...\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Computations => comput\n",
      "Computation => comput\n",
      "Computing => comput\n",
      "Computed => comput\n",
      "Compute => comput\n",
      "Compulsive => compuls\n"
     ]
    }
   ],
   "source": [
    "from urlextract import URLExtract\n",
    "\n",
    "try:\n",
    "    # import inside the try block so the ImportError guard below is actually reachable\n",
    "    import nltk\n",
    "    stemmer = nltk.PorterStemmer()\n",
    "    for word in (\"Computations\", \"Computation\", \"Computing\", \"Computed\", \"Compute\", \"Compulsive\"):\n",
    "        print(word, \"=>\", stemmer.stem(word))\n",
    "except ImportError:\n",
    "    print(\"Error: stemming requires the NLTK module.\")\n",
    "    stemmer = None"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn.base import BaseEstimator, TransformerMixin\n",
    "\n",
    "class EmailToWordCounterTransformer(BaseEstimator, TransformerMixin):\n",
    "    def __init__(self, strip_headers=True, lower_case=True, remove_punctuation=True,\n",
    "                 replace_urls=True, replace_numbers=True,  stemming=True):\n",
    "        self.strip_headers = strip_headers\n",
    "        self.lower_case = lower_case\n",
    "        self.remove_punctuation = remove_punctuation\n",
    "        self.replace_urls = replace_urls\n",
    "        self.replace_numbers = replace_numbers\n",
    "        self.stemming = stemming\n",
    "    def fit(self, X, y=None):\n",
    "        return self\n",
    "    def transform(self, X, y=None):\n",
    "        X_transformed = []\n",
    "        for email in X:\n",
    "            text = email_to_text(email) or \"\"\n",
    "            #是否字母全部小写\n",
    "            if self.lower_case:\n",
    "                text = text.lower()\n",
    "                #是否替换url\n",
    "            if self.replace_urls:\n",
    "                extractor = URLExtract()\n",
    "                #找到所有的url，去重放到一个列表里\n",
    "                urls = list(set(extractor.find_urls(text)))\n",
    "                #以url的长度排序\n",
    "                urls.sort(key=lambda url: len(url), reverse=True)\n",
    "                for url in urls:  # 替换url 为 ‘URL’\n",
    "                    text = text.replace(url, \" URL \")\n",
    "            if self.replace_numbers:  # 替换数字\n",
    "                text = re.sub(r'\\d+(?:\\.\\d*)?(?:[eE][+-]?\\d+)?', 'NUMBER', text)\n",
    "            if self.remove_punctuation:  # 删除标点符号\n",
    "                text = re.sub(r'\\W+', ' ', text, flags=re.M)\n",
    "            #计数每个单词\n",
    "            word_counts = Counter(text.split())\n",
    "            if self.stemming and stemmer is not None:\n",
    "                stemmed_word_counts = Counter()\n",
    "                for word, count in word_counts.items():\n",
    "                    stemmed_word = stemmer.stem(word)\n",
    "                    stemmed_word_counts[stemmed_word] += count\n",
    "                word_counts = stemmed_word_counts\n",
    "            X_transformed.append(word_counts)\n",
    "        return np.array(X_transformed)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([Counter({'number': 6, 'url': 2, 'date': 1, 'numbertnumb': 1, 'bbc': 1, 'report': 1, 'donal': 1, 'macintyr': 1, 'win': 1, 'high': 1, 'profil': 1, 'libel': 1, 'case': 1, 'against': 1, 'polic': 1}),\n",
       "       Counter({'waider': 3, 'ie': 3, 'i': 3, 'linux': 3, 'if': 2, 'in': 2, 'befor': 2, 'it': 2, 'that': 2, 'you': 1, 're': 1, 'not': 1, 'doolin': 1, 'beg': 1, 'borrow': 1, 'or': 1, 'steal': 1, 'your': 1, 'way': 1, 'there': 1, 'the': 1, 'lbw': 1, 'folk': 1, 'depart': 1, 's': 1, 'far': 1, 'too': 1, 'much': 1, 'fun': 1, 'cheer': 1, 'just': 1, 'back': 1, 'ye': 1, 'is': 1, 'veri': 1, 'person': 1, 'of': 1, 'me': 1, 'we': 1, 'are': 1, 'fact': 1, 'well': 1, 'and': 1, 'truli': 1, 'doom': 1, 'she': 1, 'say': 1, 'leav': 1, 'now': 1, 'can': 1, 'probabl': 1, 'get': 1, 'a': 1, 'good': 1, 'head': 1, 'start': 1, 'they': 1, 'realiz': 1, 'm': 1, 'gone': 1, 'jami': 1, 'zawinski': 1, 'irish': 1, 'user': 1, 'group': 1, 'ilug': 1, 'url': 1, 'for': 1, 'un': 1, 'subscript': 1, 'inform': 1, 'list': 1, 'maintain': 1, 'listmast': 1}),\n",
       "       Counter({'and': 18, 'of': 16, 'to': 15, 'i': 14, 'the': 14, 'my': 13, 'in': 11, 'you': 9, 'thi': 9, 'for': 7, 'husband': 5, 'that': 5, 'number': 5, 'want': 5, 'your': 5, 'son': 4, 'other': 4, 'will': 4, 'us': 4, 'mr': 3, 'late': 3, 'as': 3, 'situat': 3, 'with': 3, 'kongolo': 3, 'where': 3, 's': 3, 'deposit': 3, 'countri': 3, 'money': 3, 'safe': 3, 'kabila': 3, 'is': 3, 'invest': 3, 'not': 3, 'assist': 3, 'linux': 3, 'am': 2, 'sese': 2, 'seko': 2, 'presid': 2, 'now': 2, 'democrat': 2, 'republ': 2, 'congo': 2, 'drc': 2, 'move': 2, 'wa': 2, 'confid': 2, 'our': 2, 'out': 2, 'settl': 2, 'we': 2, 'later': 2, 'decid': 2, 'chang': 2, 'dollar': 2, 'swiss': 2, 'purpos': 2, 'state': 2, 'ha': 2, 'govern': 2, 'european': 2, 'all': 2, 'confisc': 2, 'ident': 2, 'so': 2, 'be': 2, 'compani': 2, 'can': 2, 'fund': 2, 'also': 2, 'which': 2, 'project': 2, 'url': 2, 'maintain': 2, 'inform': 2, 'social': 2, 'ie': 2, 'dear': 1, 'friend': 1, 'widow': 1, 'mobutu': 1, 'zair': 1, 'known': 1, 'write': 1, 'letter': 1, 'consid': 1, 'present': 1, 'circumst': 1, 'escap': 1, 'along': 1, 'two': 1, 'jame': 1, 'nzanga': 1, 'abidjan': 1, 'cote': 1, 'ivoir': 1, 'famili': 1, 'while': 1, 'morroco': 1, 'die': 1, 'cancer': 1, 'diseas': 1, 'howev': 1, 'due': 1, 'most': 1, 'billion': 1, 'bank': 1, 'into': 1, 'form': 1, 'code': 1, 'becaus': 1, 'new': 1, 'head': 1, 'dr': 1, 'laurent': 1, 'made': 1, 'arrang': 1, 'freez': 1, 'treasur': 1, 'some': 1, 'henc': 1, 'children': 1, 'lay': 1, 'low': 1, 'africa': 1, 'studi': 1, 'till': 1, 'when': 1, 'thing': 1, 'get': 1, 'better': 1, 'like': 1, 'dead': 1, 'take': 1, 'over': 1, 'joseph': 1, 'one': 1, 'chateaux': 1, 'southern': 1, 'franc': 1, 'by': 1, 'french': 1, 'such': 1, 'had': 1, 'trace': 1, 'have': 1, 'sum': 1, 'eighteen': 1, 'million': 1, 'unit': 1, 'a': 1, 'secur': 1, 'keep': 1, 'what': 1, 'do': 1, 'indic': 1, 'yourinterest': 1, 'receiv': 1, 'on': 1, 'behalf': 1, 'introduc': 1, 'who': 1, 'modal': 1, 'claim': 1, 'said': 1, 'but': 1, 'reveal': 1, 
'acquir': 1, 'real': 1, 'land': 1, 'properti': 1, 'stock': 1, 'multi': 1, 'nation': 1, 'engag': 1, 'non': 1, 'specul': 1, 'advisebi': 1, 'good': 1, 'self': 1, 'may': 1, 'at': 1, 'point': 1, 'emphas': 1, 'high': 1, 'level': 1, 'confidenti': 1, 'upcom': 1, 'demand': 1, 'hope': 1, 'betray': 1, 'trust': 1, 'repos': 1, 'conclus': 1, 'if': 1, 'shall': 1, 'divulgeto': 1, 'brief': 1, 'regard': 1, 'tell': 1, 'are': 1, 'current': 1, 'discuss': 1, 'remuner': 1, 'servic': 1, 'reason': 1, 'kindli': 1, 'furnish': 1, 'contact': 1, 'person': 1, 'telephon': 1, 'fax': 1, 'valid': 1, 'acknowledg': 1, 'receipt': 1, 'mail': 1, 'use': 1, 'abov': 1, 'email': 1, 'address': 1, 'sincer': 1, 'mariam': 1, 'm': 1, 'seseseko': 1, 'irish': 1, 'user': 1, 'group': 1, 'event': 1, 'un': 1, 'subscript': 1, 'list': 1, 'listmast': 1})],\n",
       "      dtype=object)"
      ]
     },
     "execution_count": 23,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "X_few = x_train[:3]\n",
    "X_few_wordcounts = EmailToWordCounterTransformer().fit_transform(X_few)\n",
    "X_few_wordcounts"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {},
   "outputs": [],
   "source": [
    "from scipy.sparse import csr_matrix\n",
    "#记录单词在词汇表中的位置和出现次数\n",
    "class WordCounterToVectorTransformer(BaseEstimator, TransformerMixin):\n",
    "    def __init__(self, vocabulary_size = 1000):\n",
    "        self.vocabulary_size = vocabulary_size  # 词汇量\n",
    "    def fit(self, X, y = None):\n",
    "        total_count = Counter()\n",
    "        for word_count in X:\n",
    "            for word, count in word_count.items():\n",
    "                total_count[word] += min(count, 10)\n",
    "        most_common = total_count.most_common()[:self.vocabulary_size]\n",
    "        self.most_common_ = most_common\n",
    "        self.vocabulary_ = {word: index + 1 for index, (word, count) in enumerate(most_common)}\n",
    "        return self\n",
    "    def transform(self, X, y = None):\n",
    "        rows = []\n",
    "        cols = []\n",
    "        data = []\n",
    "        for row, word_count in enumerate(X):\n",
    "#             print('row:',row)\n",
    "#             print('word_count:',word_count)\n",
    "            for word, count in word_count.items():\n",
    "#                 print('word',word)\n",
    "#                 print('count',count)\n",
    "                rows.append(row) # 训练集 实例个数\n",
    "                cols.append(self.vocabulary_.get(word, 0)) # 取得单词在词汇表中的索引位置，0代表未出现在词汇表中\n",
    "                data.append(count)\n",
    "        return csr_matrix((data, (rows, cols)), shape=(len(X), self.vocabulary_size + 1)) #"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "  (0, 0)\t15\n",
      "  (0, 3)\t6\n",
      "  (1, 0)\t78\n",
      "  (1, 1)\t3\n",
      "  (1, 2)\t2\n",
      "  (1, 4)\t1\n",
      "  (1, 5)\t1\n",
      "  (1, 6)\t1\n",
      "  (1, 7)\t1\n",
      "  (2, 0)\t336\n",
      "  (2, 1)\t14\n",
      "  (2, 2)\t11\n",
      "  (2, 3)\t5\n",
      "  (2, 4)\t14\n",
      "  (2, 5)\t16\n",
      "  (2, 6)\t18\n",
      "  (2, 7)\t9\n",
      "  (2, 8)\t15\n",
      "  (2, 9)\t13\n",
      "  (2, 10)\t9\n"
     ]
    }
   ],
   "source": [
    "\n",
    "vocab_transformer = WordCounterToVectorTransformer(vocabulary_size=10)\n",
    "X_few_vectors = vocab_transformer.fit_transform(X_few_wordcounts)\n",
    "print(X_few_vectors)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([[ 15,   0,   0,   6,   0,   0,   0,   0,   0,   0,   0],\n",
       "       [ 78,   3,   2,   0,   1,   1,   1,   1,   0,   0,   0],\n",
       "       [336,  14,  11,   5,  14,  16,  18,   9,  15,  13,   9]],\n",
       "      dtype=int32)"
      ]
     },
     "execution_count": 26,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "X_few_vectors.toarray()\n",
    "#将数据转为矩阵"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'i': 1,\n",
       " 'in': 2,\n",
       " 'number': 3,\n",
       " 'the': 4,\n",
       " 'of': 5,\n",
       " 'and': 6,\n",
       " 'you': 7,\n",
       " 'to': 8,\n",
       " 'my': 9,\n",
       " 'thi': 10}"
      ]
     },
     "execution_count": 28,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "vocab_transformer.vocabulary_\n",
    "#词汇表\n",
    "#上面的矩阵的含义：行代表每个邮件，列索引代表在词汇表中的位置（0为不在词汇表中数量，1代表‘i’出现的次数\n",
    "# 依次类推，每个数值的含义表示出现的次数）"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Use a pipeline to process the emails end-to-end\n",
    "from sklearn.pipeline import Pipeline\n",
    "email_to_array_pipeline = Pipeline([\n",
    "    (\"email_to_wordcount\", EmailToWordCounterTransformer()),\n",
    "    (\"wordcount_to_vector\", WordCounterToVectorTransformer()),\n",
    "])\n",
    "email_array = email_to_array_pipeline.fit_transform(x_train)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 47,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(2400, 1001)"
      ]
     },
     "execution_count": 47,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# pd.Series(email_array)\n",
    "email_array.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 38,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "c:\\program files\\python\\lib\\site-packages\\sklearn\\svm\\base.py:929: ConvergenceWarning: Liblinear failed to converge, increase the number of iterations.\n",
      "  \"the number of iterations.\", ConvergenceWarning)\n",
      "[Parallel(n_jobs=1)]: Using backend SequentialBackend with 1 concurrent workers.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[CV]  ................................................................\n",
      "[CV] .................................... , score=0.985, total=   0.2s"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "[Parallel(n_jobs=1)]: Done   1 out of   1 | elapsed:    0.1s remaining:    0.0s\n",
      "[Parallel(n_jobs=1)]: Done   2 out of   2 | elapsed:    0.2s remaining:    0.0s\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "[CV]  ................................................................\n",
      "[CV] .................................... , score=0.989, total=   0.1s\n",
      "[CV]  ................................................................\n",
      "[CV] .................................... , score=0.986, total=   0.2s\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "[Parallel(n_jobs=1)]: Done   3 out of   3 | elapsed:    0.4s finished\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "array([0.98501873, 0.98875   , 0.98623279])"
      ]
     },
     "execution_count": 38,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "#使用逻辑回归分类器训练\n",
    "from sklearn.linear_model import LogisticRegression\n",
    "from sklearn.model_selection import cross_val_score\n",
    "lgr_clf = LogisticRegression(solver=\"liblinear\", random_state=42)\n",
    "lgr_clf.fit(email_array,y_train)\n",
    "score = cross_val_score(lgr_clf, email_array, y_train, cv=3, verbose=3)\n",
    "score"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 40,
   "metadata": {},
   "outputs": [],
   "source": [
    "y_predict = lgr_clf.predict(email_array)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 41,
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn.model_selection import cross_val_predict\n",
    "cvp = cross_val_predict(lgr_clf,email_array,y_train,cv=3)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 42,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([[2010,    8],\n",
       "       [  24,  358]], dtype=int64)"
      ]
     },
     "execution_count": 42,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from sklearn.metrics import confusion_matrix\n",
    "confusion_matrix(y_train,cvp)\n",
    "#实际是正常邮件被预测为垃圾邮件的有24个，实际为垃圾邮件被预测为正常邮件的有8个"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 48,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "precision_score:  0.9781420765027322\n",
      "recall_score:  0.93717277486911\n",
      "f1_score:  0.9572192513368983\n"
     ]
    }
   ],
   "source": [
    "#查看精确率和召回率\n",
    "from sklearn.metrics import f1_score,precision_score,recall_score\n",
    "print('precision_score: ',precision_score(y_train,cvp))\n",
    "print('recall_score: ',recall_score(y_train,cvp))\n",
    "print('f1_score: ',f1_score(y_train,cvp))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 77,
   "metadata": {},
   "outputs": [],
   "source": [
    "#使用伯努利分布的贝叶斯公式\n",
    "from sklearn.naive_bayes import BernoulliNB     \n",
    "blb_clf = BernoulliNB(alpha=1.0,binarize=0.0005)\n",
    "\n",
    "cvp_blb = cross_val_predict(blb_clf,email_array,y_train,cv=3)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 78,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "BernoulliNB(alpha=1.0, binarize=0.0005, class_prior=None, fit_prior=True)"
      ]
     },
     "execution_count": 78,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "blb_clf.fit(email_array,y_train)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 79,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([[1950,   68],\n",
       "       [  72,  310]], dtype=int64)"
      ]
     },
     "execution_count": 79,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "confusion_matrix(y_train,cvp_blb)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 80,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "precision_score:  0.8201058201058201\n",
      "recall_score:  0.8115183246073299\n",
      "f1_score:  0.8157894736842106\n"
     ]
    }
   ],
   "source": [
    "#查看精确率和召回率\n",
    "print('precision_score: ',precision_score(y_train,cvp_blb))\n",
    "print('recall_score: ',recall_score(y_train,cvp_blb))\n",
    "print('f1_score: ',f1_score(y_train,cvp_blb))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 62,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "GridSearchCV(cv=5, error_score='raise-deprecating',\n",
       "             estimator=BernoulliNB(alpha=1.0, binarize=0.0005, class_prior=None,\n",
       "                                   fit_prior=True),\n",
       "             iid='warn', n_jobs=None,\n",
       "             param_grid=[{'alpha': [1, 2, 3, 4, 5, 6],\n",
       "                          'binarize': [0.0005, 0.005, 0.05, 0.0001, 0.0002,\n",
       "                                       0.0003, 0.0004]}],\n",
       "             pre_dispatch='2*n_jobs', refit=True, return_train_score=True,\n",
       "             scoring='neg_mean_squared_error', verbose=0)"
      ]
     },
     "execution_count": 62,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "#使用网格搜索调超参数\n",
    "from sklearn.model_selection import GridSearchCV\n",
    "param_grid = [\n",
    "    {'alpha':[1,2,3,4,5,6],'binarize':[0.0005,0.005,0.05,0.0001,0.0002,0.0003,0.0004]}\n",
    "]\n",
    "gridSearchCV = GridSearchCV(blb_clf,param_grid,cv= 5,\n",
    "                            scoring='neg_mean_squared_error', return_train_score=True)\n",
    "gridSearchCV.fit(email_array,y_train)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 64,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "BernoulliNB(alpha=1, binarize=0.0005, class_prior=None, fit_prior=True)"
      ]
     },
     "execution_count": 64,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "gridSearchCV.best_params_\n",
    "#从给到的随机森林的参数组合，选出最好的参数组合\n",
    "gridSearchCV.best_estimator_"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 83,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "BernoulliNB(alpha=1, binarize=0.0005, class_prior=None, fit_prior=True)"
      ]
     },
     "execution_count": 83,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "gridSearchCV.best_params_\n",
    "#从给到的随机森林的参数组合，选出最好的参数组合\n",
    "gridSearchCV.best_estimator_\n",
    "#最好的估算器\n",
    "# cvres = gridSearchCV.cv_results_\n",
    "# cvres\n",
    "#取出所有组合的分数\n",
    "# for mean_score,param in zip(cvres['mean_test_score'],cvres['params']):\n",
    "#     print(np.sqrt(-mean_score),param)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 81,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "KNeighborsClassifier(algorithm='auto', leaf_size=30, metric='minkowski',\n",
       "                     metric_params=None, n_jobs=None, n_neighbors=5, p=2,\n",
       "                     weights='uniform')"
      ]
     },
     "execution_count": 81,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from sklearn.neighbors import KNeighborsClassifier\n",
    "knn = KNeighborsClassifier(n_neighbors=5)\n",
    "knn.fit(email_array,y_train)                           "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 89,
   "metadata": {},
   "outputs": [],
   "source": [
    "cvp_knn = cross_val_predict(knn,email_array,y_train,cv=3)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 90,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([[1994,   24],\n",
       "       [ 174,  208]], dtype=int64)"
      ]
     },
     "execution_count": 90,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "confusion_matrix(y_train,cvp_knn)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 91,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "precision_score:  0.896551724137931\n",
      "recall_score:  0.5445026178010471\n",
      "f1_score:  0.6775244299674267\n"
     ]
    }
   ],
   "source": [
    "#查看精确率和召回率\n",
    "print('precision_score: ',precision_score(y_train,cvp_knn))\n",
    "print('recall_score: ',recall_score(y_train,cvp_knn))\n",
    "print('f1_score: ',f1_score(y_train,cvp_knn))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 95,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "GridSearchCV(cv=3, error_score='raise-deprecating',\n",
       "             estimator=KNeighborsClassifier(algorithm='auto', leaf_size=30,\n",
       "                                            metric='minkowski',\n",
       "                                            metric_params=None, n_jobs=None,\n",
       "                                            n_neighbors=5, p=2,\n",
       "                                            weights='uniform'),\n",
       "             iid='warn', n_jobs=None,\n",
       "             param_grid=[{'algorithm': ['brute', 'kd_tree', 'ball_tree'],\n",
       "                          'p': [1, 2]},\n",
       "                         {'leaf_size': [40, 50, 70],\n",
       "                          'weights': ['uniform', 'distance']}],\n",
       "             pre_dispatch='2*n_jobs', refit=True, return_train_score=True,\n",
       "             scoring='neg_mean_squared_error', verbose=0)"
      ]
     },
     "execution_count": 95,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "import warnings\n",
    "warnings.filterwarnings(\"ignore\")\n",
    "param_grid_knn = [\n",
    "    {'algorithm': ['brute', 'kd_tree', 'ball_tree'], 'p': [1, 2]},\n",
    "    {'weights': ['uniform','distance'], 'leaf_size': [40,50,70]},\n",
    "  ]\n",
    "gridCv_knn = GridSearchCV(knn, param_grid_knn, cv=3,\n",
    "                           scoring='neg_mean_squared_error', return_train_score=True)\n",
    "gridCv_knn.fit(email_array,y_train)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 96,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "KNeighborsClassifier(algorithm='auto', leaf_size=40, metric='minkowski',\n",
       "                     metric_params=None, n_jobs=None, n_neighbors=5, p=2,\n",
       "                     weights='distance')"
      ]
     },
     "execution_count": 96,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "gridCv_knn.best_params_\n",
    "#从给到的随机森林的参数组合，选出最好的参数组合\n",
    "gridCv_knn.best_estimator_"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 97,
   "metadata": {},
   "outputs": [],
   "source": [
    "knn_best = KNeighborsClassifier(algorithm='auto', leaf_size=40, metric='minkowski',\n",
    "                     metric_params=None, n_jobs=None, n_neighbors=5, p=2,\n",
    "                     weights='distance')\n",
    "cvp_knn_best = cross_val_predict(knn_best,email_array,y_train,cv=3)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 98,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([[1994,   24],\n",
       "       [ 145,  237]], dtype=int64)"
      ]
     },
     "execution_count": 98,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "confusion_matrix(y_train,cvp_knn_best)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 99,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "precision_score:  0.896551724137931\n",
      "recall_score:  0.5445026178010471\n",
      "f1_score:  0.6775244299674267\n"
     ]
    }
   ],
   "source": [
    "#查看精确率和召回率\n",
    "print('precision_score: ',precision_score(y_train,cvp_knn))\n",
    "print('recall_score: ',recall_score(y_train,cvp_knn))\n",
    "print('f1_score: ',f1_score(y_train,cvp_knn))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#总体来看还是逻辑回归的模型更好"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
