{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "84034dcb",
   "metadata": {},
   "outputs": [],
   "source": [
    "imoort os\n",
    "normal_file_list=os.listdir(\"item5/item5-ss-data/normal/\")\n",
    "spam_file_list=os.listdir(\"item5/item5-ss-data/spam/\")\n",
    "print(f\"正常邮件的文件列表,{normal_file_list}\")\n",
    "print(\"垃圾邮件的文件列表,{spam_file_List}\")\n",
    "stop_list=[]\n",
    "for line in open(\"item5/item5-ss-data/stopwords.txt\",encoding='utf-8'):\n",
    "    stop_list.append(line[:len(line)-1])\n",
    "print(\"停用词文件内容：\",stopList)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "cba0514a",
   "metadata": {},
   "outputs": [],
   "source": [
    "from jieba import cut\t\n",
    "from re import sub\n",
    "def get_words(file,stopList):\n",
    "    words_list=[]\n",
    "    for line in open(file,encoding='utf-8'):\n",
    "         line=line.strip()\n",
    "         line=sub(r'[.【】0-9、——，。！\\~*]','',line)  \n",
    "         line=cut(line)\n",
    "         line=filter(lambda word:len(word)>1,line) \n",
    "        words_list.extend(line)\n",
    "        words=[]\n",
    "        for i in words_list:\n",
    "            if i not in stop_list and i.strip()!='' and i!=None:\n",
    "               words.append(i)\n",
    "    return words\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e3f9448c",
   "metadata": {},
   "outputs": [],
   "source": [
    "from collections import Counter\n",
    "from itertools import chain\n",
    "allwords=[]\n",
    "for spamfile in spam_file_List:\n",
    "words=get_Words(\"../item5/item5-ss-data/spam/\"+spamfile,stop_list)\n",
    "allwords.append(words)\n",
    "for normalfile in normal_file_list:\n",
    "words=ge_words(\"../item5/item5-ss-data/normal/\"+normalfile,stop_list)\n",
    "allwords.append(words)\n",
    "print(\"训练集中所有的有效词语列表：\")\n",
    "print(allwords)\n",
    "frep=Counter(chain(*allwords))\t\n",
    "top_ten=frep.most_common(10)\t\t\n",
    "top_words=[w[0] for w in top_ten]\t\n",
    "print(\"训练集中出现频次最高的前10个词语:\")\n",
    "print(f\"{top_words}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d31e1a7a",
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "vector=[]\n",
    "for words in allwords:\n",
    "    temp=list(map(lambda x:words.count(x),topWords))\t\t\t                   #每个高频词语在每封邮件中出现的次数\n",
    "    vector.append(temp)\n",
    "vector=np.array(vector)\n",
    "print(\"10个高频词语在每封邮件中出现的次数：\")\n",
    "print(vector)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "67435c42",
   "metadata": {},
   "outputs": [],
   "source": [
     "from sklearn.naive_bayes import MultinomialNB\n",
     "# Labels for the 18 training mails: 1 = spam, 0 = normal, in the order the\n",
     "# word lists were appended to allwords (spam files first, then normal files)\n",
     "target=np.array([1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0])\n",
     "x,y=vector,target\n",
     "# Multinomial naive Bayes suits the discrete word-count features in vector\n",
     "model=MultinomialNB()\n",
     "model.fit(x,y)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5b3b134b",
   "metadata": {},
   "outputs": [],
   "source": [
    "test=os.listdir(\"item5/item5-ss-data/test\")\n",
    "for testFile in test:\n",
    "   words=getWords(\"item5/item5-ss-data/test/\"+testFile,stop_list)\t\n",
    "   test_x=np.array(tuple(map(lambda x:words.count(x),top_words)))\n",
    "   result=model.predict(test_x.reshape(1,-1))\n",
    "   if result==1:\n",
    "          print('\"'+testFile+'\"'+\"是垃圾邮件\")\n",
    "   else:\n",
    "          print('\"'+testFile+'\"'+\"是正常邮件\")\n"
   ]
  }
 ],
 "metadata": {
  "language_info": {
   "name": "python"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
