# Load the lists of training e-mail file names.
import os

normal_file_list = os.listdir('data/normal')
spam_file_list = os.listdir('data/spam')
print(f'正常邮件；{normal_file_list}')
print(f'异常邮件：{spam_file_list}')

# Load the stop-word list, one word per line.
# BUG FIX: the original opened 'data/' — a directory — which raises
# IsADirectoryError at runtime. Point this at the actual stop-word file.
# TODO(review): confirm the real file name used by this dataset.
stop_list = []
with open('data/stopwords.txt', encoding='utf-8') as f:
    for line in f:
        # rstrip('\n') instead of line[:len(line)-1]: the original chopped the
        # last character of the final line even when it had no trailing newline.
        stop_list.append(line.rstrip('\n'))
print(f'停用词内容；{stop_list}')

# Word-segmentation helper: extract the meaningful words of one e-mail file.
from jieba import cut#中文分词库
from re import sub
def get_words(file, stop_list):
    """Tokenize one e-mail file and return its meaningful words.

    Args:
        file: path to a UTF-8 e-mail text file.
        stop_list: iterable of stop words to exclude.

    Returns:
        List of segmented words longer than one character that are not
        stop words and not pure whitespace.
    """
    # BUG FIX: the original re-filtered the entire accumulated word list on
    # every input line (accidentally quadratic) and only bound `words` inside
    # the loop, so an empty file raised UnboundLocalError. Filter each word
    # once as it is produced instead.
    stop_set = set(stop_list)  # O(1) membership tests instead of O(n) list scans
    words = []
    with open(file, encoding='utf-8') as fh:  # `with` closes the file handle
        for line in fh:
            line = line.strip()
            # Strip punctuation, digits and other noise before segmentation.
            line = sub(r'[.【】 0-9,————,。！~*]', '', line)
            for word in cut(line):  # jieba Chinese word segmentation
                if len(word) > 1 and word.strip() != '' and word not in stop_set:
                    words.append(word)
    return words

# Extract the high-frequency words of the training corpus.
from collections import Counter
from itertools import chain

# BUG FIX: the original referenced undefined camelCase names
# (spamFileList / normalFileList / getWords / stopList) — the definitions
# above are snake_case — and opened the bare file names with no directory
# prefix, so every open() would have failed. Spam files are processed first
# so their rows line up with the leading 1-labels in `target` below.
allwords = []
for spam_file in spam_file_list:
    allwords.append(get_words('data/spam/' + spam_file, stop_list))
for normal_file in normal_file_list:
    allwords.append(get_words('data/normal/' + normal_file, stop_list))
print("训练集中所有的有效词语列表:")
print(allwords)

# The 10 most frequent words across all training e-mails.
freq = Counter(chain(*allwords))          # frequency of every valid word
top_ten = freq.most_common(10)            # [(word, count), ...] top 10
topWords = [pair[0] for pair in top_ten]  # keep just the words
print("训练集中出现频次最高的前10个词语:")
print(topWords)

# Build the feature matrix: occurrence counts of the top-10 words per e-mail.
import numpy as np

# Row i holds, for e-mail i, how many times each high-frequency word appears.
vector = np.array([[words.count(word) for word in topWords]
                   for words in allwords])

print("10个高频词语在每封邮件中出现的次数：")
print(vector)

# Train the classifier.
from sklearn.naive_bayes import MultinomialNB

# Labels for the training set: 1 = spam (first 9 rows), 0 = normal (last 10).
target = np.array([1] * 9 + [0] * 10)
X, Y = vector, target

# Fit a multinomial naive-Bayes model on the word-count features.
model = MultinomialNB()
model.fit(X, Y)

# Predict the class of every e-mail in the test directory.
test = os.listdir("data/test")

for testFile in test:
    # BUG FIX: the original called undefined camelCase names getWords/stopList;
    # the snake_case definitions above are the real ones.
    words = get_words("data/test/" + testFile, stop_list)
    # Occurrence counts of the 10 high-frequency words in this e-mail.
    test_x = np.array(tuple(map(lambda x: words.count(x), topWords)))
    test_x = test_x.reshape(1, -1)  # sklearn expects a 2-D (1, n_features) sample
    result = model.predict(test_x)

    # predict() returns a length-1 array; index it instead of relying on
    # truth-testing a numpy array (deprecated for multi-element arrays).
    if result[0] == 1:
        print(testFile + "是垃圾邮件")
    else:
        print(testFile + "是正常邮件")