# 使用朴素贝叶斯过滤垃圾邮件
import bayes
import random

# Import and parse the text files: 25 spam and 25 ham emails, tokenized
# by bayes.textParse. docList holds per-document token lists, fullText a
# flat stream of all tokens, classList the labels (1 = spam, 0 = ham).
docList = []; classList = []; fullText = []
for i in range(1, 26):
    # BUG FIX: open(...).read() leaked file handles; close deterministically.
    with open('email/spam/%d.txt' % i) as fp:
        wordList = bayes.textParse(fp.read())
    docList.append(wordList)
    fullText.extend(wordList)
    classList.append(1)
    with open('email/ham/%d.txt' % i) as fp:
        wordList = bayes.textParse(fp.read())
    docList.append(wordList)
    fullText.extend(wordList)
    classList.append(0)
# Build the vocabulary: every distinct word seen across all documents.
vocabList = bayes.createVocabList(docList)
# Hold out 10 randomly chosen emails (by index 0..49) as the test set;
# the remaining 40 indices stay in the training set.
trainingSet = list(range(50))
testSet = []
for _ in range(10):
    randIndex = int(random.uniform(0, len(trainingSet)))
    # pop() both records the held-out index and removes it from training.
    testSet.append(trainingSet.pop(randIndex))
# Turn every training document into a bag-of-words count vector over the
# vocabulary, and collect the matching class labels.
trainMat = [bayes.bagOfWords2VecMN(vocabList, docIdx) if False else
            bayes.bagOfWords2VecMN(vocabList, docList[docIdx])
            for docIdx in trainingSet]
trainClasses = [classList[docIdx] for docIdx in trainingSet]
# Train the classifier: p0V/p1V are the per-class log-probability word
# vectors, pSpam the prior probability that a document is spam.
p0V, p1V, pSpam = bayes.trainNB0(bayes.array(trainMat), bayes.array(trainClasses))
# Classify each held-out document and record truth vs. prediction.
testMat = []
testClasses = []
predictClasses = []
for docIdx in testSet:
    wordVector = bayes.bagOfWords2VecMN(vocabList, docList[docIdx])
    testMat.append(wordVector)
    testClasses.append(classList[docIdx])
    predictClasses.append(bayes.classifyNB(bayes.array(wordVector), p0V, p1V, pSpam))
# Misclassification rate: fraction of test documents predicted wrongly.
errorCount = sum(1 for predicted, actual in zip(predictClasses, testClasses)
                 if predicted != actual)
print('the error rate is: ', float(errorCount) / len(testSet))

# Show how spam and ham differ in word usage: print each class's 20
# highest-probability vocabulary words. argsort of the negated vector
# yields indices in descending probability order.
import numpy as np
p0VSort = np.argsort(-p0V)
p1VSort = np.argsort(-p1V)
p0List = p0VSort.tolist()
p1List = p1VSort.tolist()
p0Max20 = p0List[0:20]
p1Max20 = p1List[0:20]
print('正常邮件最常见用词')
# BUG FIX: the loops iterated over p0Max5/p1Max5, which are never
# defined anywhere (NameError at runtime); the lists built above are
# p0Max20/p1Max20.
for i in p0Max20:
    print(' ', vocabList[i])
print('垃圾邮件最常见用词')
for i in p1Max20:
    print(' ', vocabList[i])

# 封装的垃圾邮件测试函数
#bayes.spamTest()