"""
关键代码讲解：https://blog.csdn.net/m0_37970224/article/details/86375013
             https://blog.csdn.net/u011475210/article/details/77926012
"""

import os
import random
import re
from functools import reduce

import numpy as np

def createVocabList(docList):
    """Return the list of unique words appearing in any document.

    Args:
        docList: iterable of documents, each a list of word strings.

    Returns:
        A list of the distinct words across all documents (order unspecified,
        since it comes from a set).
    """
    vocab = set()
    for doc in docList:
        vocab.update(doc)
    return list(vocab)


# 输入字符串，输出单词列表
def testParse(bigString): # 输入为一个大的字符串，输出为一个单词列表
    listOfTokens = re.split(r"\W+", bigString)
    return [tok.lower() for tok in listOfTokens if len(tok) > 2]
    # 最后一行负责生成并返回一个列表， 列表的内容为listOfTokens中长度大于2的单词，并统一转为小写


def setWordMap(myVocabList, document):
    """Convert a document into a set-of-words binary vector over the vocabulary.

    Args:
        myVocabList: list of vocabulary words; the output is indexed by it.
        document: list of word strings.

    Returns:
        A list of 0/1 flags, one per vocabulary word; 1 means the word occurs
        in *document*. Words not in the vocabulary are reported via print.
    """
    vec = [0] * len(myVocabList)
    for token in document:
        try:
            vec[myVocabList.index(token)] = 1
        except ValueError:  # token not in vocabulary
            print("in vocablist has no: %s" % token)
    return vec


def trainNB(trainMat, trainClasses):
    """Train a set-of-words naive-Bayes model.

    Args:
        trainMat: 2-D array of 0/1 word vectors, one row per document.
        trainClasses: 1-D array of labels (1 = spam, 0 = ham), same length
            as trainMat.

    Returns:
        (p0V, p1V, pAb): per-word conditional probabilities P(word | class 0)
        and P(word | class 1), and the prior P(class 1). Probabilities are
        raw values (not logs).
    """
    numTrainMat = len(trainMat)
    numWords = len(trainMat[0])
    pAb = sum(trainClasses) / numTrainMat  # prior probability of class 1
    # Laplace smoothing: start every count at 1 and each denominator at 2,
    # so a word never seen in one class gets a small nonzero probability
    # instead of 0 (a zero would annihilate the whole product in classifyNB).
    p0Num = np.ones(numWords)
    p1Num = np.ones(numWords)
    p0Norm = 2.0
    p1Norm = 2.0
    for i in range(numTrainMat):
        if trainClasses[i] == 1:
            p1Num += trainMat[i]
            p1Norm += sum(trainMat[i])
        else:
            p0Num += trainMat[i]
            p0Norm += sum(trainMat[i])
    p0V = p0Num / p0Norm
    p1V = p1Num / p1Norm
    return p0V, p1V, pAb


def classifyNB(testingMat, p0V, p1V, pAb):
    """Classify a 0/1 word vector; return 1 for class 1 (spam), else 0.

    BUG FIX: the original computed reduce(mul, testingMat * pV) over the
    whole vocabulary, so every word ABSENT from the document contributed a
    factor of 0; both class scores were almost always exactly 0 and every
    document was classified as 0. Only the probabilities of words actually
    present in the document belong in the product.
    """
    p1 = pAb          # start from the class priors
    p0 = 1.0 - pAb
    for present, pw1, pw0 in zip(testingMat, p1V, p0V):
        if present:
            p1 *= pw1
            p0 *= pw0
    return 1 if p1 > p0 else 0


def spamTest():
    """Train and evaluate the naive-Bayes spam filter on the email corpus.

    Reads 25 spam messages from email/spam/<i>.txt and 25 ham messages from
    email/ham/<i>.txt, holds out 10 randomly chosen messages as a test set,
    trains on the remaining 40, then prints the error count and error rate.
    """
    docList = []    # one word-list per email (50 rows after the loop)
    classList = []  # label per email, index-aligned with docList
    fullText = []   # all words of all emails, flattened
    for i in range(1, 26):
        # os.path.join keeps the paths portable (the original used literal
        # backslash strings, which also trigger invalid-escape warnings);
        # errors="ignore" tolerates the non-UTF-8 bytes present in this corpus.
        with open(os.path.join("email", "spam", "%d.txt" % i), errors="ignore") as fh:
            wordList = testParse(fh.read())
        docList.append(wordList)   # append: keep the email as its own list
        fullText.extend(wordList)  # extend: flatten words into fullText
        classList.append(1)        # 1 = spam
        with open(os.path.join("email", "ham", "%d.txt" % i), errors="ignore") as fh:
            wordList = testParse(fh.read())
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(0)        # 0 = ham
    # Vocabulary of all distinct words across the 50 documents.
    vocabList = createVocabList(docList)
    # Randomly hold out 10 of the 50 document indices as the test set.
    trainingSet = list(range(50))
    testSet = []
    for _ in range(10):
        randIndex = random.randrange(len(trainingSet))
        testSet.append(trainingSet[randIndex])
        del trainingSet[randIndex]
    # Build the training matrix and its labels.
    trainMat = []
    trainClasses = []
    for docIndex in trainingSet:
        # BUG FIX: the original indexed docList with the stale randIndex left
        # over from the hold-out loop, so every training row was the SAME
        # document; it must use docIndex.
        trainMat.append(setWordMap(vocabList, docList[docIndex]))
        trainClasses.append(classList[docIndex])
    p0V, p1V, pSpam = trainNB(np.array(trainMat), np.array(trainClasses))
    # Evaluate on the held-out documents.
    errorCount = 0
    for docIndex in testSet:
        wordVector = setWordMap(vocabList, docList[docIndex])
        if classifyNB(np.array(wordVector), p0V, p1V, pSpam) != classList[docIndex]:
            errorCount += 1
    print("the num of error is:%d" % errorCount)
    print("the error rate is:%f" % (errorCount / len(testSet)))


# Script entry point: run the full spam-filter experiment.
if __name__ == '__main__':
    spamTest()