"""
Created on 2018/1/24 20:31 星期三
@author: Matt  zhuhan1401@126.com
Description: 使用朴素贝叶斯进行文档分类

训练算法：计算不同的独立特征的条件概率
测试算法：计算错误率
"""

from numpy import *
import re


# Supply the toy training corpus used throughout this module.
def loadDataSet():
    """Return (postingList, classVec): six tokenized posts and their labels.

    Label 1 marks an abusive post, label 0 a normal one.
    """
    postingList = [
        ['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'],
        ['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'],
        ['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'],
        ['stop', 'posting', 'stupid', 'worthless', 'garbage'],
        ['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'],
        ['quit', 'buying', 'worthless', 'dog', 'food', 'stupid'],
    ]
    classVec = [0, 1, 0, 1, 0, 1]  # 1 = abusive, 0 = not abusive
    return postingList, classVec

# Build the vocabulary (list of unique words) from a list of documents.
def createVocabSet2List(dataSet):
    """Return a list of every distinct word appearing in *dataSet*.

    dataSet -- iterable of documents, each a list of word strings.
    NOTE: set iteration order is unspecified, so the order of the
    returned list may vary between runs; callers locate words via
    ``.index()``, which tolerates that.
    """
    vocabSet = set()  # set() is the idiomatic empty set; set([]) builds a throwaway list
    for document in dataSet:
        vocabSet |= set(document)  # in-place union of the document's words
    return list(vocabSet)


# Set-of-words model: mark with 1 each vocabulary word present in the document.
def setOfWords2Vec(vocabList, inputSet):
    """Return a 0/1 vector of len(vocabList); entry i is 1 iff vocabList[i] occurs.

    vocabList -- the vocabulary list
    inputSet  -- the document, as a list of words
    Words absent from the vocabulary are reported on stdout.
    """
    returnVec = [0] * len(vocabList)
    for word in inputSet:
        try:
            returnVec[vocabList.index(word)] = 1
        except ValueError:  # word not in the vocabulary
            print('the word:%s is not in my Vocabulary!' % word)
    return returnVec


# Naive Bayes trainer: (document matrix, document labels) -> model parameters.
def trainNB0(trainMatrix, trainCategory):
    """Estimate naive Bayes parameters from training vectors.

    trainMatrix   -- sequence of word-count/presence vectors, one per document
    trainCategory -- 0/1 labels aligned with trainMatrix rows (1 = abusive)
    Returns (p0Vect, p1Vect, pAbusive): per-class log word probabilities
    and the prior probability of class 1.
    """
    docCount = len(trainMatrix)
    wordCount = len(trainMatrix[0])  # every row has the vocabulary length
    # Prior: labels are 0/1, so their sum counts the abusive documents.
    pAbusive = sum(trainCategory) / float(docCount)
    # Laplace smoothing: start counts at 1 and denominators at 2 so an
    # unseen word can never zero out a whole product of probabilities.
    p0Num, p1Num = ones(wordCount), ones(wordCount)
    p0Denom, p1Denom = 2.0, 2.0
    for row, label in zip(trainMatrix, trainCategory):
        if label == 1:
            p1Num += row  # element-wise vector accumulation
            p1Denom += sum(row)
        else:
            p0Num += row
            p0Denom += sum(row)
    # Work in log space to avoid floating-point underflow downstream.
    return log(p0Num / p0Denom), log(p1Num / p1Denom), pAbusive


# Test One
# Build the training matrix: each post becomes a 0/1 vector marking which
# vocabulary words it contains. NOTE: this runs at import time as a side effect.
listOPosts, listClasses = loadDataSet()
myVocabList = createVocabSet2List(listOPosts)
trainMat = []  # will become a len(listOPosts) x len(myVocabList) matrix
for postinDoc in listOPosts:  # one set-of-words vector per post
    trainMat.append(setOfWords2Vec(myVocabList, postinDoc))
p0V, p1V, pAb = trainNB0(trainMat, listClasses)


# Naive Bayes classification function.
def classifyNB(vec2Classify, p0Vec, p1Vec, pClass1):
    """Classify *vec2Classify* using trained log-probability vectors.

    Adds each class's log-likelihood (sum of element-wise products) to its
    log prior, then returns 1 (abusive) or 0 (normal) for the larger score.
    """
    logScore1 = sum(vec2Classify * p1Vec) + log(pClass1)
    logScore0 = sum(vec2Classify * p0Vec) + log(1.0 - pClass1)
    return 1 if logScore1 > logScore0 else 0
def testingNB():
    """Train on the toy corpus and print predictions for two sample posts."""
    posts, labels = loadDataSet()
    vocab = createVocabSet2List(posts)
    matrix = [setOfWords2Vec(vocab, post) for post in posts]
    p0V, p1V, pAb = trainNB0(array(matrix), array(labels))
    # Classify one benign and one abusive sample.
    for testEntry in (['love', 'my', 'dalmation'], ['stupid', 'garbage']):
        thisDoc = array(setOfWords2Vec(vocab, testEntry))
        print(testEntry, 'classified as:', classifyNB(thisDoc, p0V, p1V, pAb))


# Bag-of-words model: a word may occur several times, so count occurrences
# instead of only marking presence (contrast with setOfWords2Vec).
def bagOfWords2VecMN(vocabList, inputSet):
    """Return a count vector: entry i is how many times vocabList[i] occurs."""
    returnVec = [0] * len(vocabList)
    # Map each word to its FIRST position, matching list.index semantics.
    position = {}
    for idx, word in enumerate(vocabList):
        position.setdefault(word, idx)
    for word in inputSet:
        if word in position:
            returnVec[position[word]] += 1
    return returnVec


# Test Two
# testingNB()  # uncomment to run the toy-corpus demo

# Example 4.6: spam filtering with naive Bayes.
# Parse each text file into a token list, train with trainNB0, verify the
# parsing per document, and measure the error rate with classifyNB.
def textParse(bigString):
    """Tokenize *bigString*: split on non-word runs, lowercase, drop tokens <= 2 chars.

    Bug fix: the original pattern r'\\W*' also matches the empty string;
    since Python 3.7 re.split splits on such empty matches, shattering the
    text into single characters that the length filter then discards
    entirely. r'\\W+' splits only on actual runs of non-word characters.
    """
    listOfTokens = re.split(r'\W+', bigString)
    return [tok.lower() for tok in listOfTokens if len(tok) > 2]
def spamTest():
    """Train and evaluate the spam filter with hold-out cross-validation.

    Reads 25 spam and 25 ham messages from email/spam/ and email/ham/,
    randomly holds out 10 of the 50 documents for testing, trains a naive
    Bayes model on the remaining 40, and prints error count and rate.
    """
    docList = []
    classList = []
    fullTest = []
    # Load and tokenize every message (label 1 = spam, 0 = ham).
    for i in range(1, 26):
        # Use context managers so file handles are closed (original leaked them).
        with open('email/spam/%d.txt' % i) as fp:
            wordList = textParse(fp.read())
        docList.append(wordList)
        fullTest.extend(wordList)
        classList.append(1)
        with open('email/ham/%d.txt' % i) as fp:
            wordList = textParse(fp.read())
        docList.append(wordList)
        fullTest.extend(wordList)
        classList.append(0)
    vocabList = createVocabSet2List(docList)
    # Randomly move 10 of the indices 0..49 into the held-out test set.
    trainingSet = list(range(50))
    testSet = []
    for i in range(10):
        randIndex = int(random.uniform(0, len(trainingSet)))
        testSet.append(trainingSet[randIndex])
        del trainingSet[randIndex]
    trainMat = []
    trainClasses = []
    for docIndex in trainingSet:
        trainMat.append(setOfWords2Vec(vocabList, docList[docIndex]))
        trainClasses.append(classList[docIndex])
    p0V, p1V, pSpam = trainNB0(array(trainMat), array(trainClasses))
    errorCount = 0
    # Classify the held-out documents and count the mistakes.
    for docIndex in testSet:
        wordVector = setOfWords2Vec(vocabList, docList[docIndex])
        if classifyNB(array(wordVector), p0V, p1V, pSpam) != classList[docIndex]:
            errorCount += 1
    # Bug fix: the original printed errorCount/100*len(testSet), which is not
    # the error rate (it only looked plausible because len(testSet)**2 == 100).
    # The message's '%%' implies a percentage: 100 * errors / test-set size.
    print('the error count is %d, the error rate is : %f %% '
          % (errorCount, 100.0 * errorCount / len(testSet)))


# Test Three
spamTest()  # runs at import time; requires email/spam/*.txt and email/ham/*.txt on disk
