from numpy import *
def loadDataSet():
    """Return the toy training corpus.

    Returns:
        (postingList, classVec): six tokenized posts and their labels,
        where 0 = normal speech and 1 = abusive speech.
    """
    postingList = [
        ['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'],
        ['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'],
        ['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'],
        ['stop', 'posting', 'stupid', 'worthless', 'garbage'],
        ['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'],
        ['quit', 'buying', 'worthless', 'dog', 'food', 'stupid'],
    ]
    classVec = [0, 1, 0, 1, 0, 1]  # 0 = normal post, 1 = abusive post
    return postingList, classVec
"""
    创建不重复的包含所有词汇的词汇表
    :return 所有词汇
"""
def createVocabList(dataSet):
    vocabSet = set([])
    for document in dataSet:
        vocabSet = vocabSet | set(document)
    return list(vocabSet)
"""
    传入词汇表和某文档
    文档中存在的词将矩阵值设置为1 不存在的设置为0
    :return 文档的词汇向量
"""
def setOfWords2Vec(vocabList, inputSet):
    returnVec = [0]*len(vocabList)
    for word in inputSet:
        if word in vocabList:
            returnVec[vocabList.index(word)] = 1
        else:
            print("the word %s is not in myVocabulary!" % word)
    return returnVec
"""
    词汇的向量表
"""
def getReturnMat(postingList, myVacobList):
    trainMat = []
    for postingDoc in postingList:
        trainMat.append(setOfWords2Vec(myVacobList, postingDoc))
    return trainMat
"""
    为避免多个概率中一个为0影响结果 初始化时使用ones(),初始概率设置为2.0
"""
def trainNB0(trainMatrix, trainCategory):
    numTrainDocs = len(trainMatrix)
    numWords = len(trainMatrix[0])
    #print(numWords)
    #print(numTrainDocs)
    pAbusive = sum(trainCategory) / float(numTrainDocs)
    p0Num = ones(numWords)#zeros(numWords)
    p1Num = ones(numWords)#zeros(numWords)
    #print(p0Num, p1Num)
    p0Denom = 2.0; p1Denom = 2.0
    for i in range(numTrainDocs):
        if trainCategory[i] == 1:
            p1Num += trainMatrix[i]
            p1Denom += sum(trainMatrix[i])
        else:
            p0Num += trainMatrix[i]
            p0Denom += sum(trainMatrix[i])
    p1Vec = log(p1Num/p1Denom)
    p0Vec = log(p0Num/p0Denom)
    return p0Vec, p1Vec, pAbusive

def classifyNB(vec2Classify, p0Vec, p1Vec, pClass):
    """Classify one word vector with the trained naive Bayes model.

    :param vec2Classify: 0/1 word vector of the document to classify
    :param p0Vec: log P(word|class 0) vector from trainNB0
    :param p1Vec: log P(word|class 1) vector from trainNB0
    :param pClass: prior P(class = 1) (pAbusive from trainNB0)
    :return: 1 if the abusive class scores higher, else 0
    """
    # BUG FIX: the priors were swapped — pClass is P(class=1), so
    # log(pClass) belongs on the class-1 score and log(1 - pClass)
    # on the class-0 score. (Debug prints removed.)
    p0 = sum(vec2Classify * p0Vec) + log(1.0 - pClass)
    p1 = sum(vec2Classify * p1Vec) + log(pClass)
    return 1 if p1 > p0 else 0
"""
    
"""
def testingNB():
    posingList, classVec = loadDataSet()
    myVocabList = createVocabList(posingList)
    TrainMatrix = getReturnMat(posingList, myVocabList)
    p0V, p1V, pAb = trainNB0(TrainMatrix, classVec)
    testEntry = ['love', 'my', 'dalmation']
    thisDoc = array(setOfWords2Vec(myVocabList, testEntry))
    print(thisDoc)
    print(testEntry, 'classified as: ', classifyNB(thisDoc, p0V, p1V, pAb))
    testEntry = ['stupid', 'garbage']
    thisDoc = array(setOfWords2Vec(myVocabList, testEntry))
    print(testEntry, 'classified as: ', classifyNB(thisDoc, p0V, p1V, pAb))
#testingNB()