import numpy
# Toy corpus for naive Bayes text classification.
li=[['my','dog','has','flea','problem','help','please'],
    ['maybe','not','take','him','to','dog','park','stupid'],
    ['my','dalmation','is','so','cute','I','love','him'],
    ['stop','posting','stupid','worthless','garbage'],
    ['mr','licks','ate','my','steak','how','to','stop','him'],
    ['quit','buying','worthless','dog','food','stupid']]# training documents; the vocabulary is extracted from these
label=[0,1,0,1,0,1]  # per-document class: 1 marks the abusive documents (see p_abusive in trainNB), 0 the normal ones
# Build the vocabulary from the training documents
def createVocablist(dataset):
    """Return the sorted array of unique words across all documents.

    Parameters
    ----------
    dataset : iterable of iterables of str
        The training documents, each a list of word tokens.

    Returns
    -------
    numpy.ndarray
        Sorted, deduplicated vocabulary (same result as the previous
        incremental union1d loop, computed in a single pass).
    """
    # Flatten once and deduplicate with a single numpy.unique call instead
    # of repeatedly building sorted set unions per document (each union1d
    # re-sorts the whole accumulated vocabulary).
    all_words = [word for document in dataset for word in document]
    return numpy.unique(all_words)
# postlist=numpy.array()
# print(postlist)
# Convert a document into a word-count vector
def createwordsvec(vocablist, input):
    """Map a token list onto a bag-of-words count vector over ``vocablist``.

    Each entry of the returned vector holds how many times the matching
    vocabulary word occurs in ``input`` (multinomial model: occurrences are
    counted, not just flagged). Out-of-vocabulary words are reported and
    skipped.
    """
    wordsvec = numpy.zeros(len(vocablist))
    for word in input:
        if word not in vocablist:
            print(f"the word :{word} is not in my vocablist")
            continue
        wordsvec[vocablist == word] += 1  # count every occurrence
    return wordsvec
def trainNB(trainmatrix, trainlabel):
    """Estimate naive Bayes parameters from count vectors.

    Parameters
    ----------
    trainmatrix : sequence of count vectors (one per document)
    trainlabel : sequence of 0/1 class labels, parallel to trainmatrix

    Returns
    -------
    (p0, p1, p_abusive)
        p0, p1 : per-word conditional probabilities P(word | class) for
        class 0 and class 1 — raw probabilities, not logs (the classifier
        takes the log later to avoid underflow).
        p_abusive : smoothed prior P(class == 1).
    """
    numdocs = len(trainmatrix)
    numwords = len(trainmatrix[0])
    # Smoothed prior: add-one on the class-1 count, add-two on the total.
    p_abusive = (numpy.sum(trainlabel) + 1) / (numdocs + 2)
    # Laplace smoothing: word counts start at 1 and each class total at 2,
    # so no conditional probability can be exactly zero.
    counts = {0: numpy.ones(numwords), 1: numpy.ones(numwords)}
    totals = {0: 2, 1: 2}
    for doc, cls in zip(trainmatrix, trainlabel):
        counts[cls] += doc
        totals[cls] += numpy.sum(doc)
    return counts[0] / totals[0], counts[1] / totals[1], p_abusive
def NBClassifier(p01, p11, pclass1, predict_vec):
    """Classify a word vector: 1 if the class-1 log posterior wins, else 0.

    Present words contribute log P(word | class) weighted by their count;
    absent words contribute log(1 - P(word | class)); the class prior is
    added last. Logs keep the tiny probability products from underflowing.
    """
    absent = numpy.logical_not(predict_vec)  # indicator of words NOT in the document

    def log_posterior(p_word, prior):
        # sum of per-word log-likelihoods plus the log prior
        return (numpy.sum(numpy.log(p_word) * predict_vec)
                + numpy.sum(numpy.log(1 - p_word) * absent)
                + numpy.log(prior))

    score0 = log_posterior(p01, 1 - pclass1)
    score1 = log_posterior(p11, pclass1)
    return 1 if score1 > score0 else 0
if __name__ == '__main__':
    # Train on the toy corpus, then classify two held-out sentences.
    my_vocablist = createVocablist(li)
    trainmatrix = numpy.zeros((len(li), len(my_vocablist)))
    for row, document in enumerate(li):
        trainmatrix[row, :] = createwordsvec(my_vocablist, document)
    # p0/p1 are the per-word occurrence probabilities for each class
    p0, p1, p_abusive = trainNB(trainmatrix, label)
    # Expected output: 0 (normal) for the first sentence, 1 (abusive) for the second.
    for test_words in (['love', 'my', 'dalmation'], ['stupid', 'garbage']):
        test_words_vec = createwordsvec(my_vocablist, test_words)
        print(NBClassifier(p0, p1, p_abusive, test_words_vec))