import re
import numpy
import pandas
# Stemming/lemmatization matters for shrinking the vocabulary; it is rarely
# needed for Chinese and mostly applies to other languages.
obj = re.compile(r'\W+')
df = pandas.read_table('./SMSSpamCollection', sep='\t', header=None)
df.columns = ['type', 'message']
groups = df.groupby(by='type').groups
ham_mess = df.iloc[groups['ham']]    # non-spam messages
spam_mess = df.iloc[groups['spam']]  # spam messages
label = []
li = []
# Tokenize every message: all ham first (label 0), then all spam (label 1).
for subset, tag in ((ham_mess, 0), (spam_mess, 1)):
    for row in range(len(subset)):
        label.append(tag)
        tokens = obj.split(subset.iloc[row, 1])
        # keep only tokens longer than 2 characters, lowercased
        li.append([tok.lower() for tok in tokens if len(tok) > 2])
#下面创建词汇表
# Build the vocabulary.
def createVocablist(dataset):
    """Return the sorted vocabulary of a tokenized corpus.

    Parameters
    ----------
    dataset : iterable of token lists (one list per document)

    Returns
    -------
    numpy.ndarray
        Sorted unique tokens over all documents.  For an empty corpus this
        is ``numpy.unique([])`` (an empty array), matching the original.
    """
    # Flatten once and deduplicate in a single pass instead of calling
    # numpy.union1d per document, which was accidentally quadratic.
    all_tokens = [token for document in dataset for token in document]
    return numpy.unique(all_tokens)
#转化为词向量
# Convert a document into a word-count vector.
def createwordsvec(vocablist, input):
    """Turn a token list into a count vector aligned with *vocablist*.

    Multinomial model: each entry counts how often that vocabulary word
    occurs in the document; out-of-vocabulary tokens contribute nothing.
    """
    counts = numpy.zeros(len(vocablist))
    for token in input:
        # The boolean mask selects nothing for unknown tokens, so no
        # explicit membership check is needed.
        counts[vocablist == token] += 1
    return counts
def trainNB(trainmatrix, trainlabel):
    """Estimate naive-Bayes parameters with Laplace (add-one) smoothing.

    Parameters
    ----------
    trainmatrix : 2-D array, one word-count row per document
    trainlabel  : sequence of 0/1 labels (1 = spam)

    Returns
    -------
    (p01, p11, p_abusive)
        Per-word occurrence probabilities for class 0 and class 1, and the
        smoothed prior P(class == 1).  The log is taken later, in the
        classifier, to avoid numeric underflow.
    """
    n_docs = len(trainmatrix)
    n_words = len(trainmatrix[0])
    # Smoothed prior for the spam class.
    p_abusive = (numpy.sum(trainlabel) + 1) / (n_docs + 2)
    # Laplace smoothing: word counts start at 1, class totals at 2.
    ham_counts = numpy.ones(n_words)
    spam_counts = numpy.ones(n_words)
    ham_total = 2
    spam_total = 2
    for doc, cls in zip(trainmatrix, trainlabel):
        if cls == 1:
            spam_counts += doc
            spam_total += numpy.sum(doc)
        else:
            ham_counts += doc
            ham_total += numpy.sum(doc)
    p01 = ham_counts / ham_total
    p11 = spam_counts / spam_total
    return p01, p11, p_abusive
def NBClassifier(p01, p11, pclass1, predict_vec):
    """Classify a word vector by comparing class log-likelihoods.

    Each class score sums log-probabilities of the words present
    (weighted by their counts), log-probabilities of the words absent,
    and the log prior.  Returns 1 (spam) if class 1 scores higher,
    else 0 (ham).
    """
    present = predict_vec
    absent = numpy.logical_not(predict_vec)  # mask of words not in the document
    log_p0 = ((numpy.log(p01) * present).sum()
              + (numpy.log(1 - p01) * absent).sum()
              + numpy.log(1 - pclass1))
    log_p1 = ((numpy.log(p11) * present).sum()
              + (numpy.log(1 - p11) * absent).sum()
              + numpy.log(pclass1))
    return 1 if log_p1 > log_p0 else 0
if __name__ == '__main__':
    # Derive sizes from the data instead of hard-coding 5572/4572/1000 so
    # the script still works if the corpus changes; on the 5572-row
    # SMSSpamCollection the split is identical to the original.
    n_total = len(li)
    n_test = 1000
    index = numpy.random.permutation(n_total)
    train_idx = index[:n_total - n_test]
    test_idx = index[n_total - n_test:]
    my_vocablist = createVocablist(li)
    train_li = [li[i] for i in train_idx]
    train_label = [label[i] for i in train_idx]
    trainmatrix = numpy.zeros((len(train_li), len(my_vocablist)))
    for row, doc in enumerate(train_li):
        trainmatrix[row, :] = createwordsvec(my_vocablist, doc)
    # p0/p1 are per-word occurrence probabilities for ham/spam.
    p0, p1, p_spam = trainNB(trainmatrix, train_label)
    test_li = [li[i] for i in test_idx]
    test_label = [label[i] for i in test_idx]
    right = 0
    for doc, truth in zip(test_li, test_label):
        test_words_vec = createwordsvec(my_vocablist, doc)
        if NBClassifier(p0, p1, p_spam, test_words_vec) == truth:
            right += 1
    # Accuracy over the whole held-out set (was a hard-coded 1000).
    print(right / len(test_li))
