# coding:utf-8
# from sklearn import datasets
# iris = datasets.load_iris()
# from sklearn.naive_bayes import GaussianNB
# clf = GaussianNB()
# clf = clf.fit(iris.data,iris.target)
# y_pred = clf.predict(iris.data)
# print("高斯朴素贝叶斯，样本总数： %d 错误样本数 : %d" % (iris.data.shape[0],(iris.target != y_pred).sum()))
############多项式模型######################
""" 特征是离散的使用多项式模型,
 计算先验概率P(yk)条件概率p(Xi|Yk)做平滑处理
 P(Yk) = (Nyk + α) / (N + kα)
 N是总的样本个数，k是总的类别个数，Nyk是类别为yk的样本个数，α是平滑值
 P(Xi|Yk) = (Nyk,xi + α) / (Nyk + nα)
 Nyk是类别为yk的样本个数，n是特征的维数，Nyk,xi是类别为yk的样本中，第i维特征的值是xi的样本个数，α是平滑值
 当α=1时，称作Laplace平滑，当0<α<1时，称作Lidstone平滑，α=0时不做平滑
 如果不做平滑，当某一维特征的值xi没在训练样本中出现过时，会导致P(xi|yk)=0，从而导致后验概率为0。加上平滑就可以克服这个问题
"""
#############高斯模型################
"""
当特征是连续变量的时候，运用多项式模型就会导致很多P(xi|yk)=0（不做平滑的情况下），
此时即使做平滑，所得到的条件概率也难以描述真实情况。所以处理连续的特征变量，应该采用高斯模型(假设服从高斯分布（正态分布）)
假设男性和女性的身高、体重、脚掌都是正态分布，通过样本计算出均值和方差，也就是得到正态分布的密度函数。
有了密度函数，就可以把值代入，算出某一点的密度函数的值
"""
##############伯努利模型########################
"""与多项式模型一样，伯努利模型适用于离散特征的情况
 伯努利模型中，条件概率P(xi|yk)的计算方式是：
 当特征值xi为1时，P(xi|yk)=P(xi=1|yk)；
 当特征值xi为0时，P(xi|yk)=1-P(xi=1|yk)
"""
#########################################
import random
from numpy import *

def main():
    """Interactive multinomial naive-Bayes ad classifier.

    Mode 1 trains on 1000 ad / 1000 non-ad pre-segmented documents, reports
    the error rate on a random 10% hold-out and pickles the model
    (args.pkl / vocab.pkl).  Mode 2 loads the pickled model and classifies
    free text segmented with jieba.
    """
    import pickle
    import jieba
    import time

    stop_word = []

    def loadStopword():
        """Read the stop-word list, one word per line, into stop_word."""
        # NOTE(review): path and gb2312 encoding are machine-specific —
        # TODO: make configurable.
        with open(r'F:\Archive\files\stopwords.txt', 'r', encoding='gb2312') as f:
            for line in f:
                stop_word.append(line.strip())

    def createVocabList(documentSet):
        """Build the vocabulary (bag-of-words space).

        params:
            documentSet: training documents as lists of tokens
        return: list of unique tokens
        """
        vocabSet = set([])
        for document in documentSet:
            vocabSet = vocabSet | set(document)  # union of the two sets
        return list(vocabSet)

    def textParse(bigString, load_from_file=True):
        """Tokenize *bigString* into a list of str tokens.

        When load_from_file is True the text is already segmented with
        '/ ' separators; otherwise jieba segments it and stop words are
        removed.
        """
        if load_from_file:
            listOfWord = bigString.split('/ ')
            return [x for x in listOfWord if x != ' ']
        # BUG FIX: the original encoded jieba tokens to utf-8 bytes, which
        # could never match the str vocabulary built from the training files.
        return [w for w in jieba.cut(bigString, cut_all=False)
                if w not in stop_word]

    # Class labels (binary classification only).
    CLASS_AD = 1
    CLASS_NOT_AD = 0

    def testClassify():
        """Train on the labelled corpus, test on a 10% hold-out, persist the model."""
        listAllDoc = []
        listClasses = []

        print("----loading document list----")

        # 1000 documents labelled as ads
        for i in range(1, 1001):
            wordList = textParse(open('subject/subject_ad/%d.txt' % i).read())
            listAllDoc.append(wordList)
            listClasses.append(CLASS_AD)
        # 1000 documents labelled as non-ads
        for i in range(1, 1001):
            wordList = textParse(open('subject/subject_notad/%d.txt' % i).read())
            # BUG FIX: these were appended to a separate, unused list
            # (listADDoc), so the training corpus held no negative examples
            # while the label list still had 2000 entries.
            listAllDoc.append(wordList)
            listClasses.append(CLASS_NOT_AD)

        print("----creating vocab list----")
        # Build the bag-of-words vocabulary.
        listVocab = createVocabList(listAllDoc)

        docNum = len(listAllDoc)
        testSetNum = int(docNum * 0.1)

        # BUG FIX: range() is immutable in Python 3; a list is needed so
        # test indices can be deleted below.
        trainingIndexSet = list(range(docNum))
        testSet = []

        # Randomly move 10% of the indices into the test set.
        for i in range(testSetNum):
            randIndex = int(random.uniform(0, len(trainingIndexSet)))
            testSet.append(trainingIndexSet[randIndex])
            del trainingIndexSet[randIndex]

        trainMatrix = []
        trainClasses = []

        for docIndex in trainingIndexSet:
            trainMatrix.append(bagOfWords2VecMN(listVocab, listAllDoc[docIndex]))
            trainClasses.append(listClasses[docIndex])

        print("----traning begin----")
        pADV, pNotADV, pClassAD = trainNaiveBayes(array(trainMatrix), array(trainClasses))

        print("----traning complete----")
        print("pADV:", pADV)
        print("pNotADV:", pNotADV)
        print("pClassAD:", pClassAD)
        print("ad: %d, not ad:%d" % (CLASS_AD, CLASS_NOT_AD))

        # Persist model parameters and vocabulary for adClassify().
        args = {'pADV': pADV, 'pNotADV': pNotADV, 'pClassAD': pClassAD}
        with open("args.pkl", "wb") as fw:
            pickle.dump(args, fw, 2)
        with open("vocab.pkl", "wb") as fw:
            pickle.dump(listVocab, fw, 2)

        errorCount = 0
        for docIndex in testSet:
            vecWord = bagOfWords2VecMN(listVocab, listAllDoc[docIndex])
            if classifyNaiveBayes(array(vecWord), pADV, pNotADV, pClassAD) != listClasses[docIndex]:
                errorCount += 1
                # BUG FIX: tokens are already str in Python 3; the old
                # decode()/encode('gbk') round-trip raised AttributeError.
                print("classfication error", ' '.join(listAllDoc[docIndex]))
        print('the error rate is: ', float(errorCount) / len(testSet))

    def classifyNaiveBayes(vec2Classify, pADVec, pNotADVec, pClass1):
        """Binary log-space classifier; returns CLASS_AD or CLASS_NOT_AD."""
        pIsAD = sum(vec2Classify * pADVec) + log(pClass1)  # element-wise mult
        pIsNotAD = sum(vec2Classify * pNotADVec) + log(1.0 - pClass1)
        return CLASS_AD if pIsAD > pIsNotAD else CLASS_NOT_AD

    def trainNaiveBayes(trainMatrix, trainClasses):
        """Train the multinomial model with Laplace smoothing.

        params:
            trainMatrix: bag-of-words count matrix, one row per document
            trainClasses: label (CLASS_AD / CLASS_NOT_AD) per row
        return: (log P(w|ad), log P(w|not ad), P(ad))
        """
        numTrainDocs = len(trainMatrix)
        numWords = len(trainMatrix[0])  # vector dimension = vocabulary size

        numIsAD = len([x for x in trainClasses if x == CLASS_AD])
        pClassAD = numIsAD / float(numTrainDocs)

        # Laplace smoothing: word counts start at 1, denominators at 2, so
        # an unseen word never produces a zero probability.
        pADNum = ones(numWords)
        pNotADNum = ones(numWords)
        pADDenom = 2.0
        pNotADDenom = 2.0

        for i in range(numTrainDocs):
            if trainClasses[i] == CLASS_AD:
                pADNum += trainMatrix[i]
                pADDenom += sum(trainMatrix[i])
            else:
                pNotADNum += trainMatrix[i]
                pNotADDenom += sum(trainMatrix[i])

        # Log probabilities avoid floating-point underflow.
        return log(pADNum / pADDenom), log(pNotADNum / pNotADDenom), pClassAD

    def bagOfWords2VecMN(listVocab, inputSet):
        """Map a token list to a count vector of dimension len(listVocab)."""
        returnVec = [0] * len(listVocab)
        for word in inputSet:
            if word in listVocab:
                returnVec[listVocab.index(word)] += 1
        return returnVec

    def adClassify(text):
        """Load the pickled model and classify *text* (raw, unsegmented)."""
        with open("args.pkl", "rb") as fr:
            args = pickle.load(fr)
        pADV = args['pADV']
        pNotADV = args['pNotADV']
        pClassAD = args['pClassAD']

        with open("vocab.pkl", "rb") as fr:
            listVocab = pickle.load(fr)

        if len(listVocab) == 0:
            print("got no args")
            return

        text = textParse(text, False)
        vecWord = bagOfWords2VecMN(listVocab, text)
        class_type = classifyNaiveBayes(array(vecWord), pADV, pNotADV, pClassAD)

        print("classfication type:%d" % class_type)

    # BUG FIX: loadStopword() was called twice (once right after its
    # definition), duplicating every stop word in the list.
    loadStopword()
    while True:
        opcode = input("input 1 for training, 2 for ad classify: ")
        if opcode.strip() == "1":
            begtime = time.time()
            testClassify()
            print("cost time total:", time.time() - begtime)
        else:
            text = input("input the text:")
            adClassify(text)


# 从文本中构建词向量
def loadDataSet():
    """Return a toy corpus for the demo.

    return:
        postingList: six posts, each a list of word tokens
        classVec: parallel labels (1 = abusive, 0 = not)
    """
    raw_posts = [
        'my dog has flea problems help please',
        'maybe not take him to dog park stupid',
        'my dalmation is so cute I love him',
        'stop posting stupid worthless garbage',
        'mr licks ate my steak how to stop him',
        'quit buying worthless dog food stupid',
    ]
    postingList = [post.split() for post in raw_posts]
    classVec = [0, 1, 0, 1, 0, 1]  # 1 is abusive, 0 not
    return postingList, classVec

# 创建词汇表 唯一地词汇
def createVocabList(dataSet):
    """Return a list of every unique token appearing in *dataSet*."""
    vocab = set()
    for doc in dataSet:
        vocab.update(doc)  # accumulate the union of all documents
    return list(vocab)

# 词集模型~每个词的出现与否作为一个特征
def setOfWords2Vec(vocablist, inputSet):
    """Set-of-words model: 0/1 presence vector over *vocablist*.

    Words absent from the vocabulary are reported but otherwise ignored.
    """
    presence = [0] * len(vocablist)
    for token in inputSet:
        try:
            presence[vocablist.index(token)] = 1
        except ValueError:
            print("the word: %s is not in my Vocabulary!" % token)
    return presence

def trainNB0(trainMatrix, trainCategory):
    """Estimate naive-Bayes parameters from a labelled count matrix.

    params:
        trainMatrix: per-document word-count (or presence) vectors
        trainCategory: label per document (1 = abusive, anything else = 0)
    return: (log P(w|class 0), log P(w|class 1), P(class 1))
    """
    numDocs = len(trainMatrix)
    numWords = len(trainMatrix[0])
    # Prior of class 1: fraction of documents labelled 1.
    pAbusive = sum(trainCategory) / float(numDocs)
    # Laplace smoothing: counts start at 1 and denominators at 2 so an
    # unseen word never yields a zero probability.
    wordCounts = {0: ones(numWords), 1: ones(numWords)}
    totals = {0: 2.0, 1: 2.0}
    for rowVec, label in zip(trainMatrix, trainCategory):
        key = 1 if label == 1 else 0
        wordCounts[key] += rowVec
        totals[key] += sum(rowVec)
    # Log probabilities guard against floating-point underflow when many
    # small conditional probabilities are multiplied at classify time.
    p0Vect = log(wordCounts[0] / totals[0])
    p1Vect = log(wordCounts[1] / totals[1])
    return p0Vect, p1Vect, pAbusive

def classifyNB(vec2Classify, p0Vec, p1Vec, pClass1):
    """Return 1 if *vec2Classify* scores higher under class 1, else 0.

    Scores are computed in log space — ln(a*b) = ln(a) + ln(b) — so the
    product of many small conditional probabilities never underflows.
    The element-wise product weights each word's log probability by its
    count in the input vector.
    """
    score1 = log(pClass1) + sum(vec2Classify * p1Vec)
    score0 = log(1 - pClass1) + sum(vec2Classify * p0Vec)
    return 1 if score1 > score0 else 0


def testingNB():
    """Train on the toy corpus and print classifications for two sample posts."""
    posts, labels = loadDataSet()
    vocab = createVocabList(posts)
    trainMat = [setOfWords2Vec(vocab, post) for post in posts]
    p0V, p1V, pAb = trainNB0(array(trainMat), array(labels))
    for testEntry in (['love', 'my', 'dalmation'], ['stupid', 'garbage']):
        thisDoc = array(setOfWords2Vec(vocab, testEntry))
        print(testEntry, 'classified as: ', classifyNB(thisDoc, p0V, p1V, pAb))


# 一个词在文档中出现不止一次，这可能意味着包含该词是否出现在文档中所不能表达的某种信息
# 词袋模型~词袋中，每个单词可以出现多次
def bagOfWords2Vec(vocablist, inputSet):
    """Bag-of-words model: count vector over *vocablist*.

    params:
        vocablist: vocabulary list defining the vector dimensions
        inputSet: tokens of one document (repeats allowed)
    return: list of per-word occurrence counts
    """
    returnVec = [0] * len(vocablist)
    for word in inputSet:
        if word in vocablist:
            # BUG FIX: the bag model counts occurrences; '= 1' made this
            # identical to the set-of-words model, discarding frequencies.
            returnVec[vocablist.index(word)] += 1
    return returnVec

#电子邮件垃圾过滤
def EmailFilter():
    """Spam filter demo: train/test naive Bayes on 25 spam + 25 ham e-mails."""
    def textParse(bigString):
        """Split on non-word runs; keep lowercase tokens longer than 2 chars."""
        import re
        # BUG FIX: r'\W*' also matches the empty string between characters,
        # so re.split chopped the text into single letters; '\W+' is intended.
        listOfTokens = re.split(r'\W+', bigString)
        return [tok.lower() for tok in listOfTokens if len(tok) > 2]

    def spanTest():
        """Load the corpus, hold out 10 random docs, train and report error rate."""
        docList = []
        classList = []
        fullText = []
        for i in range(1, 26):
            # spam examples (label 1)
            wordList = textParse(open(r'F:\Resources\Dataset\email\spam\%d.txt' % i).read())
            docList.append(wordList)
            fullText.extend(wordList)
            classList.append(1)
            # ham examples (label 0) — BUG FIX: the original re-read the spam
            # folder here, so every "ham" document was actually spam.
            wordList = textParse(open(r'F:\Resources\Dataset\email\ham\%d.txt' % i).read())
            docList.append(wordList)
            fullText.extend(wordList)
            classList.append(0)
        vocabList = createVocabList(docList)  # create vocabulary
        # BUG FIX: range() is immutable in Python 3; a list is required
        # so hold-out indices can be deleted below.
        trainingSet = list(range(50))
        testSet = []
        for i in range(10):
            randIndex = int(random.uniform(0, len(trainingSet)))
            testSet.append(trainingSet[randIndex])
            del trainingSet[randIndex]
        trainMat = []
        trainClasses = []
        for docIndex in trainingSet:  # train the classifier (get probs) trainNB0
            trainMat.append(bagOfWords2Vec(vocabList, docList[docIndex]))
            trainClasses.append(classList[docIndex])
        p0V, p1V, pSpam = trainNB0(array(trainMat), array(trainClasses))
        errorCount = 0
        for docIndex in testSet:  # classify the remaining items
            wordVector = bagOfWords2Vec(vocabList, docList[docIndex])
            if classifyNB(array(wordVector), p0V, p1V, pSpam) != classList[docIndex]:
                errorCount += 1
        print('the error rate is: ', float(errorCount) / len(testSet))

    # BUG FIX: spanTest was defined but never invoked, making EmailFilter() a no-op.
    spanTest()

#  使用朴素贝叶斯分类器从个人广告中获取区域倾向 TODO:

# Script entry point: runs the toy abusive-post demo.  The interactive ad
# classifier (main()) and the e-mail filter (EmailFilter()) are defined but
# not invoked here.
if __name__ == "__main__":
    testingNB()