import re
import numpy as np
from sys import exit
Path = './Ch04/email/'
# Purpose: build the toy posting data set used by the naive Bayes examples.
# return: (list of tokenized posts, parallel list of class labels)


def loadDataSet():
    """Return (postingList, classVec): six tokenized posts and their labels.

    Labels: 1 = abusive, 0 = not abusive.
    """
    labeled_posts = [
        (['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'], 0),
        (['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'], 1),
        (['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'], 0),
        (['stop', 'posting', 'stupid', 'worthless', 'garbage'], 1),
        (['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'], 0),
        (['quit', 'buying', 'worthless', 'dog', 'food', 'stupid'], 1),
    ]
    postingList = [post for post, _ in labeled_posts]
    classVec = [label for _, label in labeled_posts]
    return postingList, classVec


# Purpose: build the vocabulary (set of unique words) over all documents.
# param: dataSet -- iterable of tokenized documents (lists of words)
# return: list of unique words (element order unspecified, comes from a set)
def creatVocabList(dataSet):
    """Return the deduplicated vocabulary of *dataSet* as a list."""
    vocab = set()
    for document in dataSet:
        vocab.update(document)
    return list(vocab)


# Purpose: map a document onto the vocabulary as a 0/1 presence vector
# (Bernoulli model: word repetitions are ignored, only presence counts).
# param: vocabList -- list of vocabulary words; inputSet -- document tokens
# return: list of 0/1 ints, one per vocabulary word
def setOfWord2Vec(vocabList, inputSet):
    """Return a presence vector: 1 where a vocab word occurs in *inputSet*."""
    present = set(inputSet)
    return [1 if term in present else 0 for term in vocabList]


# Purpose: map a document onto the vocabulary as a count vector
# (multinomial model: word repetitions are counted).
# param: vocabList -- list of vocabulary words; sentence -- document tokens
# return: list of ints, occurrence count of each vocabulary word


def bagOfWord2Vec(vocabList, sentence):
    """Return a count vector: how often each vocab word occurs in *sentence*."""
    counts = {}
    for word in sentence:
        counts[word] = counts.get(word, 0) + 1
    return [counts.get(term, 0) for term in vocabList]


# Purpose: estimate the naive Bayes parameters from training vectors.
# param: trainMatrix -- 2-D numpy array of word vectors (one row per doc);
#        trainCategory -- sequence of 0/1 labels (1 = abusive/spam)
# return: (Pc, log Pw_c1, log Pw_c0) -- prior of class 1 and per-word
#         log conditional probabilities for each class
def trainNB0(trainMatrix, trainCategory):
    """Train naive Bayes with Laplace (add-one) smoothing.

    Log probabilities are returned so the classifier can sum instead of
    multiplying many small numbers (avoids underflow).
    """
    numDocs, _ = trainMatrix.shape
    labels = np.asarray(trainCategory)
    # Prior: fraction of documents labelled 1.
    Pc = labels.sum() / float(numDocs)
    # Per-word counts per class; starting from 1 (not 0) is the Laplace
    # smoothing that keeps unseen words from zeroing the whole product.
    vocabCount1 = 1.0 + trainMatrix[labels == 1].sum(axis=0)
    vocabCount0 = 1.0 + trainMatrix[labels != 1].sum(axis=0)
    # Conditional probabilities P(w | c), normalized by total class counts.
    Pw_c1 = vocabCount1 / vocabCount1.sum()
    Pw_c0 = vocabCount0 / vocabCount0.sum()
    return Pc, np.log(Pw_c1), np.log(Pw_c0)


# Purpose: classify one word vector with trained naive Bayes parameters.
# param: wordVec -- word vector for the document; Pw_c0/Pw_c1 -- per-word
#        log conditionals from trainNB0; Pc1 -- prior P(class = 1)
# return: int, predicted class (1 or 0)


def classifyNB(wordVec, Pw_c0, Pw_c1, Pc1):
    """Return 1 if the class-1 log posterior beats class 0, else 0."""
    # Dot product sums log P(w|c) over exactly the words present in
    # wordVec -- a vectorized stand-in for looping over the vocabulary.
    logPost1 = np.dot(wordVec, Pw_c1) + np.log(Pc1)
    logPost0 = np.dot(wordVec, Pw_c0) + np.log(1.0 - Pc1)
    return 1 if logPost1 > logPost0 else 0


# Purpose: turn a raw email string into a filtered list of word tokens.
# param: bigString -- full text of an email (str)
# return: list of lowercase tokens of length >= 3; numeric expressions are
#         first normalized into placeholder tags (plain lists/strs are
#         returned, not numpy arrays, for compatibility with callers)


def textParse(bigString):
    """Split *bigString* into lowercase word tokens.

    Numeric expressions are replaced by placeholder tags before splitting,
    so e.g. '$100' and '$25' both map to the same feature 'tag_price'.
    """
    # Order matters: more specific patterns must run before the bare
    # number pattern, or tag_num would swallow prices/percentages first.
    paraphrase = [
        ('tag_price', r'\B\$\d+\b'),          # e.g. $100
        ('tag_percent', r'\b\d+%\B'),         # e.g. 50%
        ('tag_quant', r'\b\d+[a-z]{1,3}\b'),  # quantities, e.g. 3kg
        ('tag_num', r'\b\d+\b'),              # any remaining bare number
    ]
    for rep, pat in paraphrase:
        # BUG FIX: the 4th positional argument of re.sub is `count`, not
        # `flags` -- passing re.M (== 8) silently capped each pattern at
        # 8 substitutions. No flag is needed for these patterns.
        bigString = re.sub(pat, rep, bigString)
    # Split on runs of non-word characters (\W+ are the separators).
    wordArray = re.split(r'\W+', bigString)
    # Lowercase and drop short tokens (len < 3), which are mostly noise.
    return [word.lower() for word in wordArray if len(word) > 2]


def spam_test():
    """Train and evaluate the naive Bayes spam filter on the Ch04 corpus.

    Hold-out validation: 10 of the 50 emails are randomly chosen as the
    test set; the other 40 train the classifier (set-of-words model).
    Emails are loaded ham-first, so indices 0..24 are ham and 25..49 spam.

    return: error ratio on the held-out test set (float in [0, 1])
    """
    # Load all 50 emails and tokenize them (ham first, then spam).
    wordArray = []
    spamBegin = 25  # emails with overall index >= 25 are spam
    for label in ['ham', 'spam']:
        for i in range(1, 26):
            filePath = Path + '%s/%d.txt' % (label, i)
            with open(filePath, encoding='UTF-8') as fileHandle:
                wordArray.append(textParse(fileHandle.read()))
    # BUG FIX: the token lists are ragged, so a plain np.array() raises
    # ValueError on NumPy >= 1.24; dtype=object keeps fancy indexing.
    wordArray = np.array(wordArray, dtype=object)
    # Randomly hold out 10 *distinct* indices for testing -- choice with
    # replace=False avoids the duplicates randint could produce.
    testIdx = np.random.choice(50, 10, replace=False)
    trainIdx = np.setdiff1d(np.arange(50), testIdx)  # complement = train set
    # Vocabulary is built over the full corpus (as in the original code).
    vocabList = creatVocabList(wordArray)
    trainCategory = [int(i >= spamBegin) for i in trainIdx]
    # Set-of-words feature vector for every training email.
    trainMatrix = np.array(
        [setOfWord2Vec(vocabList, emailVocab)
         for emailVocab in wordArray[trainIdx]], int)
    # Train: returns (Pc, log Pw_c1, log Pw_c0).
    Pc, logPw_c1, logPw_c0 = trainNB0(trainMatrix, trainCategory)
    # Classify the held-out emails and count the mistakes.
    errNum = 0
    for emailVocab, idx in zip(wordArray[testIdx], testIdx):
        wordVec = setOfWord2Vec(vocabList, emailVocab)
        testLabel = classifyNB(wordVec, logPw_c0, logPw_c1, Pc)
        if testLabel == 1 and idx < spamBegin:
            print('error:(1,0)')  # ham misclassified as spam
            errNum += 1
        if testLabel == 0 and idx >= spamBegin:
            print('error:(0,1)')  # spam misclassified as ham
            errNum += 1
    errRatio = (errNum / len(testIdx))
    return errRatio

# BUG FIX: a stray bare `for` keyword here previously made the whole
# module a SyntaxError and unimportable.

# Reference demo on the toy posting data set (no-op string, kept as
# documentation of how the pieces fit together):
"""
postsList, trainCategory = loadDataSet()
vocabList = creatVocabList(postsList)
trainMatrix = np.zeros([len(trainCategory), len(vocabList)], int)

pc, p1, p0 = trainNB0(trainMatrix, trainCategory)
test1 = ['love', 'my', 'dalmation']
test2 = ['stupid', 'garbage']
t1 = setOfWord2Vec(vocabList, test1)
t2 = setOfWord2Vec(vocabList, test2)
classifyNB(t1, p0, p1, pc)
"""

if __name__ == '__main__':
    # Run the hold-out spam test and report the error ratio,
    # e.g. '错误率：0.100000;%'.
    print('错误率：%f;%%' % spam_test())
