from KNN.NaiveBayes.Bayes import *
from numpy import *
"""
构建词袋模型
"""
def bagOfWords2VecMN(vocabList, inputSet):
    """Return a bag-of-words count vector over *vocabList*.

    Each slot of the returned list counts how many times the corresponding
    vocabulary word appears in *inputSet*; words not in the vocabulary are
    ignored.
    """
    # Map each vocabulary word to its first position, mirroring list.index().
    index_of = {}
    for pos, entry in enumerate(vocabList):
        index_of.setdefault(entry, pos)
    counts = [0] * len(vocabList)
    for token in inputSet:
        slot = index_of.get(token)
        if slot is not None:
            counts[slot] += 1
    return counts
"""
    接受一个大字符串 分解为字符列表 把长度小于2的去掉
"""
def textParse(bigString):
    """Split *bigString* into lowercase word tokens.

    Tokens of two characters or fewer are discarded.

    BUG FIX: the original pattern r'\\W*' matched a literal backslash
    followed by zero or more 'W' characters, so real text was never split;
    the intended separator is one-or-more non-word characters, r'\W+'
    (the unescaped r'\W*' variant also matches empty strings, which modern
    re.split rejects).
    """
    import re
    listOfTokens = re.split(r'\W+', bigString)
    return [tok.lower() for tok in listOfTokens if len(tok) > 2]

"""
对贝叶斯垃圾邮件分类器进行自动化处理
"""
def spamTest():
    """Train and evaluate the naive Bayes spam classifier.

    Reads 25 spam emails from 'spam/<i>.txt' (class 1) and 25 ham emails
    from 'ham/<i>.txt' (class 0), holds out 10 randomly chosen documents
    as a test set, trains on the remaining 40, and prints the error rate
    on the hold-out set.
    """
    docList = []; classList = []; fullText = []
    for i in range(1, 26):
        # NOTE(review): no explicit encoding given — assumes the corpus is
        # readable with the platform default; confirm against the data files.
        with open('spam/%d.txt' % i) as fh:
            wordList = textParse(fh.read())
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(1)
        with open('ham/%d.txt' % i) as fh:
            wordList = textParse(fh.read())
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(0)
    vocabList = createVocabList(docList)
    # Randomly hold out 10 of the 50 documents for testing.
    trainingSet = list(range(50)); testSet = []
    for i in range(10):
        # BUG FIX: the original drew the index from len(testSet), so the
        # first draw was always 0 and later draws were biased toward low
        # indices; the hold-out sample must be drawn from trainingSet.
        randIndex = int(random.uniform(0, len(trainingSet)))
        testSet.append(trainingSet[randIndex])
        del(trainingSet[randIndex])
    trainMat = []; trainClasses = []
    for docIndex in trainingSet:
        trainMat.append(setOfWords2Vec(vocabList, docList[docIndex]))
        trainClasses.append(classList[docIndex])
    print('trainClasses: ', trainClasses)
    p0V, p1V, pSpam = trainNB0(trainMat, trainClasses)
    print('p0V: ', p0V)
    print('p1V: ', p1V)  # label fixed: was misprinted as 'p01'
    print('pSpam: ', pSpam)
    errorCount = 0
    for docIndex in testSet:
        wordVector = setOfWords2Vec(vocabList, docList[docIndex])
        if classifyNB(wordVector, p0V, p1V, pSpam) != classList[docIndex]:
            errorCount += 1
    print('the error rate is ', float(errorCount) / len(testSet))
# Guard the demo so importing this module does not trigger training or a
# network fetch (the original ran both at import time).
if __name__ == "__main__":
    spamTest()
    # RSS demo: requires the third-party 'feedparser' package and network
    # access; the Craigslist feed URL may no longer be served.
    import feedparser
    ny = feedparser.parse('http://newyork.craigslist.org/stp/index.rss')
    print(len(ny['entries']))
