from numpy import *
import random

# Build the vocabulary: every unique word seen across all documents.
def createVocabList(dataSet):
	"""Return a list of the distinct words appearing in any document.

	dataSet -- iterable of documents, each a list of word tokens.
	The returned list's order is unspecified (derived from a set).
	"""
	vocab = set()
	for doc in dataSet:
		vocab.update(doc)  # union in this document's words
	return list(vocab)

# Convert a document into a binary "set-of-words" presence vector.
def setOfWords2Vec(vocabList, inputSet):
	"""Return a 0/1 vector, one slot per vocabulary word.

	vocabList -- list of vocabulary words (positions define the vector).
	inputSet  -- the document's word tokens.
	A slot is 1 if the word occurs in the document at least once;
	words outside the vocabulary are reported and skipped.
	"""
	returnVec = [0] * len(vocabList)
	for word in inputSet:
		try:
			# list.index raises ValueError for out-of-vocabulary words
			returnVec[vocabList.index(word)] = 1
		except ValueError:
			print("the word: %s is not in my Vocabulary!" % word)
	return returnVec

# Estimate the naive Bayes conditional probabilities from training data.
def trainNB0(trainMatrix, trainCategory):
	"""Train the classifier and return (p0Vect, p1Vect, pAbusive).

	trainMatrix   -- 2-D array, one word-presence vector per document.
	trainCategory -- per-document labels (1 = abusive/spam, 0 = normal).
	p0Vect/p1Vect -- log P(word | class) vectors for class 0 and 1.
	pAbusive      -- prior P(class == 1).
	"""
	docCount = len(trainMatrix)            # number of training documents
	wordCount = len(trainMatrix[0])        # vocabulary size
	pAbusive = sum(trainCategory) / float(docCount)
	# Laplace smoothing: counts start at 1 and denominators at 2 so no
	# word probability is ever exactly 0 (which would zero the product).
	p0Num, p1Num = ones(wordCount), ones(wordCount)
	p0Denom, p1Denom = 2.0, 2.0
	for rowVec, label in zip(trainMatrix, trainCategory):
		if label == 1:
			p1Num += rowVec
			p1Denom += sum(rowVec)
		else:
			p0Num += rowVec
			p0Denom += sum(rowVec)
	# Work in log space: products of many small factors would underflow.
	return log(p0Num / p0Denom), log(p1Num / p1Denom), pAbusive

# Classify a document by comparing its log posterior under each class.
def classifyNB(vec2Classify, p0Vec, p1Vec, pClass1):
	"""Return 1 if the document is more likely class 1, else 0.

	vec2Classify -- the document's word-presence vector.
	p0Vec/p1Vec  -- log-likelihood vectors from trainNB0.
	pClass1      -- prior probability of class 1.
	"""
	# log P(class | doc) is proportional to the sum of the selected
	# word log-likelihoods (element-wise product) plus the log prior.
	logP1 = sum(vec2Classify * p1Vec) + log(pClass1)
	logP0 = sum(vec2Classify * p0Vec) + log(1.0 - pClass1)
	return 1 if logP1 > logP0 else 0

# Tokenize raw text: split on non-word characters, lowercase,
# and drop tokens of fewer than three characters.
def textParse(bigString):
	"""Return the lowercase word tokens of *bigString* longer than 2 chars.

	bigString -- raw text of one email.
	"""
	import re
	# BUG FIX: the original pattern r'\\W*' is the regex "literal
	# backslash followed by W*", so normal text was never split and came
	# back as a single token.  r'\W+' splits on runs of non-word
	# characters (and, unlike r'\W*', produces no zero-width matches,
	# which re.split rejects on Python 3.7+).
	tokens = re.split(r'\W+', bigString)
	return [tok.lower() for tok in tokens if len(tok) > 2]
	
# Automated train/test harness for the naive Bayes spam classifier.
def spamTest():
	"""Train on email/spam/*.txt and email/ham/*.txt (25 files each),
	hold out 10 randomly chosen documents as a test set, and print the
	resulting error rate.
	"""
	docList = []
	classList = []
	for i in range(1, 26):
		# 'with' guarantees the file handles are closed (the original
		# leaked them).  errors='ignore' skips stray non-ASCII bytes in
		# the corpus (e.g. the '?' in ham/23.txt) that would otherwise
		# raise UnicodeDecodeError on some platforms.
		with open('email/spam/%d.txt' % i, errors='ignore') as fh:
			wordList = textParse(fh.read())
		docList.append(wordList)
		classList.append(1)  # 1 == spam
		with open('email/ham/%d.txt' % i, errors='ignore') as fh:
			wordList = textParse(fh.read())
		docList.append(wordList)
		classList.append(0)  # 0 == ham
	vocabList = createVocabList(docList)
	# Materialize the range: a range object does not support del.
	trainingSet = list(range(50))
	testSet = []
	for _ in range(10):  # hold out 10 random documents for testing
		randIndex = int(random.uniform(0, len(trainingSet)))
		testSet.append(trainingSet[randIndex])
		del trainingSet[randIndex]  # remove held-out doc from training
	trainMat = []
	trainClasses = []
	for docIndex in trainingSet:  # build the training matrix
		trainMat.append(setOfWords2Vec(vocabList, docList[docIndex]))
		trainClasses.append(classList[docIndex])
	p0V, p1V, pSpam = trainNB0(array(trainMat), array(trainClasses))
	errorCount = 0
	for docIndex in testSet:  # evaluate on the held-out documents
		wordVector = setOfWords2Vec(vocabList, docList[docIndex])
		if classifyNB(array(wordVector), p0V, p1V, pSpam) != classList[docIndex]:
			errorCount += 1
	print('the error rate is: ', float(errorCount) / len(testSet))


# Guard the entry point so importing this module does not run the test.
if __name__ == "__main__":
	spamTest()