import bayes 
import re  # regular expressions
import feedparser  # library for parsing RSS feeds

# Demo: train a naive Bayes classifier on the toy post data set
# shipped with the bayes module.
posts, labels = bayes.loadDataSet()
vocab = bayes.createVocabList(posts)
print("文档词条：\n", vocab)
print("侮辱性或非侮辱性词条（要看对应的标签）：\n", posts[0])
print("将文档转换为向量:\n", bayes.setOfWords2Vec(vocab, ['dog']))

# Estimate class-conditional word probabilities: one set-of-words
# vector per training post.
train_matrix = [bayes.setOfWords2Vec(vocab, doc) for doc in posts]
print("trainMat:\n", train_matrix)

p0_vec, p1_vec, p_abusive = bayes.trainNB0(train_matrix, labels)
print("pAb:\n", p_abusive)

print("分类的一个例子")
bayes.testingNB()

# --- Email example: tokenizing text for the spam classifier ---
mySent = 'This book is the best book on Python or M.L. I have ever laideyes upon'
mySentSet = mySent.split()  # whitespace split leaves punctuation attached (e.g. "M.L." stays one token)
print("电子邮件的例子")
print(mySentSet)

# \W matches any non-word character (word chars are [a-zA-Z0-9_]; note that
# underscore IS a word char). Split on runs of separators (\W+) and drop the
# empty strings re.split yields at the ends/boundaries — with a bare r'\W'
# the pattern, "M.L." would leave empty tokens in the result.
listOfTokens = [tok for tok in re.split(r'\W+', mySent) if tok]
print(listOfTokens)

print("电子邮件的例子:")
bayes.spamTest()

# NOTE(review): debugging leftover kept for reference — manually inspect one
# ham message (GBK-encoded) from the sample corpus.
#f = open("email/ham/23.txt",encoding='gbk');
#print(f.read());

# Use the naive Bayes classifier to compare word usage between two RSS feeds
# (the book's "regional tendencies from personal ads" example: how people in
# two sources differ in their wording).
feed_a = feedparser.parse('https://sports.yahoo.com/nba/teams/hou/rss.xml')
feed_b = feedparser.parse('http://www.nasa.gov/rss/dyn/image_of_the_day.rss')
vocabList, pSF, pNY = bayes.localWords(feed_a, feed_b)
print("vocabList,pSF,pNY:\n", vocabList, pSF, pNY)
# Show the most characteristic words of each feed via the Bayes model;
# classification accuracy here is only so-so.
bayes.getTopWords(feed_a, feed_b)























