import feedparser
import re
import random
import os
import jieba  # 中文分词库
from numpy import array, ones, log

# Helper functions
def createVocabList(docs):
    """Return a list of the unique tokens appearing across all documents.

    docs: iterable of token lists (one list per document).
    The order of the returned list is unspecified (set-derived).
    """
    vocabulary = set()
    for document in docs:
        vocabulary.update(document)
    return list(vocabulary)

def setOfWords2Vec(vocabList, doc):
    """Build a binary (set-of-words) vector for *doc* over *vocabList*.

    vec[i] is 1 when vocabList[i] occurs in doc at least once, else 0.
    Tokens not present in the vocabulary are silently ignored.

    Fix: the original called vocabList.index(word) per token, an O(V)
    scan that made the whole function O(len(doc) * V).  A one-time
    word->index dict makes each lookup O(1).
    """
    word_index = {word: i for i, word in enumerate(vocabList)}
    vec = [0] * len(vocabList)
    for word in doc:
        idx = word_index.get(word)
        if idx is not None:
            vec[idx] = 1
    return vec

def trainNB0(trainMatrix, trainCategory, vocabList):
    """Train a two-class naive-Bayes model.

    trainMatrix:   2-D numpy array of per-document word vectors.
    trainCategory: numpy array of 0/1 labels (1 = spam).
    vocabList:     vocabulary; only its length is used, as the
                   Laplace-smoothing denominator offset.

    Returns (p0V, p1V, pSpam): the per-word log-probability vectors for
    each class and the prior probability of class 1.
    """
    doc_total = len(trainMatrix)
    word_total = len(trainMatrix[0])
    # Prior probability that a document belongs to class 1.
    pSpam = sum(trainCategory) / float(doc_total)

    # Laplace smoothing: every word starts with a pseudo-count of 1 and
    # each class denominator starts at the vocabulary size, so unseen
    # words never produce a zero probability.
    counts = {0: ones(word_total), 1: ones(word_total)}
    denoms = {0: len(vocabList), 1: len(vocabList)}

    for row, label in zip(trainMatrix, trainCategory):
        counts[label] += row
        denoms[label] += sum(row)

    # Work in log space to avoid floating-point underflow when many
    # small probabilities are multiplied at classification time.
    p0V = log(counts[0] / denoms[0])
    p1V = log(counts[1] / denoms[1])
    return p0V, p1V, pSpam

def classifyNB(vec, p0V, p1V, pClass1):
    """Classify a word vector with trained naive-Bayes parameters.

    vec:     numpy word vector for the document.
    p0V/p1V: per-word log-probability vectors for class 0 / class 1.
    pClass1: prior probability of class 1.

    Returns 1 when the class-1 log-score strictly exceeds the class-0
    log-score, otherwise 0 (ties go to class 0).
    """
    score_one = log(pClass1) + sum(vec * p1V)
    score_zero = log(1.0 - pClass1) + sum(vec * p0V)
    if score_one > score_zero:
        return 1
    return 0

# Text-parsing function adapted for Chinese (jieba segmentation)
def textParse(bigString):
    """Tokenize a Chinese document into a list of content words.

    Segments *bigString* with jieba, then drops tokens that are a single
    character after stripping, stop words, and purely numeric tokens.

    Fixes: the original checked the pure-digit regex against the
    *unstripped* token while every other filter used the stripped one,
    and it called .strip() up to three times per token.

    NOTE(review): load_stopwords() re-reads the stop-word file on every
    call; acceptable for small files, but worth caching if this becomes hot.
    """
    stopwords = load_stopwords()
    tokens = []
    for raw in jieba.cut(bigString):
        word = raw.strip()  # strip once, filter on the cleaned token
        if len(word) <= 1:  # drop single-character tokens
            continue
        if word in stopwords:  # drop stop words
            continue
        if re.match(r'^\d+$', word):  # drop purely numeric tokens
            continue
        tokens.append(word)
    return tokens

def load_stopwords(file_path='stopwords.txt'):
    """Load a stop-word set.

    Reads one stop word per line (UTF-8) from *file_path* when it
    exists; otherwise falls back to a small built-in Chinese stop-word
    list.  On a read error the message is printed and whatever was
    collected so far is returned.
    """
    collected = set()
    try:
        if os.path.exists(file_path):
            with open(file_path, 'r', encoding='utf-8') as handle:
                for raw_line in handle:
                    collected.add(raw_line.strip())
        else:
            # No stop-word file on disk: use the built-in defaults.
            collected = {
                '的', '了', '在', '是', '我', '有', '和', '就',
                '不', '人', '都', '一', '一个', '上', '也', '很',
                '到', '说', '要', '去', '你', '会', '着', '没有',
                '看', '好', '自己', '这', '那', '为', '什么', '呢',
                '吗', '啊', '哦', '呀', '吧', '嗯', '呃', '这个',
                '那个', '这样', '那样', '我们', '你们', '他们', '它',
                '它们', '而且', '但是', '如果', '因为', '所以', '然后',
                '虽然', '可是', '不过', '即使', '无论', '等等', '...',
            }
    except Exception as e:
        print(f"加载停用词表错误: {str(e)}")

    return collected

# Listing 4-6: RSS-feed classifier and high-frequency word removal (Python 3 version)

def calcMostFreq(vocabList, fullText):
    """Return the 30 most frequent vocabulary words in *fullText*.

    Returns a list of (word, count) pairs sorted by descending count;
    ties keep vocabList order (stable sort).

    Fix: the original called fullText.count(token) once per vocabulary
    word, i.e. O(V * N); a single Counter pass is O(N + V log V).
    """
    from collections import Counter  # local import, matching the original's style

    counts = Counter(fullText)
    freqDict = {token: counts[token] for token in vocabList}
    sortedFreq = sorted(freqDict.items(), key=lambda item: item[1], reverse=True)
    return sortedFreq[:30]

def bagOfWords2VecMN(vocabList, doc):
    """Build a bag-of-words (multinomial) count vector for *doc*.

    vec[i] is the number of times vocabList[i] occurs in doc; tokens not
    in the vocabulary are ignored.

    Fixes: the original set vec[...] = 1 instead of incrementing, which
    silently degraded the multinomial model to a set-of-words model
    (making this a duplicate of setOfWords2Vec), and it used the O(V)
    vocabList.index lookup per token.
    """
    word_index = {word: i for i, word in enumerate(vocabList)}
    vec = [0] * len(vocabList)
    for word in doc:
        idx = word_index.get(word)
        if idx is not None:
            vec[idx] += 1  # count occurrences, not mere presence
    return vec

def localWords(feed1, feed0):
    """Train and evaluate a naive-Bayes classifier on two parsed RSS feeds.

    Entries from feed1 are labelled 1 and entries from feed0 are
    labelled 0.  The 30 most frequent words are removed from the
    vocabulary, 2 randomly chosen documents are held out for testing,
    the hold-out error rate is printed, and the trained model is
    returned as (vocabList, p0V, p1V).
    """
    import feedparser
    import random
    from numpy import array
    # Helper functions assumed defined elsewhere in this module:
    # textParse, createVocabList, bagOfWords2VecMN, trainNB0, classifyNB
    
    docList = []; classList = []; fullText = []
    # Take the same number of entries from each feed so the classes stay balanced.
    minLen = min(len(feed1['entries']), len(feed0['entries']))
    for i in range(minLen):
        wordList = textParse(feed1['entries'][i]['summary'])
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(1)  # feed1 entries are class 1
        
        wordList = textParse(feed0['entries'][i]['summary'])
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(0)  # feed0 entries are class 0
    
    vocabList = createVocabList(docList)
    top30Words = calcMostFreq(vocabList, fullText)
    
    # Drop the 30 highest-frequency words: they behave like stop words
    # and tend to dominate (and hurt) classification.
    for pairW in top30Words:
        if pairW[0] in vocabList:
            vocabList.remove(pairW[0])
    
    # Materialize the range as a list so entries can be deleted below.
    trainingSet = list(range(2 * minLen))
    testSet = []
    
    # Hold out 2 randomly selected documents for testing.
    for i in range(2):
        randIndex = int(random.uniform(0, len(trainingSet)))
        testSet.append(trainingSet[randIndex])
        del trainingSet[randIndex]
    
    trainMat = []; trainClasses = []
    for docIndex in trainingSet:
        trainMat.append(bagOfWords2VecMN(vocabList, docList[docIndex]))
        trainClasses.append(classList[docIndex])
    
    p0V, p1V, pSpam = trainNB0(array(trainMat), array(trainClasses),vocabList)
    errorCount = 0
    
    # Score each held-out document against the trained model.
    for docIndex in testSet:
        wordVector = bagOfWords2VecMN(vocabList, docList[docIndex])
        if classifyNB(array(wordVector), p0V, p1V, pSpam) != classList[docIndex]:
            errorCount += 1
    
    print('the error rate is: ', float(errorCount) / len(testSet))
    return vocabList, p0V, p1V


def analysisRss():
    """Fetch the sspai RSS feed and print its metadata and five newest articles."""
    feed = feedparser.parse("https://sspai.com/feed")

    # Feed-level metadata.
    print("Feed 标题:", feed.feed.title)
    print("Feed 描述:", feed.feed.description)
    print("Feed 链接:", feed.feed.link)
    print("\n最新文章:")

    # The five most recent articles, numbered from 1.
    for number, article in enumerate(feed.entries[:5], start=1):
        print(f"\n文章 {number}: {article.title}")
        print("发布时间:", article.get("published", "无时间信息"))
        print("摘要:", article.get("summary", "无摘要"))
        print("链接:", article.link)

# Script entry point: print the sspai feed, then train/evaluate the
# naive-Bayes classifier on two Chinese-language feeds.
if __name__ == "__main__":
    analysisRss()
    # Variable names (ny/sf) follow the book's NY/SF example; here the
    # two classes are the sspai feed (1) and Ruan Yifeng's blog (0).
    ny = feedparser.parse('https://sspai.com/feed')
    sf = feedparser.parse('https://www.ruanyifeng.com/blog/atom.xml')
    vocabList, pSF, pNY = localWords(ny, sf)