import re
import random
import os
import jieba  # 中文分词库
from numpy import array, ones, log

# 辅助函数
def createVocabList(docs):
    """Build the vocabulary list: every distinct token across all documents.

    Args:
        docs: iterable of token lists (one list per document).

    Returns:
        List of unique tokens (order unspecified, as with the original set).
    """
    return list({token for document in docs for token in document})

def setOfWords2Vec(vocabList, doc):
    """Convert a document into a set-of-words (binary presence) vector.

    Args:
        vocabList: vocabulary as a list of tokens; positions define the vector.
        doc: iterable of tokens in the document.

    Returns:
        List of 0/1 ints, len(vocabList) long; 1 where the vocab word occurs
        in doc. Words absent from the vocabulary are silently ignored.
    """
    # PERF FIX: the original called vocabList.index(word) per token, an O(n)
    # scan each time (O(n*m) overall). Build the token->position map once.
    position = {word: i for i, word in enumerate(vocabList)}
    vec = [0] * len(vocabList)
    for word in doc:
        idx = position.get(word)
        if idx is not None:
            vec[idx] = 1
    return vec

def trainNB0(trainMatrix, trainCategory, vocabList):
    """Train a naive-Bayes spam model from binary word vectors.

    Args:
        trainMatrix: 2-D numpy array, one row per document (0/1 word vector).
        trainCategory: 1-D array of labels, 1 = spam, 0 = ham.
        vocabList: the vocabulary; its length seeds the smoothed denominators.

    Returns:
        (p0V, p1V, pSpam): log word-probability vectors for ham and spam,
        and the prior probability of spam.
    """
    docCount = len(trainMatrix)
    wordCount = len(trainMatrix[0])
    pSpam = sum(trainCategory) / float(docCount)  # prior P(spam)

    # Laplace smoothing: every word starts with a pseudo-count of 1,
    # so each denominator starts at the vocabulary size.
    spamCounts = ones(wordCount)
    hamCounts = ones(wordCount)
    spamTotal = len(vocabList)
    hamTotal = len(vocabList)

    for rowVec, label in zip(trainMatrix, trainCategory):
        if label == 1:  # spam document
            spamCounts += rowVec
            spamTotal += sum(rowVec)
        else:           # ham document
            hamCounts += rowVec
            hamTotal += sum(rowVec)

    # Work in log space to avoid floating-point underflow.
    return log(hamCounts / hamTotal), log(spamCounts / spamTotal), pSpam

def classifyNB(vec, p0V, p1V, pClass1):
    """Classify one word vector with the trained naive-Bayes model.

    Args:
        vec: 0/1 word-presence vector (numpy array).
        p0V, p1V: log word-probability vectors for ham and spam.
        pClass1: prior probability of spam.

    Returns:
        1 for spam, 0 for ham.
    """
    # Log-space scores: log prior + sum of log likelihoods of present words.
    spamScore = log(pClass1) + sum(vec * p1V)
    hamScore = log(1.0 - pClass1) + sum(vec * p0V)
    if spamScore > hamScore:
        return 1
    return 0

# 修改文本解析函数以支持中文
def textParse(bigString):
    """Tokenize a Chinese text blob into a filtered list of words.

    Segments with jieba, then drops stop words, tokens of length <= 1,
    and purely-numeric tokens.

    Args:
        bigString: raw text of one document.

    Returns:
        List of cleaned tokens.
    """
    # NOTE: the stop-word table is reloaded on every call; cache it at
    # module level if this becomes a hot path.
    stopwords = load_stopwords()

    tokens = []
    for word in jieba.cut(bigString):
        token = word.strip()  # strip once instead of re-stripping per test
        # BUG FIX: the digit filter previously matched the *unstripped*
        # word while the other filters used the stripped form; a token
        # like ' 42 ' slipped past the numeric filter.
        if (len(token) > 1
                and token not in stopwords
                and not re.match(r'^\d+$', token)):
            tokens.append(token)
    return tokens

def load_stopwords(file_path='stopwords.txt'):
    """Load the stop-word set used to filter tokenized text.

    Reads one stop word per line from *file_path* (UTF-8). If the file is
    missing, a small built-in Chinese stop-word set is returned instead.
    On a read error, a message is printed and whatever was collected so
    far is returned.
    """
    collected = set()
    try:
        if not os.path.exists(file_path):
            # No stop-word file on disk: fall back to the built-in basics.
            return {
                '的', '了', '在', '是', '我', '有', '和', '就',
                '不', '人', '都', '一', '一个', '上', '也', '很',
                '到', '说', '要', '去', '你', '会', '着', '没有',
                '看', '好', '自己', '这', '那', '为', '什么', '呢',
                '吗', '啊', '哦', '呀', '吧', '嗯', '呃', '这个',
                '那个', '这样', '那样', '我们', '你们', '他们', '它',
                '它们', '而且', '但是', '如果', '因为', '所以', '然后',
                '虽然', '可是', '不过', '即使', '无论', '等等', '...'
            }
        with open(file_path, 'r', encoding='utf-8') as fh:
            for raw in fh:
                collected.add(raw.strip())
    except Exception as e:
        print(f"加载停用词表错误: {str(e)}")
    return collected

def spamTest():
    """Spam-filter evaluation driver.

    Loads 5 spam + 5 ham emails from disk, holds out 2 random documents
    as a test set, trains the naive-Bayes model on the rest, and prints
    the classification error rate.

    Returns:
        The error rate on the held-out test set (0 if nothing was tested).
    """
    docList = []
    classList = []
    fullText = []

    # Base path of the email corpus (adjust to the local data location).
    base_path = r"D:\email"  # Windows path example

    # Load spam and ham emails; a file that fails to load is skipped.
    for i in range(1, 6):
        try:
            # os.path.join keeps the paths cross-platform.
            spam_path = os.path.join(base_path, "spam", f"{i}.txt")
            ham_path = os.path.join(base_path, "ham", f"{i}.txt")

            # Spam email, read as UTF-8.
            with open(spam_path, 'r', encoding='utf-8') as f:
                content = f.read()
            wordList = textParse(content)
            docList.append(wordList)
            fullText.extend(wordList)
            classList.append(1)  # spam -> label 1

            # Ham email, read as UTF-8.
            with open(ham_path, 'r', encoding='utf-8') as f:
                content = f.read()
            wordList = textParse(content)
            docList.append(wordList)
            fullText.extend(wordList)
            classList.append(0)  # ham -> label 0
        except Exception as e:
            print(f"Error loading file {i}: {str(e)}")
            continue

    # Build the vocabulary from everything that actually loaded.
    vocabList = createVocabList(docList)

    # BUG FIX: the index pool was hard-coded to range(10); when any file
    # failed to load (the except/continue path above), docList was shorter
    # and docList[docIndex] below raised IndexError. Index what we have.
    trainingSet = list(range(len(docList)))
    testSet = []

    # Randomly hold out up to 2 documents for testing.
    for _ in range(min(2, len(trainingSet))):
        randIndex = int(random.uniform(0, len(trainingSet)))
        testSet.append(trainingSet[randIndex])
        del trainingSet[randIndex]

    # Build the training matrix (binary word vectors) and its labels.
    trainMat = []
    trainClasses = []
    for docIndex in trainingSet:
        trainMat.append(setOfWords2Vec(vocabList, docList[docIndex]))
        trainClasses.append(classList[docIndex])

    if not trainMat:
        # Nothing loaded -> nothing to train on; report a zero error rate
        # instead of crashing inside trainNB0.
        print('分类错误率: 0.0000')
        return 0

    # Train the naive-Bayes classifier.
    p0V, p1V, pSpam = trainNB0(array(trainMat), array(trainClasses), vocabList)

    # Evaluate on the held-out documents.
    errorCount = 0
    for docIndex in testSet:
        wordVector = setOfWords2Vec(vocabList, docList[docIndex])
        # Compare prediction against the true label.
        if classifyNB(array(wordVector), p0V, p1V, pSpam) != classList[docIndex]:
            errorCount += 1

    # Compute and report the error rate.
    errorRate = float(errorCount) / len(testSet) if testSet else 0
    print(f'分类错误率: {errorRate:.4f}')
    return errorRate


def extract_words(text, keep_hyphens=True):
    """Extract every English word from *text*, lowercased.

    Args:
        text: the string to scan.
        keep_hyphens: when True, hyphen/apostrophe compounds such as
            "state-of-the-art" or "don't" are kept as single words;
            when False, only pure alphabetic runs are matched.

    Returns:
        List of matched words in order of appearance, all lowercase.
    """
    # Hyphen/apostrophe-aware pattern vs. plain alphabetic pattern.
    word_re = (
        r"\b[a-zA-Z]+(?:['-][a-zA-Z]+)*\b"
        if keep_hyphens
        else r"\b[a-zA-Z]+\b"
    )
    return [match.lower() for match in re.findall(word_re, text)]


if __name__ == "__main__":
    # Demo: extract English words from a sample sentence.
    mySente = 'This book is the best book on Python or M.L. I have ever laid eyes upon.'
    words=extract_words(mySente)
    print(words) 
    # Initialize the jieba tokenizer (loads its dictionary eagerly).
    jieba.initialize()
    
    # Optionally register a user-defined dictionary:
    # jieba.load_userdict("user_dict.txt")
    
    # Run the spam-filter evaluation (reads email files from disk).
    spamTest()
    
