# Step 1: load the email file lists and the stopword file
import os

# ------------------- path configuration -------------------
# Directory containing this script; all data paths are resolved relative
# to it so the script works regardless of the current working directory.
base_dir = os.path.dirname(os.path.abspath(__file__))
# Data root: <script dir>/item5/item5-ss-data
data_dir = os.path.join(base_dir, "item5", "item5-ss-data")
# Prefix prepended to every data path below; os.sep adapts to Windows/Linux.
path_prefix = data_dir + os.sep
# -----------------------------------------------------------

# File lists of the normal (ham) and spam training emails.
normalFileList = os.listdir(path_prefix + "normal/")
spamFileList = os.listdir(path_prefix + "spam/")
print("正常邮件的文件列表", normalFileList)
print("垃圾邮件的文件列表", spamFileList)

# Load the stopword list, one word per line.
# FIX: the original used line[:len(line)-1], which unconditionally drops the
# last character — wrong when the final line has no trailing newline.
# rstrip('\n') removes only the newline, and `with` closes the file handle.
stopList = []
with open(path_prefix + "stopwords.txt", encoding='utf-8') as f:
    for line in f:
        stopList.append(line.rstrip('\n'))
print("停用词文件内容：", stopList)



# Step 2: helper that loads one email file and extracts its words
from jieba import cut
from re import sub


def getWords(file, stop_list):
    """Read one email file and return its list of meaningful words.

    Each line is stripped, punctuation/digits are removed, then the line is
    segmented with jieba. Tokens of length <= 1, stopwords, and blank tokens
    are discarded.

    FIXES vs. the original:
    - filters against the ``stop_list`` parameter instead of silently using
      the global ``stopList`` (the parameter was ignored);
    - filters once after reading all lines, instead of re-filtering the whole
      accumulated list on every input line (accidental O(n^2));
    - returns [] for an empty file instead of raising NameError on the
      undefined ``words`` variable;
    - closes the file handle via ``with``.

    :param file: path of the email text file (UTF-8).
    :param stop_list: iterable of stopwords to exclude.
    :return: list of effective words, in order of appearance.
    """
    words_list = []
    with open(file, encoding="utf-8") as fh:
        for line in fh:
            line = line.strip()
            # Remove punctuation and digits before segmentation.
            line = sub(r'[.【】0-9、——，。！\~*]', '', line)
            # jieba segmentation; keep only tokens longer than one character.
            words_list.extend(token for token in cut(line) if len(token) > 1)
    # Single filtering pass: drop stopwords and blank tokens.
    return [w for w in words_list if w not in stop_list and w.strip() != '']



# Step 3: data preprocessing
from collections import Counter
from itertools import chain

# Collect the effective words of every training email.
# Order matters for the labels later: spam emails first, then normal ones.
allwords = []
for folder, file_list in (("spam/", spamFileList), ("normal/", normalFileList)):
    for mail_name in file_list:
        allwords.append(getWords(path_prefix + folder + mail_name, stopList))

print(f"训练集中所有的有效词语列表：{allwords}")

# The 10 most frequent words across the whole training set become the features.
word_freq = Counter(chain.from_iterable(allwords))
topTen = word_freq.most_common(10)
topWords = [pair[0] for pair in topTen]
print("训练集中出现频次最高的前10个词语:")
print(topWords)

import numpy as np

# Feature matrix: one row per email, one column per high-frequency word,
# each cell holding that word's occurrence count in the email.
vector = np.array(
    [[mail_words.count(w) for w in topWords] for mail_words in allwords]
)
print("10个高频词语在每封邮件中出现的次数：")
print(vector)




# Step 4: train the classifier
from sklearn.naive_bayes import MultinomialNB

# Labels: 1 = spam, 0 = normal, in the same order the feature rows were
# built (spam emails first, then normal ones).
# FIX: the original hard-coded an 18-element array assuming exactly 9 spam
# and 9 normal emails; deriving the labels from the actual file counts keeps
# the script correct for any dataset size.
target = np.array([1] * len(spamFileList) + [0] * len(normalFileList))
x, y = vector, target
# Multinomial naive Bayes is the standard choice for word-count features.
model = MultinomialNB()
model.fit(x, y)




# Step 5: classify the test emails
testFileList = os.listdir(path_prefix + "test")
for testFile in testFileList:
    # Extract the email's words, then count the high-frequency feature words.
    mail_words = getWords(path_prefix + "test/" + testFile, stopList)
    features = np.array([mail_words.count(w) for w in topWords])
    # predict() expects a 2-D array: one row of 10 counts.
    result = model.predict(features.reshape(1, -1))
    if result[0] == 1:
        print(f'"{testFile}"是垃圾邮件')
    else:
        print(f'"{testFile}"是正常邮件')