import os

# List the filenames of the normal and spam training e-mails.
# NOTE(review): these listing paths use "date/..." while the files are later
# opened under "data/..." — confirm the real directory layout on disk.
normal_file_list = os.listdir("D:\\机器基础\\yinmujiu\\ml-lesson\\03_dataset\\date/normal/")
spam_file_list = os.listdir("D:\\机器基础\\yinmujiu\\ml-lesson\\03_dataset\\spam_data\\date/spam")
print(f'正常邮件:{normal_file_list}')
print(f'垃圾邮件:{spam_file_list}')

# Load the stop-word list, one word per line.
stop_list = []
with open('data/stopwords.txt', encoding='utf-8') as f:  # 'with' closes the file (the original leaked the handle)
    for line in f:
        # rstrip('\n') is safe on the last line even when it has no trailing
        # newline; the original line[:len(line)-1] would chop a real character.
        stop_list.append(line.rstrip('\n'))
print(f'停用内容:{stop_list}')
#创建分词统计函数
from  jieba import  cut #中文分词库
from re import sub#文本替换函数
#定义getWords()函数，用于提取指定文件（邮政文件）中的词语
def get_words(file, stop_list):
    """Extract the meaningful words from one e-mail file.

    Reads every line of *file*, strips punctuation and digits, segments the
    text with jieba, and keeps only multi-character words that are not in
    *stop_list*.

    Args:
        file: path of the e-mail text file (UTF-8 encoded).
        stop_list: iterable of stop words to exclude.

    Returns:
        List of the remaining words in order of appearance; an empty list
        for an empty file (the original fell off the end and returned None).
    """
    # Set membership is O(1); testing against the raw list is O(n) per word.
    stop_set = set(stop_list)
    words = []
    with open(file, encoding='utf-8') as f:
        for line in f:
            line = line.strip()  # drop surrounding whitespace
            # Remove punctuation and digits before segmentation.
            line = sub(r'[.【】0-9、--，。！\~*]', '', line)
            for word in cut(line):  # jieba Chinese word segmentation
                # len(word) > 1 also rules out empty strings; keep only
                # non-stop multi-character words.
                if len(word) > 1 and word not in stop_set:
                    words.append(word)
    # Bug fix: the original `return` was inside the line loop, so only the
    # FIRST line of every e-mail was ever processed.
    return words

# Extract the words of every training e-mail. Spam files are processed
# first, then normal files, so the label vector built later
# ([1]*spam + [0]*normal) lines up with these rows by index.
from collections import Counter
from itertools import chain

all_words = []
for file in spam_file_list:
    words = get_words("D:\\机器基础\\yinmujiu\\ml-lesson\\03_dataset\\spam_data\\data/spam/" + file, stop_list)
    all_words.append(words)
for file in normal_file_list:
    # Bug fix: the original path was "data.normal/" — a dot instead of a
    # path separator — so no normal e-mail could ever be opened.
    words = get_words("D:\\机器基础\\yinmujiu\\ml-lesson\\03_dataset\\spam_data\\data/normal/" + file, stop_list)
    all_words.append(words)

print(f'全部有效词：{all_words}')
print(f'全部有效词的形状：{len(all_words)}')

 # 提取高频单词
frep = Counter(chain(*all_words))  # 获取有效词语出现的频次
top_ten = frep.most_common(10)  # 获取出现频次最高的前10个词语
top_words = [w[0] for w in top_ten]
print('出现次数最高的10个词语：')
print(top_words)

# Build the feature matrix: row i holds the counts of the 10 top words in
# e-mail i.
import numpy as np

vector = []
for words in all_words:
    counts = [words.count(word) for word in top_words]
    # Bug fix: the original did vector.append(vector), appending the list to
    # itself instead of the per-mail count row, so the matrix was never built.
    vector.append(counts)
vector = np.array(vector)
print('10个高频词在每封邮件中出现的次数：')
print(vector)
# Train a multinomial naive-Bayes classifier on the word-count features.
from sklearn.naive_bayes import MultinomialNB

# Labels: 1 = spam, 0 = normal. Spam e-mails were appended to all_words
# first, so this label order matches the feature-matrix rows. Deriving the
# counts from the file lists stays correct if the dataset size changes
# (the original hard-coded 9 spam + 9 normal labels).
target = np.array([1] * len(spam_file_list) + [0] * len(normal_file_list))
x, y = vector, target
model = MultinomialNB()
model.fit(x, y)

# Classify each e-mail in the held-out test directory.
test_dir = "D:\\机器基础\\yinmujiu\\ml-lesson\\03_dataset\\spam_data\\data/test"
test_files = os.listdir(test_dir)
for file in test_files:
    # Bug fix: the original opened 'data.test' + file — a different path
    # from the one listed above, with no separator before the filename —
    # so no test file could ever be read.
    words = get_words(os.path.join(test_dir, file), stop_list)
    # Same 10-dimensional count features the model was trained on.
    test_x = np.array([words.count(word) for word in top_words])
    result = model.predict(test_x.reshape(1, -1))  # shape (1, 10): one sample
    if result == 1:
        print(f'{file}是垃圾邮件！')
    else:
        print(f'{file}是正常邮件！')