# -*- encoding: utf-8 -*-
from collections import defaultdict
import math
import os
import re

import jieba

# Directory listings of the labelled training corpora and the test set.
normal_email_list = os.listdir('./../data/normal')
spam_email_list = os.listdir('./../data/spam')
test_email_list = os.listdir('./../data/test')

normal_email_count = len(normal_email_list)
spam_email_count = len(spam_email_list)

# Class priors P(normal) / P(spam) from the class frequencies.
normal_email_probability = normal_email_count / (normal_email_count + spam_email_count)
spam_email_probability = spam_email_count / (normal_email_count + spam_email_count)

# Per-class document-frequency tables: word -> number of e-mails containing it.
normal_word_dict = defaultdict(int)
spam_word_dict = defaultdict(int)

# Chinese stop-word list (one word per line).  A set gives O(1) membership
# tests in the training loops below (the original list was O(n) per word).
# rstrip('\n') fixes the original `line[:len(line) - 1]`, which chopped a
# real character off the last line when the file lacks a trailing newline.
# `with` closes the handle, which the original leaked.
stop_word_list = set()
with open(r"../data/中文停用词表.txt") as stop_file:
    for line in stop_file:
        stop_word_list.add(line.rstrip('\n'))


def get_email_word_list(file):
    """Return the de-duplicated Chinese words of the e-mail at path *file*.

    Every line is stripped of all non-Chinese characters (anything outside
    the CJK range \\u4e00-\\u9fa5) and segmented with jieba.  The words of
    all lines are collected into a set, so the result reflects presence
    (document frequency), not term frequency.  Order of the returned list
    is arbitrary, exactly as with the original ``list(set(...))``.
    """
    # Compiled once, not per line as the original did.
    non_chinese = re.compile(r"[^\u4e00-\u9fa5]")
    words = set()
    # `with` guarantees the file handle is closed (the original leaked it).
    with open(file) as f:
        for line in f:
            words.update(jieba.cut(non_chinese.sub("", line)))
    return list(words)


def _count_document_frequencies(email_list, folder, word_dict):
    """Increment word_dict[word] once per e-mail in which *word* appears.

    Extracted because the original normal/spam training loops were exact
    duplicates of each other.
    """
    for file_name in email_list:
        for word in get_email_word_list(folder + file_name):
            # `word is not None` must run *before* .strip(): the original
            # tested it last, where a None would already have raised.
            # (jieba should never yield None, so behavior is unchanged.)
            if word is not None and word.strip() != '' and word not in stop_word_list:
                word_dict[word] += 1


_count_document_frequencies(normal_email_list, "./../data/normal/", normal_word_dict)
_count_document_frequencies(spam_email_list, "./../data/spam/", spam_word_dict)

# Freeze to plain dicts so later lookups of unseen words miss instead of
# silently inserting zero counts via defaultdict.
normal_word_dict = dict(normal_word_dict)
spam_word_dict = dict(spam_word_dict)


def cal_bayes(spam_word_probability):
    """Classify one e-mail: return 1 for spam, 0 for normal.

    *spam_word_probability* maps each word of the e-mail to P(spam | word).
    The per-word probabilities are combined with the naive-Bayes formula

        P = prod(p_w) / (prod(p_w) + prod(1 - p_w))

    and the e-mail is called spam when P > 0.9.

    The products are accumulated in log space: the original multiplied the
    raw probabilities, which underflows to 0.0 for long e-mails and can even
    raise ZeroDivisionError when both products vanish.  The original's
    per-word debug print (which polluted stdout on every classification)
    has been removed.  All p_w produced by the caller lie strictly in (0, 1),
    so math.log never sees 0.
    """
    log_spam = 0.0
    log_normal = 0.0
    for p_w in spam_word_probability.values():
        log_spam += math.log(p_w)
        log_normal += math.log(1.0 - p_w)
    # P = 1 / (1 + exp(log_normal - log_spam)); clamp the exponent so
    # math.exp cannot overflow for extremely normal-looking mail.
    p = 1.0 / (1.0 + math.exp(min(log_normal - log_spam, 700.0)))
    return 1 if p > 0.9 else 0


# Classify every e-mail in the test directory: 1 = spam, 0 = normal.
test_result_dict = dict()
for file_name in test_email_list:
    email_words = get_email_word_list("./../data/test/" + file_name)
    spam_word_probability = dict()
    for word in email_words:
        in_normal = word in normal_word_dict
        in_spam = word in spam_word_dict
        if not in_normal and not in_spam:
            # Word never seen in training: fixed, slightly-ham-leaning prior.
            spam_word_probability[word] = 0.4
            continue
        # Document frequency normalised by class size; 0.001 smooths the
        # class in which the word was never observed.
        p_n = normal_word_dict[word] / normal_email_count if in_normal else 0.001
        p_s = spam_word_dict[word] / spam_email_count if in_spam else 0.001
        spam_word_probability[word] = p_s / (p_s + p_n)
    test_result_dict[file_name] = cal_bayes(spam_word_probability)

print(test_result_dict)


def cal_accuracy(test_result_dict):
    """Return the fraction of test e-mails whose prediction matches its name.

    Corpus convention: file names that parse to a number below 1000 are
    normal mail (expected label 0); names above 1000 are spam (expected
    label 1).  A name of exactly 1000 never matches and counts as an error.
    """
    hits = 0
    for name, predicted in test_result_dict.items():
        number = int(name)
        is_right = (number < 1000 and predicted == 0) or (number > 1000 and predicted == 1)
        if is_right:
            hits += 1
    return hits / len(test_result_dict)


print(cal_accuracy(test_result_dict))
