#!/usr/bin/env python
# -*- coding:UTF-8 -*-

"""
*************************************
  算法要求：朴素贝叶斯
      P(Ci|W1,W2,W3...Wn)=P(W1,W2...Wn|Ci)P(Ci)/P(W1,W2...Wn)
      在应用中P(W1,W2...Wn)都存在，则都忽略不计算，即公式简化为：
          P(Ci|W1,W2,W3...Wn)=P(W1,W2...Wn|Ci)P(Ci)

      P(Ci|W1,W2,W3...Wn)为第i个文本类别出现概率
      P(W1,W2...Wn|Ci)由训练器获得，即在训练样本中，Ci类样本中特征(W1,W2...Wn)的出现概率
      P(Ci)由训练器获得，即训练样本中Ci类出现的概率
      P(W1,W2...Wn)由训练器获得，即训练样本中特征(W1,W2...Wn)的出现概率
      同时在这里我们相信特征之间相互独立，即
          P(W1,W2...Wn|Ci)=P(W1|Ci)*P(W2|Ci)*...*P(Wn|Ci)
          P(W1,W2...Wn)=P(W1)*P(W2)*...*P(Wn)
  **********************************
  算法模型：1、多项式模型(multinomial model)
              以词汇作为统计单位，不忽略各个特征词汇出现次数.

              文档d=(W1,W2,…,Wn)，Wn是该文档中出现过的单词，允许重复:
                  P(C)= 类C下单词总数/整个训练样本的单词总数
                  P(Wn|C)=(类C下单词Wn在各个文档中出现过的次数之和+1)/(类c下单词总数+|V|)
                      ****************
                      *模型平滑型处理: *
                      *              ************************************
                      *V是训练样本的单词表(即抽取单词，单词出现多次，只算一个) *
                      *|V|则表示训练样本包含多少种单词。                    *
                      ***************************************************
                  P(Wn|c)可以看作是单词Wn在证明d属于类c上提供了多大的证据
                  P(c)则可以认为是类别c在整体上占多大比例(有多大可能性)。

           2、伯努利模型(Bernoulli model)
               以文档作为统计单位，忽略各个特征词汇频次，只统计特征词汇在文档中是否存在

                  P(c)= 类c下文件总数/整个训练样本的文件总数
                  P(Wn|c)=(类c下包含单词Wn的文件数+1)/(类c下文件总数+2)

**************************************
"""

import os
import threading
from concurrent.futures import ThreadPoolExecutor, wait

import jieba


class TrainBayes:
    """Train naive-Bayes spam/ham models from a corpus of mail files.

    Two models are fitted from the same corpus:

    * Multinomial model — counts word tokens (duplicates matter)::

        P(C)   = words in class C / words in the whole corpus
        P(w|C) = (count of w in C + 1) / (words in C + |V|)   # Laplace smoothing

    * Bernoulli model — counts documents (a word counts once per mail)::

        P(C)   = mails in class C / mails in the whole corpus
        P(w|C) = (mails of C containing w + 1) / (mails in C + 2)

    Typical usage::

        trainer = TrainBayes()
        trainer.set_train_value(trainer.get_train_index())
        trainer.train_multinomial()
        trainer.train_bernoulli()
        multinomial, bernoulli = trainer.get_train_result()
    """

    def __init__(self):
        # --- multinomial model raw counts ---
        self.spam_words_count = 0   # total word tokens seen in spam mails
        self.ham_words_count = 0    # total word tokens seen in ham mails
        self.spam_words_dict = {}   # word -> token count in spam mails
        self.ham_words_dict = {}    # word -> token count in ham mails
        self.all_words_set = set()  # vocabulary V over the whole corpus

        # --- Bernoulli model raw counts ---
        self.spam_email_count = 0   # number of spam mails
        self.ham_email_count = 0    # number of ham mails
        self.spam_email_dict = {}   # word -> number of spam mails containing it
        self.ham_email_dict = {}    # word -> number of ham mails containing it

        # --- multinomial training results ---
        self.spam_words_percent_dict = {}  # P(word|spam)
        self.ham_words_percent_dict = {}   # P(word|ham)
        self.spam_words_percent_all = 0    # P(spam)
        self.ham_words_percent_all = 0     # P(ham)

        # --- Bernoulli training results ---
        self.spam_email_percent_dict = {}  # P(word|spam)
        self.ham_email_percent_dict = {}   # P(word|ham)
        self.spam_email_percent_all = 0    # P(spam)
        self.ham_email_percent_all = 0     # P(ham)

        self.index_file_url = ''  # path of the training-set index file

        # Reader tasks run on a thread pool and all mutate the counters and
        # dicts above; the lock keeps those read-modify-write updates atomic.
        self._lock = threading.Lock()

    def get_train_index(self):
        """Prompt for the training-set index file and parse it.

        Each line of the index file is expected to be ``<label> <path>``
        where label ``spam`` marks a spam mail and anything else is ham.

        Returns:
            tuple[list[str], list[str]]: (spam mail paths, ham mail paths);
            both lists are empty when the index file does not exist.
        """
        spam_index = []  # spam mail paths
        ham_index = []   # ham mail paths
        self.index_file_url = input('请输入训练集文件的目录文件所在路径:')
        if not os.path.exists(self.index_file_url):
            print('输入的路径文件不存在！')
        else:
            with open(self.index_file_url) as index_file:
                for line in index_file:
                    line = line.strip()  # drop trailing '\n'
                    if not line:
                        continue  # skip blank lines instead of crashing on split
                    flag_index = line.split(' ')
                    if len(flag_index) < 2:
                        continue  # malformed line: no path field
                    if flag_index[0] == 'spam':
                        spam_index.append(flag_index[1])
                    else:
                        ham_index.append(flag_index[1])
        return spam_index, ham_index

    def __read_email_thread(self, index_files_url, index, is_spam):
        """Read one mail file, tokenise its body, and accumulate model counts.

        Args:
            index_files_url: directory containing the index file; mail paths
                in the index are relative to it.
            index: relative path of the mail file.
            is_spam: selects whether the spam or the ham counter set is updated.
        """
        with open(os.path.join(index_files_url, index), 'rb') as email:
            print(('spam ' if is_spam else 'ham ') + index)
            # The corpus is gb2312-encoded; replace undecodable bytes so a
            # single corrupt mail cannot abort the whole training run.
            email_content = email.read().decode('gb2312', errors='replace')

        # The first blank line separates the mail header from the body;
        # only the body is used for training.
        cut_place = email_content.find('\n\n')
        email_content_body = email_content[cut_place:].strip()

        # Segment the body into lower-cased words.
        email_body_words = [w.lower() for w in jieba.cut(email_content_body)]
        email_body_words_set = set(email_body_words)  # one hit per mail (Bernoulli)

        # Pre-count locally, then merge under the lock: the shared counters
        # and dicts are mutated by up to 10 worker threads concurrently.
        local_counts = {}
        for word in email_body_words:
            local_counts[word] = local_counts.get(word, 0) + 1

        with self._lock:
            self.all_words_set |= email_body_words_set
            if is_spam:
                self.spam_words_count += len(email_body_words)
                words_dict = self.spam_words_dict
                email_dict = self.spam_email_dict
            else:
                self.ham_words_count += len(email_body_words)
                words_dict = self.ham_words_dict
                email_dict = self.ham_email_dict
            for word, count in local_counts.items():
                words_dict[word] = words_dict.get(word, 0) + count  # multinomial
            for word in email_body_words_set:
                email_dict[word] = email_dict.get(word, 0) + 1      # Bernoulli

    def __read_spam_thread(self, index_files_url, index):
        # Worker entry point for one spam mail.
        self.__read_email_thread(index_files_url, index, True)

    def __read_ham_thread(self, index_files_url, index):
        # Worker entry point for one ham mail.
        self.__read_email_thread(index_files_url, index, False)

    def set_train_value(self, spam_ham_index):
        """Read every mail of the training set and fill the model counters.

        Args:
            spam_ham_index: tuple (spam mail paths, ham mail paths) as
                returned by :meth:`get_train_index`.
        """
        spam_index, ham_index = spam_ham_index

        self.spam_email_count = len(spam_index)
        self.ham_email_count = len(ham_index)

        # Mail paths in the index are relative to the index file's directory.
        index_files_url = os.path.dirname(self.index_file_url)

        # BUG FIX: the original code submitted a single tuple argument
        # (``submit(fn, (url, index))``) to a two-parameter method, so every
        # worker raised TypeError and no counts were ever accumulated.
        with ThreadPoolExecutor(max_workers=10) as pool:
            print('开始读取垃圾邮件集：')
            spam_futures = [
                pool.submit(self.__read_spam_thread, index_files_url, index)
                for index in spam_index
            ]
            wait(spam_futures)

            print('开始读取正常邮件集：')
            ham_futures = [
                pool.submit(self.__read_ham_thread, index_files_url, index)
                for index in ham_index
            ]
            wait(ham_futures)

        # Surface worker failures instead of silently discarding them.
        for future in spam_futures + ham_futures:
            err = future.exception()
            if err is not None:
                print(str(err))

    def train_multinomial(self):
        """Compute the multinomial-model probabilities from the raw counts.

        P(C)   = words in class C / words in the whole corpus
        P(w|C) = (count of w in C + 1) / (words in C + |V|)
        """
        total_words = self.spam_words_count + self.ham_words_count
        if total_words:  # avoid ZeroDivisionError on an empty corpus
            # BUG FIX: the original added +1 to the spam numerator only,
            # making P(spam)+P(ham) != 1 and contradicting the formula above.
            self.spam_words_percent_all = self.spam_words_count / total_words
            self.ham_words_percent_all = self.ham_words_count / total_words

        vocabulary_size = len(self.all_words_set)  # |V| for Laplace smoothing

        spam_denominator = self.spam_words_count + vocabulary_size
        for word, count in self.spam_words_dict.items():
            self.spam_words_percent_dict[word] = (count + 1) / spam_denominator

        ham_denominator = self.ham_words_count + vocabulary_size
        for word, count in self.ham_words_dict.items():
            self.ham_words_percent_dict[word] = (count + 1) / ham_denominator

    def train_bernoulli(self):
        """Compute the Bernoulli-model probabilities from the raw counts.

        P(C)   = mails in class C / mails in the whole corpus
        P(w|C) = (mails of C containing w + 1) / (mails in C + 2)
        """
        total_emails = self.spam_email_count + self.ham_email_count
        if total_emails:  # avoid ZeroDivisionError on an empty corpus
            self.spam_email_percent_all = self.spam_email_count / total_emails
            self.ham_email_percent_all = self.ham_email_count / total_emails

        spam_denominator = self.spam_email_count + 2
        for word, count in self.spam_email_dict.items():
            self.spam_email_percent_dict[word] = (count + 1) / spam_denominator

        ham_denominator = self.ham_email_count + 2
        for word, count in self.ham_email_dict.items():
            self.ham_email_percent_dict[word] = (count + 1) / ham_denominator

    def get_train_result(self):
        """Return the trained parameters of both models.

        Returns:
            tuple[dict, dict]: (multinomial result, Bernoulli result); each
            dict holds 'spam_dict'/'ham_dict' (P(word|class)) and
            'spam_all'/'ham_all' (P(class)).
        """
        multinomial_result = {
            'spam_dict': self.spam_words_percent_dict,
            'ham_dict': self.ham_words_percent_dict,
            'spam_all': self.spam_words_percent_all,
            'ham_all': self.ham_words_percent_all
        }
        bernoulli_result = {
            'spam_dict': self.spam_email_percent_dict,
            'ham_dict': self.ham_email_percent_dict,
            'spam_all': self.spam_email_percent_all,
            'ham_all': self.ham_email_percent_all
        }
        return multinomial_result, bernoulli_result
