# coding=utf8
import numpy as np


def textParser(text):
    """
    Tokenize an SMS message using jieba word segmentation.

    NOTE(review): the original comment claimed empty strings are removed
    and text is lowercased, but the code only segments — confirm intent.

    :param text: raw SMS text
    :return: list of segmented tokens
    """
    import jieba
    return [token for token in jieba.cut(text)]


def loadSMSData(fileName):
    """
    Load labeled SMS data from a file with lines of the form ``label::text``.

    :param fileName: path to the data file (UTF-8, label 'ham' or 'spam')
    :return: (smsWords, classCategory) — token lists per message and the
        aligned labels (1 = spam/ad, 0 = ham/normal)
    """
    classCategory = []
    smsWords = []
    # `with` guarantees the file handle is closed (the original leaked it).
    with open(fileName, 'r', encoding='utf8') as f:
        for line in f:
            # Strip first: raw lines keep their '\n', so a bare `== ''`
            # check (as in the original) never matched blank lines.
            line = line.strip()
            if not line:
                continue
            linedatas = line.split('::')
            if len(linedatas) < 2:
                continue  # malformed line: no '::' separator
            if linedatas[0] == 'ham':
                classCategory.append(0)
            elif linedatas[0] == 'spam':
                classCategory.append(1)
            else:
                # Unknown label: skip entirely so words and labels stay
                # aligned (the original appended words without a label).
                continue
            # Tokenize the message body.
            smsWords.append(textParser(linedatas[1]))
    return smsWords, classCategory


def createVocabularyList(smsWords):
    """
    Build the vocabulary: every distinct word across all messages.

    :param smsWords: 2-D list, one token list per message
    :return: list of unique words (arbitrary order)
    """
    vocabulary = set()
    for document in smsWords:
        vocabulary.update(document)
    return list(vocabulary)


def getVocabularyList(fileName):
    """
    Read the vocabulary back from a tab-separated vocabulary file.

    :param fileName: path to the vocabulary file (UTF-8, tab-delimited)
    :return: list of vocabulary words
    """
    with open(fileName, 'r', encoding='utf8') as handle:
        content = handle.read()
    return content.split('\t')


def setOfWordsToVecTor(vocabularyList, smsWords):
    """
    Convert a tokenized SMS into a word-count vector over the vocabulary.

    Words outside the vocabulary are ignored.

    :param vocabularyList: ordered vocabulary
    :param smsWords: list of tokens from one message
    :return: list of counts, aligned with vocabularyList
    """
    # Build a word -> position map once; the original did a linear `in`
    # test plus `list.index` scan per token, i.e. O(len(vocab)) per word.
    wordIndex = {}
    for i, word in enumerate(vocabularyList):
        wordIndex.setdefault(word, i)  # keep first occurrence, like list.index
    vocabMarked = [0] * len(vocabularyList)
    for smsWord in smsWords:
        pos = wordIndex.get(smsWord)
        if pos is not None:
            vocabMarked[pos] += 1
    return vocabMarked


def setOfWordsListToVecTor(vocabularyList, smsWordsList):
    """
    Vectorize a batch of tokenized messages against the vocabulary.

    :param vocabularyList: ordered vocabulary
    :param smsWordsList: 2-D list, one token list per message
    :return: list of count vectors, one per message
    """
    return [setOfWordsToVecTor(vocabularyList, words) for words in smsWordsList]


def trainingNaiveBayes(trainMarkedWords, trainCategory):
    """
    Train a multinomial naive Bayes model: estimate P(Wi|S), P(Wi|H), P(S).

    :param trainMarkedWords: 2-D word-count data, one row per message and
        one column per vocabulary word
    :param trainCategory: labels aligned with the rows (1 = spam, 0 = ham)
    :return: (log P(Wi|S) array, log P(Wi|H) array, spam prior P(S))
    """
    numTrainDoc = len(trainMarkedWords)
    numWords = len(trainMarkedWords[0])
    # Prior probability that a message is spam, P(S).
    pSpam = sum(trainCategory) / float(numTrainDoc)

    # Laplace-style smoothing: start each word count at 1 and each class
    # total at 2 so no word ever gets probability zero (log(0)).
    wordsInSpamNum = np.ones(numWords)
    wordsInHealthNum = np.ones(numWords)
    spamWordsNum = 2.0
    healthWordsNum = 2.0
    for i in range(numTrainDoc):
        counts = np.asarray(trainMarkedWords[i])
        if trainCategory[i] == 1:  # spam SMS/mail
            wordsInSpamNum += counts
            spamWordsNum += counts.sum()  # total word occurrences in spam
        else:
            wordsInHealthNum += counts
            healthWordsNum += counts.sum()

    # Per-word log-probabilities conditioned on class:
    # log P(Wi|S) and log P(Wi|H).
    pWordsSpamicity = np.log(wordsInSpamNum / spamWordsNum)
    pWordsHealthy = np.log(wordsInHealthNum / healthWordsNum)

    return pWordsSpamicity, pWordsHealthy, pSpam


def getTrainedModelInfo():
    """
    Load the previously trained model artifacts from data/trained.

    :return: (vocabularyList, pWordsSpamicity, pWordsHealthy, pSpam)
    """
    # Resolve the directory that holds the persisted training output.
    from libs import get_root_path
    root = get_root_path('data/trained')

    vocab = getVocabularyList(root + 'vocabularyList.txt')
    spamicity = np.loadtxt(root + 'pWordsSpamicity.txt', delimiter='\t')
    healthy = np.loadtxt(root + 'pWordsHealthy.txt', delimiter='\t')
    with open(root + 'pSpam.txt') as f:
        prior = float(f.read().strip())

    return vocab, spamicity, healthy, prior


def classify(vocabularyList, pWordsSpamicity, pWordsHealthy, pSpam, testWords):
    """
    Classify one tokenized message with the trained naive Bayes model.

    Compares the joint log-scores log P(W|S) + log P(S) and
    log P(W|H) + log P(H); the shared evidence term P(W) is dropped.

    :param vocabularyList: ordered vocabulary
    :param pWordsSpamicity: log P(Wi|S) per vocabulary word
    :param pWordsHealthy: log P(Wi|H) per vocabulary word
    :param pSpam: prior probability P(S)
    :param testWords: tokens of the message to classify
    :return: 1 for spam, 0 for ham
    """
    counts = np.array(setOfWordsToVecTor(vocabularyList, testWords))
    spamScore = sum(counts * pWordsSpamicity) + np.log(pSpam)
    healthScore = sum(counts * pWordsHealthy) + np.log(1 - pSpam)
    return 1 if spamScore > healthScore else 0
