#!/usr/bin/python2.7
# _*_ coding: utf-8 _*_

"""
@Author: MarkLiu
"""
import numpy as np
# encoding=utf-8
import jieba
from config import *
import time
import random
from multiprocessing import Process, Queue
def textParser(text):
    """
    Preprocess one SMS: segment it with jieba, drop empty tokens and
    lowercase everything.
    :param text: raw message text
    :return: list of lowercased word tokens
    """
    import re
    # Split on anything that is not a word character, a CJK character or a digit.
    regEx = re.compile(u'[^\w\u4e00-\u9fa5\d]+')
    segmented = " ".join(jieba.cut(text, cut_all=False))
    tokens = regEx.split(segmented)
    # Drop empty strings and normalise case.
    return [tok.lower() for tok in tokens if tok]


def loadSMSData(fileName):
    """
    Load labelled SMS data.

    Each line has the form "<label>::<text>" where label is 'ham'
    (normal message) or 'spam' (junk message).

    :param fileName: path to the data file
    :return: (smsWords, classCategory) — smsWords is a list of token lists,
             classCategory holds 1 for spam and 0 for ham, aligned
             index-for-index with smsWords.
    """
    classCategory = []  # 1 = spam, 0 = ham
    smsWords = []
    f = open(fileName)
    try:
        for line in f:
            print(line.decode('utf-8'))
            linedatas = line.strip().split('::')
            # Skip blank or malformed lines (no '::' separator or empty label)
            # instead of raising IndexError on linedatas[1].
            if len(linedatas) < 2 or linedatas[0] == '':
                continue
            if linedatas[0] == 'ham':
                classCategory.append(0)
            elif linedatas[0] == 'spam':
                classCategory.append(1)
            else:
                # Bug fix: unknown labels previously still appended the text,
                # desynchronising smsWords from classCategory.
                continue
            # Tokenise the message body.
            smsWords.append(textParser(linedatas[1]))
    finally:
        # Bug fix: the file handle was never closed.
        f.close()
    return smsWords, classCategory


def createVocabularyList(smsWords):
    """
    Build the vocabulary (corpus) as a list of distinct words.
    :param smsWords: iterable of token lists, one per SMS
    :return: list of unique words
    """
    vocabulary = set()
    for tokenList in smsWords:
        vocabulary.update(tokenList)
    return list(vocabulary)


def getVocabularyList(fileName):
    """
    Read the vocabulary back from file: a single line of words
    separated by tabs.
    :param fileName: path to the vocabulary file
    :return: list of words
    """
    with open(fileName) as fr:
        return fr.readline().strip().split('\t')


def setOfWordsToVecTor(vocabularyList, smsWords):
    """
    Match SMS tokens against the vocabulary, counting how often each
    vocabulary word occurs in the message.

    :param vocabularyList: corpus word list (for duplicate entries the
                           first occurrence receives the counts, matching
                           the previous list.index behaviour)
    :param smsWords: tokens of one message
    :return: numpy array of counts aligned with vocabularyList
    """
    # Performance fix: precompute word -> first index once. The old code
    # called list `in` + `index` per token, i.e. O(len(vocabulary)) for
    # every word; a dict lookup is O(1).
    wordIndex = {}
    for idx, word in enumerate(vocabularyList):
        wordIndex.setdefault(word, idx)

    vocabMarked = [0] * len(vocabularyList)
    for smsWord in smsWords:
        # Tokens arrive as unicode while the vocabulary file is read as
        # bytes, so encode before matching (Python 2 behaviour preserved).
        smsWord = smsWord.encode('utf-8')
        idx = wordIndex.get(smsWord)
        if idx is not None:
            vocabMarked[idx] += 1
    return np.array(vocabMarked)


def setOfWordsListToVecTor(vocabularyList,smsWords):
    """
    Mark every SMS in the data set against the vocabulary, in parallel.

    Spawns ``processCount`` worker processes (config constant); each worker
    writes its chunk of count vectors to a temp file (``sms_tmp_file``
    pattern from config), and the files are read back and parsed here.

    :param vocabularyList: corpus word list
    :param smsWords: list of token lists, one per SMS
    :return: list of count vectors (lists of ints), one per processed SMS
    """
    # Performance note (translated from original): this result is very
    # memory-hungry, so the work is fanned out to multiple processes.
    vocabMarkedList = []
    allCount = len(smsWords)
    # NOTE(review): Python 2 integer division — if allCount is not an exact
    # multiple of processCount, the trailing remainder of smsWords is never
    # processed by any worker. TODO confirm inputs are evenly divisible.
    processSize = allCount/processCount
    '''
    for page in range(processCount):
        f = open('data/study/tmp_%s' % page)
        for line in f.readlines():
            vocabMarkedList.append(line.split(SPLIT_TAB))
    return vocabMarkedList
    '''
    # Fan out: one process per page, each handling processSize messages
    # starting at page*processSize.
    plist = []
    for page in range( processCount ):
        plist.append( Process(target=setOfWordsListToVecTorByProcess,\
            args=( page*processSize, processSize, smsWords, vocabularyList,)) )
    for p in plist:
        p.start()
    # Wait for all workers to finish before reading their output files.
    for p in plist:
        p.join()
    # Fan in: parse each worker's tab-separated lines back into int vectors.
    for page in range(processCount):
        f = open(sms_tmp_file % page)
        for line in f.readlines():
            value = line.split(SPLIT_TAB)
            for jj in range(len(value)):
                value[jj] = int(value[jj])
            vocabMarkedList.append(value)
            del value
    return vocabMarkedList

def setOfWordsListToVecTorByProcess(page, pageSize, smsWords, vocabularyList):
    """
    Worker process: mark SMS messages [page, page+pageSize) against the
    vocabulary and write one tab-separated count vector per line to this
    worker's temp file (sms_tmp_file % worker_number).

    :param page: start index into smsWords (a multiple of pageSize)
    :param pageSize: number of messages this worker handles
    :param smsWords: full list of token lists
    :param vocabularyList: corpus word list
    """
    # page // pageSize recovers this worker's ordinal for the file name
    # (identical to Py2 int '/' for non-negative operands).
    out = open(sms_tmp_file % (page // pageSize), 'w')
    for idx in range(page, page + pageSize):
        marked = setOfWordsToVecTor(vocabularyList, smsWords[idx])
        out.write(SPLIT_TAB.join(str(v) for v in marked))
        out.write("\n")
        print("标记完成第%s条数据,已写入文件" % idx)
    out.flush()
    out.close()
def trainingNaiveBayes(trainMarkedWords, trainCategory):
    """
    Estimate the per-word spamicity P(Wi|S) and ham probability P(Wi|H)
    (as logs) from vocabulary-marked training data.

    :param trainMarkedWords: 2-D array-like, one count vector per document
    :param trainCategory: labels aligned with trainMarkedWords
    :return: (pWordsSpamicity, pWordsHealthy, pSpam) — log P(Wi|S),
             log P(Wi|H) and the spam prior P(S)
    """
    docCount = len(trainMarkedWords)
    wordCount = len(trainMarkedWords[0])

    # Prior probability that a document is spam, P(S).
    pSpam = sum(trainCategory) / float(docCount)

    # Laplace smoothing: every word count starts at 1, each total at 2.
    spamCounts = np.ones(wordCount)
    hamCounts = np.ones(wordCount)
    spamTotal = 2.0
    hamTotal = 2.0
    for i, doc in enumerate(trainMarkedWords):
        if trainCategory[i] == CATEGORY_SPAM:
            # Accumulate per-word and total counts seen in spam.
            spamCounts += doc
            spamTotal += sum(doc)
        else:
            hamCounts += doc
            hamTotal += sum(doc)

    # Log-probability of each vocabulary word given spam / given ham.
    pWordsSpamicity = np.log(spamCounts / spamTotal)
    pWordsHealthy = np.log(hamCounts / hamTotal)

    return pWordsSpamicity, pWordsHealthy, pSpam

def getTrainedModelInfo():
    """
    Load the persisted model artefacts from the working directory.

    :return: (vocabularyList, pWordsSpamicity, pWordsHealthy, pSpam)
    """
    # Corpus and the two per-word log-probability vectors saved by training.
    vocabularyList = getVocabularyList('vocabularyList.txt')
    pWordsHealthy = np.loadtxt('pWordsHealthy.txt', delimiter='\t')
    pWordsSpamicity = np.loadtxt('pWordsSpamicity.txt', delimiter='\t')
    # Spam prior P(S), stored as a single float on the first line.
    with open('pSpam.txt') as fr:
        pSpam = float(fr.readline().strip())

    return vocabularyList, pWordsSpamicity, pWordsHealthy, pSpam


def classify(pWordsSpamicity, pWordsHealthy, DS, pSpam, testWordsMarkedArray):
    """
    Classify one marked SMS by comparing naive-Bayes joint log scores.

    P(Ci|W) is compared via P(W|Ci)P(Ci); everything is done in log space,
    so products become sums.

    :param pWordsSpamicity: log P(Wi|S) per vocabulary word
    :param pWordsHealthy: log P(Wi|H) per vocabulary word
    :param DS: extra per-word weight factors added by the AdaBoost step
    :param pSpam: spam prior P(S)
    :param testWordsMarkedArray: count vector of the SMS under test
    :return: (ps, ph, category) — spam score, ham score, winning label
    """
    ps = sum(testWordsMarkedArray * pWordsSpamicity * DS) + np.log(pSpam)
    ph = sum(testWordsMarkedArray * pWordsHealthy) + np.log(1 - pSpam)
    category = CATEGORY_SPAM if ps > ph else CATEGORY_HAMORUNKNOW
    return ps, ph, category
