import numpy as np
def loadDataSet():
    """Return the toy training corpus for the naive-Bayes demo.

    Returns:
        wordlist - list of six tokenized posts (lists of word strings)
        classVec - parallel labels: 1 = abusive post, 0 = not abusive
    """
    # Each post is written as one space-separated string and tokenized
    # with str.split(); the resulting word lists are identical to the
    # hand-written literals (including the original 'worhtless' typo,
    # which is part of the data).
    posts = [
        'my dog has flea problems help please',
        'maybe not take him to dog park stupid',
        'my dalmation is so cute I love him',
        'stop posting stupid worthless garbage',
        'mr licks ate my steak how to stop him',
        'quit buying worhtless dog food stupid',
    ]
    wordlist = [post.split() for post in posts]
    classVec = [0, 1, 0, 1, 0, 1]
    return wordlist, classVec

def createWordList(dataset):
    """Build the vocabulary of a tokenized corpus.

    Parameters:
        dataset - iterable of documents, each a list of word strings
    Returns:
        list of every distinct word seen across all documents
        (order is unspecified, as with the underlying set)
    """
    vocabulary = set()
    for document in dataset:
        # in-place union instead of rebinding with |
        vocabulary.update(document)
    return list(vocabulary)

def setWord2Vec(vocabList, inputSet):
    """Encode a document as a 0/1 presence vector over the vocabulary.

    Parameters:
        vocabList - the vocabulary (list of words); defines vector order
        inputSet  - tokenized document to encode
    Returns:
        list of len(vocabList) ints: 1 where the vocab word occurs in
        inputSet, 0 otherwise. Words missing from the vocabulary are
        reported on stdout and skipped.
    """
    vector = [0] * len(vocabList)
    for token in inputSet:
        # EAFP: let .index() raise instead of testing membership first
        try:
            vector[vocabList.index(token)] = 1
        except ValueError:
            print("the word %s is not in my vocabulary" % token)
    return vector

def train0(matrix, category):
    """Train the naive-Bayes word-probability model.

    Parameters:
        matrix   - training matrix: one row per document, produced by
                   setWord2Vec/bagWord2Vec (word counts over the vocabulary)
        category - parallel list of labels (1 = abusive, 0 = not)
    Returns:
        p0Vect   - log conditional probability of each word given class 0
        p1Vect   - log conditional probability of each word given class 1
        pAbusive - prior probability that a document is abusive
    """
    trainNum = len(matrix)
    wordNum = len(matrix[0])
    pAbusive = sum(category) / float(trainNum)
    # Laplace smoothing: start counts at 1 and denominators at 2 so a word
    # never seen in one class does not get probability 0 (which would
    # annihilate the whole product during classification).
    p0Num = np.ones(wordNum)
    p1Num = np.ones(wordNum)
    p0Denom = 2.0
    p1Denom = 2.0
    for i in range(trainNum):
        if category[i] == 1:
            p1Num += matrix[i]
            p1Denom += sum(matrix[i])
        else:
            p0Num += matrix[i]
            p0Denom += sum(matrix[i])
    # Return log-probabilities: multiplying many small probabilities
    # underflows to 0.0; sums of logs stay well-conditioned.
    p1Vect = np.log(p1Num / p1Denom)
    p0Vect = np.log(p0Num / p0Denom)
    return p0Vect, p1Vect, pAbusive
    
def classify0(p0,p1,pA):
    '''
    Classification function (stub).
    Author:'zhuanli'
    Parameters:p0 - per-word conditional probability vector for the
        non-abusive class (as returned by train0)
        p1 - per-word conditional probability vector for the abusive class
        pA - prior probability of the abusive class
    Returns: None
    NOTE(review): unimplemented — the body is only this docstring, so the
        function always returns None. Presumably it should score an input
        word vector against p0/p1/pA and return 0 or 1, which would also
        require the vector as an extra parameter — confirm intended design.
    '''
    
def bagWord2Vec(vocabList, inputSet):
    """Encode a document as a bag-of-words count vector over the vocabulary.

    Parameters:
        vocabList - the vocabulary (list of words); defines vector order
        inputSet  - tokenized document to encode
    Returns:
        list of len(vocabList) ints: how many times each vocab word occurs
        in inputSet. Unknown words are silently ignored.
    """
    counts = [0] * len(vocabList)
    for token in inputSet:
        # guard clause: skip out-of-vocabulary tokens without reporting
        if token not in vocabList:
            continue
        counts[vocabList.index(token)] += 1
    return counts

def textParse(bigString):
    """Tokenize a raw text blob into lowercase words longer than two chars.

    Parameters:
        bigString - raw text (e.g. an email body)
    Returns:
        list of lowercase tokens with len > 2; punctuation and short
        words (a, to, of, ...) are dropped.
    """
    import re
    # BUG FIX: the delimiter must be runs of NON-word characters (\W+).
    # The original r'\w+' split ON the words themselves, so only the
    # punctuation/whitespace residue survived and the result was always [].
    listOfTokens = re.split(r'\W+', bigString)
    return [tok.lower() for tok in listOfTokens if len(tok) > 2]

def spamTest():
    """Placeholder for the spam-filter cross-validation experiment.

    NOTE(review): unfinished — it only initializes empty accumulators and
    implicitly returns None; the email parsing, train/test split, and
    error-rate loop from the exercise are missing.
    """
    doclist = []
    classlist = []

if __name__ == '__main__':
    # Demo: build the vocabulary from the toy corpus, vectorize every
    # post, train the naive-Bayes model, and print its parameters.
    posts, labels = loadDataSet()
    vocabulary = createWordList(posts)
    trainMat = [setWord2Vec(vocabulary, post) for post in posts]
    p0, p1, pAb = train0(trainMat, labels)
    print(p0, p1, pAb)
    
