#!/usr/bin/env python
# -*- coding: utf-8 -*-

import numpy as np



# Load the toy training corpus.
def load_data_set():
    """Return the sample documents and their one-hot class labels.

    Returns:
        posting_list: list of tokenised documents (lists of words).
        class_vec: (6, 2) one-hot int array; column 0 marks non-abusive,
            column 1 marks abusive posts.
    """
    posting_list = [['my', 'dog', 'has', 'flea', 'problem', 'help', 'please'],
                    ['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'],
                    ['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'],
                    ['stop', 'posting', 'stupid', 'worthless', 'garbage'],
                    ['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'],
                    ['quit', 'buying', 'worthless', 'dog', 'food', 'stupid']]
    # Class index per document: 0 = non-abusive, 1 = abusive.
    labels = [0, 1, 0, 1, 0, 1]
    # One-hot encode by picking rows of the 2x2 identity matrix.
    class_vec = np.eye(2, dtype=int)[labels]
    return posting_list, class_vec


# Collect the unique words appearing anywhere in the corpus.
def create_vocab_list(data_set):
    """Return a list of every distinct word found in *data_set*."""
    vocab = set()
    for document in data_set:
        vocab.update(document)
    return list(vocab)


# Convert a document into a set-of-words (binary presence) vector.
def set_of_words_to_vec(vocab_list, input_set):
    """Return a 0/1 list over *vocab_list* marking words present in *input_set*.

    Args:
        vocab_list: ordered vocabulary; output positions follow this order.
        input_set: iterable of words from one document.

    Words absent from the vocabulary are reported on stdout and skipped.
    """
    return_vec = [0] * len(vocab_list)
    for word in input_set:
        if word in vocab_list:
            return_vec[vocab_list.index(word)] = 1
        else:
            # print() call form runs on both Python 2 and 3; the original
            # Python-2-only print statement is a syntax error under Python 3.
            print("the word: %s is not in my Vocabulary!" % word)
    return return_vec


# Train a multinomial naive-Bayes model with Laplace smoothing.
def train_nb(train_matrix, train_category):
    """Estimate naive-Bayes parameters from a binary document-term matrix.

    Args:
        train_matrix: (m, n) array; row i is the word vector of document i.
        train_category: (m, k) one-hot array; row i is document i's class.

    Returns:
        p_c: (1, k) Laplace-smoothed class priors p(y=c_k).
        vec_rate: (k, n) smoothed conditionals p(word_j | y=c_k).
    """
    smoothing = 1  # Laplace smoothing constant
    train_number, category_number = train_category.shape  # (#docs, #classes)
    _, n = train_matrix.shape  # n = vocabulary size
    # Number of training documents per class, shape (1, k).
    category_each_number = np.sum(train_category == 1, 0).reshape((1, category_number))
    # Smoothed priors: (count_k + 1) / (m + k).
    p_c = (category_each_number + smoothing) / (float(train_number) + category_number * smoothing)

    vec_rate = np.zeros((category_number, n))
    for i in range(category_number):
        # Rows of the training set belonging to class i.
        index = np.argwhere(train_category[:, i])
        class_rows = train_matrix[index[:, 0], :]
        # Total word occurrences in class i (denominator of the conditional).
        total_words = np.sum(class_rows)
        # Smoothed per-word conditional frequencies for class i.
        vec_rate[i] = (np.sum(class_rows, 0) + smoothing) / (total_words + n * smoothing)
    return p_c, vec_rate


# Classify a word vector with the trained naive-Bayes parameters.
def classfy_nb(test, p_ck, pxi):
    """Print and return the one-hot predicted class for word vector *test*.

    Args:
        test: length-n 0/1 word-presence vector.
        p_ck: (1, k) class priors.
        pxi: (k, n) conditional word probabilities per class.

    Returns:
        (k, 1) int array with a 1 in the predicted class row.
    """
    # Log-posterior (up to a shared constant) for every class, shape (1, k).
    p = np.sum(np.log(pxi) * test, 1) + np.log(p_ck)
    # First class attaining the maximum; argmax on a single-row array
    # yields the column index, matching the old argwhere lookup.
    c = int(np.argmax(p))
    # Generalised to k classes (the original hard-coded 2) and now also
    # returns the result so callers need not rely on stdout.
    c_v = np.zeros((pxi.shape[0], 1), dtype=int)
    c_v[c, 0] = 1
    print(c_v)
    return c_v


if __name__ == "__main__":
    posting_list, class_vec = load_data_set()
    vocab_set = create_vocab_list(posting_list)
    # Vectorise every training document against the vocabulary
    # (replaces six copy-pasted append calls).
    train_matrix = np.array([set_of_words_to_vec(vocab_set, doc)
                             for doc in posting_list])
    print(train_matrix)
    p_c, vec_rate = train_nb(train_matrix, class_vec)

    # Expected: non-abusive ([1, 0]).
    test_doc1 = ['love', 'my', 'dalmation']
    this_doc1 = set_of_words_to_vec(vocab_set, test_doc1)
    classfy_nb(this_doc1, p_c, vec_rate)

    # Expected: abusive ([0, 1]).
    test_doc2 = ['stupid', 'garbage']
    this_doc2 = set_of_words_to_vec(vocab_set, test_doc2)
    classfy_nb(this_doc2, p_c, vec_rate)