#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
朴素贝叶斯算法是基于贝叶斯定理和特征之间条件独立假设的分类方法。
首先基于特征条件独立假设学习输入/输出的联合概率分布；
然后基于此模型，对给定的输入 x，利用贝叶斯定理求出后验概率最大的输出 y。

以下实现将连续特征离散化(区间化)，转换成离散情形，完全按照
特征离散情形完成分类的算法实现
"""

from __future__ import division, print_function
from sklearn import datasets
import matplotlib.pyplot as plt 
import math
import numpy as np 

def shuffle_data(X, y, seed = None):
    """
    Return X and y shuffled in unison along the first axis.

    Parameters
    ----------
    X : ndarray of shape (n_samples, ...), feature rows.
    y : ndarray of shape (n_samples,), labels paired with the rows of X.
    seed : int or None, random seed for reproducible shuffling.

    Returns
    -------
    (X_shuffled, y_shuffled) : same permutation applied to both arrays.
    """
    # BUG FIX: `if seed:` treated seed=0 as "no seed" and skipped seeding;
    # compare against None so every valid integer seed is honored.
    if seed is not None:
        np.random.seed(seed)

    # Shuffle an index array so X and y receive the identical permutation.
    idx = np.arange(X.shape[0])
    np.random.shuffle(idx)

    return X[idx], y[idx]

def normalize(X, axis = -1, p = 2):
    """
    Scale the vectors of X along `axis` to unit Lp norm.

    Vectors whose norm is zero are returned unchanged: their divisor is
    replaced by 1 to avoid a division by zero.
    """
    norms = np.atleast_1d(np.linalg.norm(X, p, axis))
    # Swap in a harmless divisor wherever the norm vanishes.
    norms = np.where(norms == 0, 1, norms)
    return X / np.expand_dims(norms, axis)

def standardize(X):
    """
    Standardize each column of X to zero mean and unit variance.

    Columns with zero standard deviation are left as all zeros instead of
    producing a division by zero.

    Parameters
    ----------
    X : ndarray of shape (n_samples, n_features), numeric.

    Returns
    -------
    ndarray of the same shape with standardized columns.
    """
    X_std = np.zeros(X.shape)
    mean = X.mean(axis = 0)
    std = X.std(axis = 0)

    # Guard each division against a zero denominator.
    for col in range(np.shape(X)[1]):
        if std[col]:
            # BUG FIX: the original centered X_std (all zeros) instead of X,
            # which returned -mean/std broadcast over every row.
            X_std[:, col] = (X[:, col] - mean[col]) / std[col]
    return X_std

def train_test_split(X, y, test_size = 0.2, shuffle = True, seed = None):
    """
    Split arrays into a training and a test subset.

    Parameters
    ----------
    X, y : arrays sliced along the first axis.
    test_size : fraction of samples assigned to the test set.
    shuffle : when True, shuffle the data (via shuffle_data) before splitting.
    seed : random seed forwarded to shuffle_data.

    Returns
    -------
    x_train, x_test, y_train, y_test
    """
    if shuffle:
        X, y = shuffle_data(X, y, seed)

    # Everything before the cut point trains; the remainder tests.
    cut = int(X.shape[0] * (1 - test_size))
    return X[:cut], X[cut:], y[:cut], y[cut:]

def accuracy(y, y_pred):
    """
    Fraction of positions where y and y_pred agree.

    Both arrays are reshaped to (n_samples, -1) so 1-D and column-vector
    inputs compare consistently.
    """
    y_true = np.reshape(y, (len(y), -1))
    y_hat = np.reshape(y_pred, (len(y_pred), -1))
    return np.sum(y_true == y_hat) / len(y_true)


class NaiveBayes():
    """
    Naive Bayes classifier for discrete (categorical) features.

    fit() estimates, for every class, the Laplace-smoothed log-probability
    of each feature value; classify() then picks the class maximizing
    log P(y) + sum_j log P(x_j | y), relying on the naive conditional
    independence assumption between features.
    """
    def __init__(self):
        # Distinct class labels observed during fit().
        self.classes = None
        # Training data is kept for the prior and the unseen-value fallback.
        self.X = None
        self.y = None
        # parameters[i][j]: dict mapping feature value -> log P(value | class i)
        # for feature column j.
        self.parameters = []

    def fit(self, X, y):
        """
        Estimate log P(feature value | class) for every class/feature pair.

        Parameters
        ----------
        X : ndarray of shape (n_samples, n_features), categorical values.
        y : ndarray of shape (n_samples,), class labels.
        """
        self.X = X
        self.y = y
        self.classes = np.unique(y)
        # BUG FIX: reset here so a second fit() does not accumulate the
        # parameters of a previous call.
        self.parameters = []

        for i in range(len(self.classes)):
            c = self.classes[i]
            # Samples belonging to class c.
            x_where_c = X[np.where(y == c)]
            n_c = x_where_c.shape[0]

            self.parameters.append([])
            for j in range(X.shape[1]):
                # Use the feature values of the WHOLE training set so values
                # absent from this class still get a smoothed probability.
                # BUG FIX: the Laplace denominator now counts these same
                # values (the original divided by the per-class value count
                # while iterating whole-dataset values, so the smoothed
                # probabilities did not sum to 1). Iterating np.unique also
                # avoids recomputing duplicates.
                feature_values = np.unique(X[:, j])
                n_values = len(feature_values)

                parameters = {}
                for feature_value in feature_values:
                    n_feature_value = np.sum(x_where_c[:, j] == feature_value)
                    # Laplace smoothing; log turns products into sums.
                    parameters[feature_value] = np.log(
                        (n_feature_value + 1) / (n_c + n_values))
                self.parameters[i].append(parameters)

    def calculate_priori_probability(self, c):
        """
        Return the Laplace-smoothed prior probability P(y = c).
        """
        x_where_c = self.X[np.where(self.y == c)]
        n_samples_for_c = x_where_c.shape[0]
        n_samples = self.X.shape[0]
        return (n_samples_for_c + 1) / (n_samples + len(self.classes))

    def classify(self, sample):
        """
        Return the class label with the highest log posterior for `sample`.

        Parameters
        ----------
        sample : 1-D array of feature values (one per training column).
        """
        posteriors = []

        for i in range(len(self.classes)):
            c = self.classes[i]
            prior = self.calculate_priori_probability(c)
            # log P(y) + sum_j log P(x_j | y), by conditional independence.
            posterior = np.log(prior)

            for j, params in enumerate(self.parameters[i]):
                sample_feature = sample[j]
                # Feature values never seen in training fall back to an
                # assumed probability of 1 / n_training_samples.
                proba = params.get(sample_feature, np.log(1 / self.X.shape[0]))
                posterior += proba

            posteriors.append(posterior)

        # argmax over log posteriors picks the most probable class.
        return self.classes[np.argmax(posteriors)]

    def predict(self, X):
        """
        Classify every row of X; returns an ndarray of predicted labels.
        """
        y_pred = []
        for sample in X:
            y_pred.append(self.classify(sample))
        return np.array(y_pred)

def main():
    """Demo: fit NaiveBayes on a tiny categorical dataset and print accuracy."""
    # Toy samples of (gender, city); every 'M' row is labeled 1, every 'F' row 0.
    samples = [['M', '北京'], ['F', '上海'], ['M', '广州'], ['M', '北京'],
               ['F', '上海'], ['M', '北京'], ['F', '上海'], ['M', '广州'],
               ['M', '北京'], ['F', '上海']]
    labels = [1, 0, 1, 1, 0, 1, 0, 1, 1, 0]
    X = np.array(samples)
    y = np.array(labels)

    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.6)

    model = NaiveBayes()
    model.fit(X_train, y_train)
    predictions = np.array(model.predict(X_test))

    print("Accuracy:", accuracy(y_test, predictions))


if __name__ == "__main__":
    main()