import pandas as pd
import numpy  as np

class NaiveBayesianClassifier:
    """Naive Bayes classifier for categorical features held in a pandas DataFrame.

    The fitted model is a nested dict:
        {class_label: {'PClass': P(class),
                       'PFeature': {feature_name: {value: P(value | class)}}}}
    """

    def __init__(self):
        # Fitted model; empty dict until fit() is called.
        self.model = {}

    def fit(self, X_train, Y_train=None):
        """Fit the classifier.

        Parameters
        ----------
        X_train : pd.DataFrame
            Training features. If Y_train is not supplied, the LAST column of
            X_train is taken as the class label.
        Y_train : pd.Series or pd.DataFrame, optional
            Class labels; when given (and non-empty) they are appended as the
            last column before training.

        Returns
        -------
        dict
            The fitted model (also stored on self.model).
        """
        # NOTE: the default used to be a shared mutable pd.Series(); None
        # avoids the mutable-default pitfall and the deprecated empty-Series
        # construction, while preserving the "labels already in X_train" path.
        if Y_train is not None and not Y_train.empty:
            X_train = pd.concat([X_train, Y_train], axis=1)
        self.model = self.buildNB(X_train)
        return self.model

    def buildNB(self, X_train):
        """Build the model dict from a DataFrame whose last column is the label."""
        Y_train = X_train.iloc[:, -1]

        # Class priors P(class) from relative frequencies.
        # (Laplace smoothing could be applied here; unseen values are instead
        # handled by the epsilon in predictBySeries.)
        y_count = Y_train.value_counts(normalize=True)
        class_dict = {clss: {'PClass': p, 'PFeature': {}}
                      for clss, p in y_count.items()}

        prop_names = X_train.columns[:-1]
        # Distinct values each feature takes over the WHOLE training set, so a
        # value absent within one class still gets an explicit probability 0.
        all_feature_values = {
            name: list(X_train[name].value_counts().index)
            for name in prop_names
        }

        # Conditional probabilities P(value | class) for every feature.
        for class_name, group in X_train.groupby(X_train.iloc[:, -1]):
            for feature_name in prop_names:
                freq = group[feature_name].value_counts(normalize=True)
                # reindex fills values unseen in this class with 0 in one pass
                # (replaces the original per-value get/assign loop).
                freq = freq.reindex(all_feature_values[feature_name],
                                    fill_value=0)
                class_dict[class_name]['PFeature'][feature_name] = dict(freq.items())
        return class_dict

    def predictBySeries(self, data):
        """Predict the class label for one sample (pd.Series: feature -> value).

        Scores each class by summing log-probabilities (avoids underflow from
        multiplying many small numbers); 1e-5 guards against log(0) for values
        never seen with a class.
        """
        best_rate = None
        best_class = None
        for class_name, info in self.model.items():
            rate = np.log(info['PClass'])
            PFeature = info['PFeature']

            for feature_name, val in data.items():
                props = PFeature.get(feature_name)
                if not props:
                    # Feature unknown to the model: ignore it.
                    continue
                rate += np.log(props.get(val, 0) + 1e-5)

            # Fixed: identity comparison with None ('is', not '==').
            if best_rate is None or rate > best_rate:
                best_rate = rate
                best_class = class_name

        return best_class

    def predict(self, data):
        """Predict a single pd.Series sample, or every row of a pd.DataFrame."""
        if isinstance(data, pd.Series):
            return self.predictBySeries(data)
        return data.apply(lambda d: self.predictBySeries(d), axis=1)



from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt

if __name__ == "__main__":
    # Demo: train on the iris dataset and plot predictions against the truth.
    dataset = load_iris()
    X = dataset.data
    Y = dataset.target
    X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)

    NBC = NaiveBayesianClassifier()
    # Fixed: column labels must be an ordered sequence, not a set ({'label'}
    # has no defined order and is fragile as a pandas `columns` argument).
    NBC.fit(pd.DataFrame(X_train, columns=dataset.feature_names),
            pd.DataFrame(Y_train, columns=['label']))

    test = pd.DataFrame(X_test, columns=dataset.feature_names)
    print(test.shape[0])
    predict = NBC.predict(test)

    # Green squares: ground truth; red triangles: predicted labels.
    plt.scatter(range(test.shape[0]), Y_test, s=80, color="green", alpha=1, marker='s')
    plt.scatter(range(test.shape[0]), predict, s=40, color="red", alpha=1.0, marker='^')
    plt.show()



