import pandas as pd


class NaiveBayes(object):
    """Categorical Naive Bayes classifier.

    Scores each candidate label y for a feature assignment x1..xk as
        F(y) = P(y) * prod_i P(x_i | y)
    and returns the argmax. Probabilities are maximum-likelihood estimates
    from the training frame (no smoothing: an unseen feature/label pair
    yields probability 0 for that label).
    """

    def getTrainSet(self, path='weather_data.csv'):
        """Load a training set from a CSV file.

        Args:
            path: CSV file to read; defaults to 'weather_data.csv' for
                backward compatibility. All columns except the last are
                features; the last column is the class label.

        Returns:
            (trainData, labels): feature DataFrame and label Series.
        """
        dataSet = pd.read_csv(path)
        trainData = dataSet.iloc[:, :-1]  # feature columns
        labels = dataSet.iloc[:, -1]  # label column
        return trainData, labels

    def classify(self, trainData, labels, features):
        """Predict the label of one sample with Naive Bayes.

        Args:
            trainData: DataFrame of training features (index aligned
                with `labels`).
            labels: Series of training class labels.
            features: dict mapping feature-column name -> observed value
                for the sample to classify.

        Returns:
            The label with the highest posterior score.
        """
        # Prior probabilities P(y), estimated from label frequencies.
        P_y = labels.value_counts(normalize=True).to_dict()

        # Posterior score F(y) = P(y) * prod P(x|y).
        # BUG FIX: the previous version computed count(x,y)/count(y)
        # (already the conditional P(x|y)) and then divided by P(y) a
        # second time, giving prod P(x|y) / P(y)^(k-1) — which inverts
        # the effect of the prior and can flip the argmax.
        F = {}
        for label, prior in P_y.items():
            label_mask = labels == label
            label_count = int(label_mask.sum())  # > 0, label comes from value_counts
            posterior = prior
            for name, value in features.items():
                # P(x|y) = count(x and y) / count(y)
                xy_count = int(((trainData[name] == value) & label_mask).sum())
                posterior *= xy_count / label_count
            F[label] = posterior

        # Label with the largest posterior score.
        return max(F, key=F.get)


if __name__ == '__main__':
    classifier = NaiveBayes()
    # Load the feature table and label column from the CSV training file.
    train_features, train_labels = classifier.getTrainSet()
    # The sample whose class we want to predict.
    sample = {'Outlook': 'Sunny', 'Temp': 'Cool', 'Humidity': 'High', 'Windy': 'Strong'}
    # Run the classifier and report the predicted label.
    prediction = classifier.classify(train_features, train_labels, sample)
    print(sample, '属于', prediction)
