from sklearn.datasets import load_iris
import numpy as np
from collections import defaultdict
from operator import itemgetter


# Load the iris data: four numeric features per sample
# (sepal length, sepal width, petal length, petal width).
dataset = load_iris()
X = dataset.data
y = dataset.target

# Discretize each feature to a binary value: 1 when the sample is at or
# above that column's mean, 0 otherwise (OneR needs categorical inputs).
attribute_means = X.mean(axis=0)
X_d = (X >= attribute_means).astype(int)


def train_feature_value(X, y_true, feature_index, value):
    """OneR building block: majority-class prediction for one feature value.

    Looks at every sample whose ``feature_index``-th feature equals
    ``value`` and returns the class to predict for that value, plus the
    number of mistakes that prediction makes.

    Parameters
    ----------
    X : 2-D array-like of discretized feature values.
    y_true : 1-D sequence of class labels, aligned with the rows of X.
    feature_index : int, the column of X to condition on.
    value : the feature value to condition on.

    Returns
    -------
    (most_frequent_class, error) : the majority class among matching
    samples, and the count of matching samples NOT in that class.

    Raises
    ------
    ValueError if no sample has ``value`` at ``feature_index`` (callers
    only pass values actually observed in the column).
    """
    # Tally class labels only for rows where the feature takes `value`;
    # non-matching rows contribute nothing.
    class_counts = defaultdict(int)
    for sample, label in zip(X, y_true):
        if sample[feature_index] == value:
            class_counts[label] += 1
    # Predict the majority class. Ties break on first-seen label, the
    # same order a stable descending sort would have produced.
    most_frequent_class, majority_count = max(
        class_counts.items(), key=itemgetter(1))
    # Every matching sample outside the majority class is an error.
    error = sum(class_counts.values()) - majority_count
    return most_frequent_class, error


def train_on_feature(X, y_true, feature_index):
    """Train a OneR predictor on a single feature.

    For each distinct value of the feature, finds the majority class via
    ``train_feature_value`` and records the mistakes that rule makes.

    Parameters
    ----------
    X : 2-D numpy array of discretized feature values.
    y_true : 1-D array of class labels, aligned with the rows of X.
    feature_index : int, the column of X to train on.

    Returns
    -------
    (predictors, total_error) : a dict mapping each observed feature
    value to the class to predict, and the summed error over all values.
    """
    values = set(X[:, feature_index])
    predictors = {}
    errors = []
    for current_value in values:
        most_frequent_class, error = train_feature_value(
            X, y_true, feature_index, current_value)
        predictors[current_value] = most_frequent_class
        errors.append(error)
    # Sum once after the loop (the original re-summed every iteration,
    # and would leave total_error undefined for an empty value set).
    total_error = sum(errors)
    return predictors, total_error


# Fit a OneR rule on every feature and report how each one performs;
# the feature with the lowest total error is the OneR classifier.
for feature_index in range(X_d.shape[1]):
    predictors, total_error = train_on_feature(X_d, y, feature_index)
    print("Feature %d: total error %d, predictors %s"
          % (feature_index, total_error, predictors))