from functools import reduce
import pandas as pd
import numpy as np

column_count = None

# Shannon entropy (base 2) of the label column
def cal_information_entropy(data):
    """Return the information entropy of the last (label) column of *data*."""
    label_col = data.iloc[:, -1]
    counts = label_col.value_counts()
    total = len(label_col)
    # Ent(D) = -sum_k p_k * log2(p_k), one term per distinct class
    return -sum((c / total) * np.log2(c / total) for c in counts)


# Information gain obtained by splitting on attribute *a*
def cal_information_gain(data, a):
    """Return Ent(D) - sum_v |D_v|/|D| * Ent(D_v) for attribute *a* of *data*."""
    base_entropy = cal_information_entropy(data)
    total_rows = data.shape[0]
    conditional = 0
    # Weighted entropy of each subset induced by a value of attribute a
    for value, count in data[a].value_counts().items():
        subset = data.loc[data[a] == value]
        conditional += (count / total_rows) * cal_information_entropy(subset)
    return base_entropy - conditional


# Majority class of the label column
def get_most_label(data):
    """Return the most frequent value in the last (label) column of *data*."""
    # value_counts sorts descending by count, so the first index entry wins
    counts = data.iloc[:, -1].value_counts(sort=True)
    return counts.index[0]


# Pick the attribute with the largest information gain
def get_best_feature(data):
    """Return the feature column (all but the last) whose information gain is maximal."""
    gains = {feature: cal_information_gain(data, feature) for feature in data.columns[:-1]}
    # max() returns the first maximum, matching the original stable sort's tie-break
    return max(gains.items(), key=lambda kv: kv[1])[0]


# Partition *data* by the values of *best_feature*, dropping that column
def drop_exist_feature(data, best_feature):
    """Return a list of (value, sub-frame) tuples, one per distinct value of
    *best_feature*; each sub-frame has the *best_feature* column removed."""
    partitions = []
    for value in pd.unique(data[best_feature]):
        subset = data[data[best_feature] == value].drop([best_feature], axis=1)
        partitions.append((value, subset))
    return partitions


# Recursively build the ID3 decision tree
def create_tree(data):
    """Build a nested-dict decision tree from *data* (last column = label).

    Relies on the module-level ``column_count`` mapping each feature name to
    the full list of values it takes in the original data set, so branches can
    be created even for values absent from the current subset.
    """
    global column_count

    label_col = data.iloc[:, -1]
    # Pure node: every sample has the same class -> return it as a leaf.
    if len(label_col.value_counts()) == 1:
        return label_col.values[0]
    # Every feature is constant on this subset -> majority-class leaf.
    if all(len(data[col].value_counts()) == 1 for col in data.iloc[:, :-1].columns):
        return get_most_label(data)

    best_feature = get_best_feature(data)  # split on the highest-gain feature
    branches = {}
    present_vals = pd.unique(data[best_feature])
    # Feature values recorded globally but absent from this subset map to the
    # majority class of the current node (empty set when nothing is missing).
    for absent in set(column_count[best_feature]) - set(present_vals):
        branches[absent] = get_most_label(data)
    # Recurse into each partition, with the best_feature column removed.
    for value, subset in drop_exist_feature(data, best_feature):
        branches[value] = create_tree(subset)
    return {best_feature: branches}


# Classify one sample by walking the tree
def predict(tree, test_data):
    """Return the class label *tree* assigns to *test_data* (a feature->value dict)."""
    feature = next(iter(tree))
    branch = tree[feature][test_data.get(feature)]
    # Internal node (dict) -> keep descending; anything else is a leaf label.
    return predict(branch, test_data) if isinstance(branch, dict) else branch


def main():
    """Train an ID3 decision tree on wine.csv and print a confusion matrix
    plus accuracy.

    NOTE(review): the "test" rows are lines 101+ of the same file the tree
    was trained on, so the reported accuracy is optimistic.
    """
    global column_count

    # Load the training data.
    data = pd.read_csv('./dataset/wine.csv')
    # Record every value each feature takes, as the global lookup table that
    # create_tree uses to cover feature values missing from a subset.
    column_count = dict([(ds, list(pd.unique(data[ds]))) for ds in data.iloc[:, :-1].columns])

    # Build the decision tree.
    decision_tree = create_tree(data)
    # Column names of the CSV, used to turn each parsed row into a dict.
    labels = ['Alcohol', 'Malic acid', 'Ash', 'Alcalinity of ash', 'Magnesium', 'Total phenols', 'Flavanoids',
              'Nonflavanoid phenols', 'Proanthocyanins', 'Color intensity', 'Hue', 'OD280/OD315 of diluted wines',
              'Proline', 'Class']
    test_set = []
    with open('./dataset/wine.csv') as dataset_file:
        # Skip the first 100 lines of the file.
        for _ in range(100):
            dataset_file.readline()
        feature_list = dataset_file.readline().rstrip('\n').split(',')
        while feature_list[0] != '':  # readline() at EOF yields '' -> [''], ending the loop
            # Fields containing '.' are parsed as float, the rest as int.
            feature_list = list(map(lambda s: float(s) if '.' in s else int(s), feature_list))

            test_obj = {}
            for label_idx, label in enumerate(labels):
                test_obj[label] = feature_list[label_idx]
            test_set.append(test_obj)

            feature_list = dataset_file.readline().rstrip('\n').split(',')

    # Map class labels 1/2/3 to confusion-matrix indices 0/1/2.
    idx_dict = {1: 0, 2: 1, 3: 2}
    confusion_matrix = [[0 for _ in range(3)] for _ in range(3)]
    for test_obj in test_set:
        result = predict(decision_tree, test_obj)
        # Rows index the predicted class, columns the true class.
        confusion_matrix[idx_dict[result]][idx_dict[test_obj['Class']]] += 1

    # Display the confusion matrix ("混淆矩阵" = confusion matrix).
    print('混淆矩阵:')
    class_list = list(idx_dict.keys())
    print(reduce(lambda prev, curr: prev + f'{curr: ^4d}', class_list, ' ' * 4))
    for index, row in enumerate(confusion_matrix):
        row_str = reduce(lambda prev, curr: prev + f'{curr: ^4d}', row, f'{class_list[index]: ^4d}')
        print(row_str)
    # Accuracy = diagonal total / grand total ("准确率" = accuracy).
    correct_total = err_total = 0
    for row_idx, row in enumerate(confusion_matrix):
        for col_idx, col in enumerate(row):
            if row_idx == col_idx:
                correct_total += col
            else:
                err_total += col
    print(f'准确率: {correct_total / (correct_total + err_total) * 100: .6f}%')


if __name__ == '__main__':
    main()
