"""
Decision Tree: Classification
入口均在最后
数据预处理和 NaiveBayes 一样灰度仅有0，1
分类0-9
"""
# mnist_train:60000
# mnist_test:10000
# acc: 0.8636
# time: 686s


import pandas as pd
import numpy as np
import time
from collections import Counter


def loadData(fileName):
    """Load a MNIST-style CSV file and binarize its pixel values.

    Column 0 holds the class label (0-9); the remaining columns hold
    grayscale pixels, thresholded at 128 into {0, 1}.

    :param fileName: path to a header-less CSV file
    :return: (features, labels) as numpy arrays
    """
    raw = pd.read_csv(fileName, header=None).values

    labels = raw[:, 0]
    pixels = raw[:, 1:]

    # Binarize: grayscale >= 128 becomes 1, everything below becomes 0.
    features = np.where(pixels >= 128, 1, 0)

    return features, labels


def calc_H_D(labelArray):
    """Empirical entropy H(D) of a 1-D numpy array of class labels.

    H(D) = -sum_k p_k * log2(p_k), where p_k is the fraction of
    samples belonging to class k.

    :param labelArray: 1-D np.array of labels, e.g. [0, 2, 4, 7, ...]
    :return: the empirical entropy (non-negative float)
    """
    total = labelArray.size
    entropy = 0.0

    # Accumulate -p*log2(p) over every distinct label that occurs.
    for label in set(labelArray):
        fraction = np.count_nonzero(labelArray == label) / total
        entropy += -1 * fraction * np.log2(fraction)

    return entropy


def calc_H_D_A(column, y_label):
    '''
    Empirical conditional entropy H(D|A) of the labels given feature A.

    H(D|A) = sum_i |D_i|/|D| * H(D_i), where D_i is the subset of
    samples whose feature A equals value i.

    :param column: 1-D np.array — values of feature A for every sample
    :param y_label: 1-D np.array — class label of every sample
    :return: the conditional entropy
    '''
    total = column.size
    H_D_A = 0

    # Iterate over each distinct value the feature takes.
    for value in set(column):
        mask = column == value
        # Weight |D_i|/|D| times the entropy of the matching label subset.
        # BUG FIX: the original called the undefined name `calcul_H_D`
        # (NameError at runtime); the entropy helper is `calc_H_D`.
        H_D_A += np.count_nonzero(mask) / total * calc_H_D(y_label[mask])

    return H_D_A


def findMaxFeature(x_train, y_train):
    """Pick the feature with maximum information gain g(D, A) = H(D) - H(D|A).

    :param x_train: 2-D np.array of binarized features (samples x features)
    :param y_train: 1-D np.array of labels
    :return: (index of the best feature, its information gain)
    """
    # BUG FIX: the original read the global `X_train` here instead of
    # the `x_train` parameter — it only worked by accident when called
    # from the top level of this script.
    features = x_train.shape[1]
    H_D = calc_H_D(y_train)
    max_gain = -10000
    max_feature = -1

    for feature in range(features):
        gain = H_D - calc_H_D_A(x_train[:, feature], y_train)
        if gain > max_gain:
            max_gain = gain
            max_feature = feature
    return max_feature, max_gain


def findCluster(labelArray):
    """Majority vote: return the most frequent label in labelArray."""
    # Counter tallies every label; most_common(1) yields [(label, count)].
    ((majority, _),) = Counter(labelArray).most_common(1)
    return majority


def cutData(x_train, y_train, ag, ai):
    """Select the samples whose feature `ag` equals `ai`, dropping column `ag`.

    :param x_train: 2-D np.array of features
    :param y_train: 1-D np.array of labels
    :param ag: index of the feature column to split on (removed from output)
    :param ai: feature value selecting which samples to keep
    :return: (reduced feature array, matching label array)
    """
    rest_train_data = []   # features after the split
    rest_train_label = []  # labels after the split

    # BUG FIX: the original iterated the global `X_train` instead of the
    # `x_train` parameter, so recursive calls in creTree kept re-splitting
    # the full training set with column indices that no longer lined up.
    for row, label in zip(x_train, y_train):
        if row[ag] == ai:
            # Drop column `ag`: its value is fully determined on this branch.
            rest_train_data.append(list(row[0:ag]) + list(row[ag + 1:]))
            rest_train_label.append(label)
    return np.array(rest_train_data), np.array(rest_train_label)


def creTree(X_train, y_train):
    """Recursively build an ID3 decision tree.

    Returns either a leaf label, or a nested dict shaped
    {feature_index: {feature_value: subtree_or_leaf}}.  Feature indices
    refer to the CURRENT (already reduced) column layout, matching the
    column removal done by cutData.
    """
    # Minimum information gain required to keep splitting.
    epsilon = 0.1

    print(f'create tree,data_length={len(X_train)}')

    remaining_classes = set(X_train is None or y_train)

    # Stop: every sample already belongs to the same class.
    if len(remaining_classes) == 1:
        return y_train[0]

    # Stop: no feature columns left -> fall back to majority vote.
    if len(X_train[0]) == 0:
        return findCluster(y_train)

    feature, gain = findMaxFeature(X_train, y_train)

    # Stop: the best split gains too little information -> majority vote.
    if gain < epsilon:
        return findCluster(y_train)

    # Branch on every value the chosen feature takes in this subset.
    branches = {}
    for value in set(X_train[:, feature]):
        sub_x, sub_y = cutData(X_train, y_train, feature, value)
        branches[value] = creTree(sub_x, sub_y)

    return {feature: branches}


def predict(x_test, tree):
    """Walk the decision tree until a leaf label is reached.

    NOTE: x_test is consumed destructively — each visited feature entry
    is deleted so the remaining list indices stay aligned with the
    reduced column layout that cutData produced during training.

    :param x_test: mutable list of binarized feature values for one sample
    :param tree: nested dict from creTree (leaves are numpy int64 labels)
    :return: the predicted class label
    """
    while True:
        # Every internal node is a single-entry dict: {feature: branches}.
        ((node_feature, branches),) = tree.items()

        if type(branches).__name__ != 'dict':
            # The stored value is already a leaf label.
            return branches

        # Descend: read this sample's value for the split feature, then
        # remove that feature so later indices stay aligned.
        observed = x_test[node_feature]
        del x_test[node_feature]
        tree = branches[observed]

        if type(tree).__name__ == 'int64':
            # Reached a numpy-int64 leaf produced by the tree builder.
            return tree


def test(x_test, y_test, tree):
    """Classify every test sample and print the running accuracy.

    :param x_test: 2-D np.array of binarized test features
    :param y_test: 1-D np.array of true labels
    :param tree: decision tree produced by creTree
    :return: overall accuracy in [0, 1] (new, backward compatible —
             the original implicitly returned None)
    """
    acc_num = 0
    # BUG FIX: the original iterated the global `X_test` instead of the
    # `x_test` parameter; the unused `acc = 0` local was dropped.
    for i in range(len(x_test)):
        # predict() mutates its first argument, so hand it a fresh list.
        y_pred = predict(list(x_test[i]), tree)
        if y_pred == y_test[i]:
            acc_num += 1
        print(f'find {i}th data cluster:y_pred={y_pred},y={y_test[i]}')
        print('now_acc=', acc_num / (i + 1))
    # Guard against an empty test set before dividing.
    return acc_num / len(x_test) if len(x_test) else 0.0


if __name__ == "__main__":

    start = time.time()

    X_train, y_train = loadData('Mnist/mnist_train.csv')
    X_test, y_test = loadData('Mnist/mnist_test.csv')

    tree = creTree(X_train, y_train)
    test(X_test, y_test, tree)

    end = time.time()

    print('run time:', end - start)
