import os
import struct
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from    alive_progress import alive_bar

# Data loading
def load_mnist(path, kind='train'):
    """Load MNIST images and labels from the IDX files under `path`.

    Parameters
    ----------
    path : str
        Directory containing '<kind>-labels.idx1-ubyte' and
        '<kind>-images.idx3-ubyte'.
    kind : str
        File-name prefix of the split, e.g. 'train' or 't10k'.

    Returns
    -------
    tuple
        (images, labels): images is a (n, rows*cols) uint8 array with one
        flattened image per row; labels is a (n,) uint8 array.

    Raises
    ------
    ValueError
        If either file's IDX magic number is wrong (2049 for labels,
        2051 for images).
    """
    labels_path = os.path.join(path, kind + '-labels.idx1-ubyte')
    images_path = os.path.join(path, kind + '-images.idx3-ubyte')

    with open(labels_path, 'rb') as lbpath:
        # IDX1 header: magic number and item count, big-endian uint32.
        magic, n = struct.unpack('>II', lbpath.read(8))
        if magic != 2049:
            raise ValueError('Invalid magic number {} in {}'.format(magic, labels_path))
        labels = np.fromfile(lbpath, dtype=np.uint8)

    with open(images_path, 'rb') as imgpath:
        # IDX3 header: magic number, image count, rows, cols.
        magic, num, rows, cols = struct.unpack('>IIII', imgpath.read(16))
        if magic != 2051:
            raise ValueError('Invalid magic number {} in {}'.format(magic, images_path))
        # Flatten each image to one row; rows*cols generalizes the
        # previously hard-coded 784 (= 28*28).
        images = np.fromfile(imgpath,
                             dtype=np.uint8).reshape(len(labels), rows * cols)

    return images, labels

def visual_mnist(images, labels):
    """Display one sample image for each of the ten digit classes.

    `images` holds flattened 784-pixel rows; `labels` gives each row's
    digit. The first occurrence of every digit 0-9 is shown on a shared
    2x5 grid of axes.
    """
    fig, axes = plt.subplots(nrows=2, ncols=5, sharex=True, sharey=True)

    # One axis per digit class, in reading order.
    for digit, axis in enumerate(axes.flatten()):
        sample = images[labels == digit][0].reshape(28, 28)
        axis.imshow(sample, cmap='Greys', interpolation='nearest')

    # Ticks are shared, so clearing the first axis clears them all.
    first = axes.flatten()[0]
    first.set_xticks([])
    first.set_yticks([])
    plt.tight_layout()
    plt.show()

# Node definition
class Node(object):
    """One node of an ID3 decision tree."""

    def __init__(self, dim, feature, label, child=None):
        # Index of the feature this node splits on; -1 for a leaf.
        self.dim = dim
        # Feature value that routed samples to this node (0 at the root,
        # -1 has no special meaning for leaves in practice).
        self.feature = feature
        # Predicted class for a leaf node; -1 for an internal node.
        self.label = label
        # List of child nodes, or None for a leaf.
        self.child = child

    def __str__(self):
        # Printing a node shows its label.
        return '{}'.format(self.label)

class DTree():  # Decision tree, ID3 algorithm
    """ID3 decision tree for discrete-valued features.

    The tree is built eagerly in the constructor. A node stops splitting
    when it is pure or its label entropy drops below the threshold `e`,
    in which case it becomes a majority-vote leaf.
    """

    def __init__(self, x, y, e=0.2):
        # Entropy threshold below which a node becomes a leaf.
        self.e = e
        self.root = self.fit(x, y)

    def fit(self, x, y, feature=0):
        """Recursively build the subtree for the samples (x, y).

        `feature` is the feature *value* that routed these samples here
        (0 for the root). Returns the subtree's root Node, or None when
        no samples reach this branch.
        """
        m, _ = x.shape
        labels = np.unique(y)

        if len(labels) == 0:
            # Empty branch: nothing to learn.
            return None

        if len(labels) == 1:
            # Pure node -> leaf predicting the single remaining class.
            return Node(-1, feature, y[0])

        if self.calc_ent(y) < self.e:
            # Nearly pure -> majority-vote leaf.
            label_counts = {}
            for lbl in y:
                label_counts[lbl] = label_counts.get(lbl, 0) + 1
            # BUGFIX: the original np.argmax(label_counts.values()) ran
            # argmax over a 0-d object array and always returned 0, so
            # the leaf took the first label seen, not the most common.
            majority = max(label_counts, key=label_counts.get)
            return Node(-1, feature, majority)

        # Otherwise split on the feature with maximal information gain.
        dim = self.calc_gain(x, y)

        # Partition samples by their value of feature `dim`.
        x_cut = {}  # feature value -> [list of rows, list of labels]
        for i in range(m):
            bucket = x_cut.setdefault(x[i, dim], [[], []])
            bucket[0].append(x[i, :])
            bucket[1].append(y[i])

        child = [self.fit(np.array(rows), np.array(labs), val)
                 for val, (rows, labs) in x_cut.items()]
        return Node(dim, feature, -1, child)

    def score(self, x_test, y_test):
        """Return classification accuracy on the test set."""
        y_pred = []
        with alive_bar(len(y_test)) as bar:  # progress bar over the test set
            for i in range(len(y_test)):
                y_pred.append(self.predict(x_test[i, :]))
                bar()

        # Mean of the elementwise prediction/label agreement.
        return np.mean(np.array(y_pred) == y_test)

    def predict(self, x):
        """Predict the class label for a single sample `x`."""
        return self.serach(x)

    def calc_ent(self, y):
        """Shannon entropy (base 2) of the label vector `y`."""
        _, counts = np.unique(y, return_counts=True)
        p = counts / len(y)
        # np.unique never yields zero counts, so log2 is always defined.
        return -np.sum(p * np.log2(p))

    def cond_ent(self, x, y, axis=0):
        """Conditional entropy H(y | x[:, axis])."""
        n = len(y)
        groups = {}  # feature value -> labels of samples with that value
        for i in range(n):
            groups.setdefault(x[i, axis], []).append(y[i])
        return sum(len(g) / n * self.calc_ent(np.array(g))
                   for g in groups.values())

    def calc_gain(self, x, y):
        """Index of the feature with maximal information gain."""
        ent = self.calc_ent(y)
        _, n = x.shape
        gain = ent - np.array([self.cond_ent(x, y, i) for i in range(n)])
        return np.argmax(gain)

    def serach(self, x):  # (sic) name kept for backward compatibility
        """Walk `x` down the tree and return the label of the leaf reached."""
        node = self.root
        while node.child is not None:
            for c in node.child:
                if x[node.dim] == c.feature:
                    node = c
                    break
            else:
                # BUGFIX: a feature value never seen during training used
                # to spin forever here; fall back to the first child so
                # the walk always terminates.
                node = node.child[0]
        return node.label

# Main entry point
def main():
    """Train an ID3 decision tree on binarized MNIST and report accuracy."""

    # Load both splits and print their shapes and types.
    x_train, y_train = load_mnist('../Datasets/mnist')
    print('Images_train Shape:{:<25s} Labels_train Shape:{:<25s}'
            .format(str(x_train.shape), str(y_train.shape)))
    print('Images_train type :{:<25s} Labels_train type :{:<25s}'
            .format(str(type(x_train)), str(type(y_train))))

    x_test, y_test = load_mnist('../Datasets/mnist', 't10k')
    print('Images_test Shape :{:<25s}  Labels_test Shape:{:<25s}'
            .format(str(x_test.shape), str(y_test.shape)))
    print('Images_test type  :{:<25s}  Labels_test type :{:<25s}'
            .format(str(type(x_test)), str(type(y_test))))

    # Binarize the pixel features around a mid-gray threshold.
    threshold = 127
    x_train = (x_train > threshold).astype(int)
    x_test = (x_test > threshold).astype(int)

    # Build the model (training happens inside the constructor).
    model = DTree(x_train, y_train)
    print('complete!')

    # Evaluate on the held-out test split.
    print('Decision Tree ACC:', model.score(x_test, y_test))

if __name__ == '__main__':
    main()