import pandas as pd
from pandas import DataFrame
import DataMiningtools as DMTS

class DecisionTree (): # CART-style decision tree classifier
    """Decision tree over categorical attributes, split by Gini index.

    The tree is stored as parallel flat lists indexed by node id
    (node 0 is the root):

    - ``node``          : per-node list of child node ids ([] = leaf)
    - ``train_list``    : per-node training subset (DataFrame)
    - ``div_att``       : per-node split attribute index ([] = not split)
    - ``att_used_list`` : per-node flags, 1 = attribute already used on
      the path from the root to this node
    """

    # Project-local helper toolbox.  NOTE(review): class attribute, so all
    # DecisionTree instances share a single DataMiningTools object.
    DMT = DMTS.DataMiningTools()

    def __ChooseAttribute (self, node_id):
        """Pick the best split attribute for node ``node_id``.

        Returns ``[attribute_index, gini_delta]`` where ``gini_delta`` is
        the largest Gini-index reduction found (0 when no unused
        attribute improves on 0, in which case the caller treats the node
        as a leaf).
        """
        sub_train = self.train_list[node_id]
        att_used = self.att_used_list[node_id]

        col = sub_train.shape[1]
        max_gini_delta = 0
        attribute = 0

        for i in range(0, col):
            if att_used[i] == 1: # attribute already used (or is the target)
                continue

            gini_delta = self.__Gini_delta(sub_train, i)
            if gini_delta > max_gini_delta:
                max_gini_delta = gini_delta
                attribute = i

        return [attribute, max_gini_delta]


    def __AddNode (self, node_id, attribute, a_value):
        """Create a child of ``node_id`` holding the rows where
        ``attribute == a_value``; return the child's node id."""
        length = len(self.node)
        sub_train = self.train_list[node_id]

        # BUG FIX: copy the parent's used-attribute flags.  The original
        # aliased the parent's list, so every child, the parent, and all
        # ancestors shared one object — marking an attribute used in any
        # descendant wrongly excluded it from unrelated subtrees.
        child_att_used = list(self.att_used_list[node_id])
        child_att_used[attribute] = 1 # child may not re-split on this attribute

        child_list = self.node[node_id] # parent's child-slot list
        self.div_att[node_id] = attribute

        while len(child_list) < a_value + 1: # grow slots up to a_value
            child_list.append(0)

        self.node.append([])     # the new node starts as a leaf
        self.div_att.append([])  # no split attribute yet
        self.att_used_list.append(child_att_used)

        child_list[a_value] = length # record child position under the parent

        # Partition: rows of the parent's subset with attribute == a_value.
        child_train = sub_train[sub_train.iloc[:, attribute] == a_value]
        self.train_list.append(child_train)

        return length


    def __DivideNode (self, node_id, attribute):
        """Split ``node_id`` into one child per possible value of ``attribute``."""
        a_num = self.a_num_arr[attribute]
        for a_value in range(0, a_num):
            self.__AddNode(node_id, attribute, a_value)


    def __Gini (self, sub_train):
        """Gini impurity of ``sub_train``; 0 for an empty set."""
        train_size = sub_train.shape[0]

        if train_size == 0: # empty subset contributes no impurity
            return 0

        c_count = [0] * self.c_num # per-class sample counts
        for i in range(0, train_size):
            c_value = sub_train.iloc[i, self.categorization]
            c_count[c_value] += 1

        G = 1
        for count in c_count:
            G -= (count / train_size) ** 2

        return G


    def __Gini_delta (self, sub_train, attribute):
        """Gini-index reduction from splitting ``sub_train`` on ``attribute``."""
        train_size = sub_train.shape[0]
        if train_size == 0: # guard against division by zero on empty subsets
            return 0

        GA = 0 # weighted impurity after the split
        for a_value in range(0, self.a_num_arr[attribute]):
            data = sub_train[sub_train.iloc[:, attribute] == a_value]
            GA += (data.shape[0] / train_size) * self.__Gini(data)

        return self.__Gini(sub_train) - GA


    def DecisionTree_Model (self, sample):
        """Classify ``sample`` (a single-row DataFrame).

        Descends from the root following the sample's attribute values,
        then returns the majority class index of the leaf's training
        subset (ties and empty leaves resolve to class 0).
        """
        point = 0 # current node, starting at the root

        while len(self.node[point]) != 0: # descend until a leaf
            attribute = self.div_att[point]
            a_value = sample.iloc[0, attribute]
            point = self.node[point][a_value]

        # Re-number the leaf's rows so positional access works; drop=True
        # keeps the column layout identical to the original
        # reset-then-slice-off-the-index dance.
        train = self.train_list[point].reset_index(drop=True)
        c_value = self.DMT.ValueCounter(train, self.categorization)

        c_max = 0
        for i in range(0, len(c_value)):
            if c_value[i] > c_value[c_max]:
                c_max = i

        return c_max


    def __init__(self, data, categoriaztion) -> None:
        """Build the tree from ``data``.

        ``categoriaztion`` (sic — misspelled name kept so existing
        keyword-argument callers don't break) is the column index of the
        target class.
        """
        self.node = []          # per-node child ids ([] = leaf)
        self.train_list = []    # per-node training subset
        self.div_att = []       # per-node split attribute ([] = none)
        self.att_used_list = [] # per-node used-attribute flags
        self.threshold = 0.001  # minimum Gini reduction required to split
        # Presumably the number of distinct values per column — TODO confirm
        # against DataMiningTools.AttributeMax.
        self.a_num_arr = self.DMT.AttributeMax(data)
        self.categorization = categoriaztion
        self.c_num = self.a_num_arr[self.categorization]

        # Root node.
        self.node.append([])
        self.train_list.append(data)
        self.div_att.append([])

        col = data.shape[1]
        # BUG FIX: exactly one flag per column.  The original appended both
        # a 1 AND a 0 for the target column (missing else/continue), making
        # the list one element too long.  The target column is pre-marked
        # used so it is never chosen as a split attribute.
        self.orig_att_used = [1 if i == categoriaztion else 0
                              for i in range(col)]
        self.att_used_list.append(self.orig_att_used)

        # Grow the tree: nodes are appended as they are created, and ``i``
        # walks the list until every node has been examined.  A node stays
        # a leaf when it is pure/empty, or when the best split's Gini
        # reduction does not beat the threshold.
        i = 0
        while i < len(self.node):
            train = self.train_list[i]
            count = self.DMT.AttributeNum(train)[categoriaztion]

            if count > 1: # more than one class present: try to split
                attribute, gini_delta = self.__ChooseAttribute(i)
                if gini_delta > self.threshold:
                    self.__DivideNode(i, attribute)

            i += 1