import pandas as pd
import numpy as np
import openpyxl
import math
from treelib import Tree, Node

# Decision-tree container from treelib; print its auto-generated identifier.
tree = Tree()
print(tree.identifier)

# Full list of survey columns present in the spreadsheets (kept for reference).
A = ['树龄','具体生长位置','调查编号','权属','编号','中文名','冠幅  （m）','胸围  （cm）','保护级别','树高  （m）','生长势']

# Training and test workbooks.
file_path = r'tree.xlsx'
file_path2 = r'test.xlsx'

# Load both sheets (first column becomes the index) and drop the columns
# that carry no predictive information for the growth-status label.
text = pd.read_excel(file_path, index_col=0)
test = pd.read_excel(file_path2, index_col=0)
for frame in (text, test):
    frame.drop(['具体生长位置','权属','编号','保护级别'], axis=1, inplace=True)
def grow(dataset):
    """Map the raw growth-status column to a boolean healthy flag.

    Args:
        dataset: DataFrame with a '生长势' column of status strings.

    Returns:
        Boolean Series: True where the status is '旺盛' or '一般',
        False otherwise (callers cast it to int for the 0/1 label).
    """
    # isin replaces the original sum of two equality masks, which relied on
    # bool-Series addition acting as an elementwise OR.
    return dataset['生长势'].isin(['旺盛', '一般'])
# Replace the raw growth-status strings with a binary label:
# 1 for healthy ('旺盛' / '一般'), 0 otherwise, in both frames.
for frame in (text, test):
    frame['生长势'] = grow(frame).astype('int')
# A protection-level -> index encoding was tried but is unused:
#labels = data['保护级别'].unique().tolist()
#data['保护级别'] = data['保护级别'].apply(lambda n: labels.index(n))


#信息熵计算
def calcShannonEnt(dataSet, line):
    """Shannon entropy of the joint (feature value, class label) distribution.

    Pairs the value in positional column `line` with the class label in the
    last column of `dataSet` and computes the entropy (in bits) of the
    resulting joint distribution.

    Args:
        dataSet: pandas DataFrame; the last column holds the class label.
        line: positional index of the feature column to pair with the label.

    Returns:
        float: entropy in bits; 0.0 for an empty frame.
    """
    numEntries = len(dataSet)  # sample count; no need for a counting loop
    if numEntries == 0:
        return 0.0
    # Count each (feature value, label) combination.  The key mirrors the
    # original str(fea) + str(label) encoding so results are unchanged.
    labelCounts = {}
    for fea, label in zip(dataSet.iloc[:, line], dataSet.iloc[:, -1]):
        key = str(fea) + str(label)
        labelCounts[key] = labelCounts.get(key, 0) + 1
    shannonEnt = 0.0
    for count in labelCounts.values():
        prob = count / numEntries
        shannonEnt -= prob * math.log2(prob)  # log base 2
    return shannonEnt


#返回决策树方法下最可能的生长势趋势
def gailv(dataSet, line, name):
    """Majority class label among rows matching a feature value.

    Compares column `line` (as strings) against `name` and returns the
    majority 0/1 label of the matching rows, favouring 1 on ties and when
    no row matches.
    """
    target = str(name)
    matched = [
        int(label)
        for value, label in zip(dataSet.iloc[:, line], dataSet.iloc[:, -1])
        if str(value) == target
    ]
    ones = sum(1 for label in matched if label == 1)
    zeros = len(matched) - ones
    return 1 if ones >= zeros else 0

#划分某一条件下生成的训练集
def fenzi(dataSet, line, name):
    """Select the sub-frame of rows whose value in column `line` equals `name`.

    Args:
        dataSet: pandas DataFrame holding (at least) the 7 survey columns.
        line: positional index of the column to filter on (string compare).
        name: value to match, compared as a string.

    Returns:
        A new DataFrame (fresh 0..k index) with the first 7 columns of the
        matching rows; an empty frame with the declared columns if nothing
        matches.
    """
    newData = pd.DataFrame(columns=('调查编号','树龄', '中文名', '冠幅  （m）', '胸围  （cm）', '树高  （m）', '生长势'))
    # Collect matching rows first and concatenate once: repeated
    # pd.concat inside the loop (the original pattern) is quadratic.
    matches = [
        dataSet.iloc[i:i + 1, 0:7]
        for i in range(0, len(dataSet))
        if str(dataSet.iloc[i, line]) == name
    ]
    if not matches:
        return newData
    return pd.concat([newData.iloc[:, 0:7], *matches], ignore_index=True).iloc[:, 0:7]

# ---- Train a fixed 2-level decision tree on the training frame, then
# ---- evaluate its accuracy on the test frame.
data = text
maxS = 0  # largest entropy seen so far in the current search
lie = 0  # column index holding that largest entropy
a = [1,2,3,4,5]  # candidate feature columns still available for splitting
fenzhi1 = 0  # column chosen for the first-level split
fenzhi2 = {}  # columns chosen for the second-level splits
correct = 0  # number of correctly classified test rows
tree.create_node(tag='root', identifier='root', data=0)  # add the root node
for t in range(0,2):  # build only a 2-level tree structure
    for i in a:  # find the column with the largest entropy
        s = calcShannonEnt(data, i)
        if s > maxS:
            lie = i
            maxS = s
    maxS = 0
    labelCounts = {}
    a.remove(lie)
    fenzhi1 = lie  # column with the largest entropy
    # NOTE(review): at t==1 this overwrites the first-level choice made at
    # t==0 — confirm fenzhi1 is meant to track the FIRST split only.
    for featVec in range(0, len(data)):  # scan every row, one at a time
        fea = text.head(350).iloc[featVec, lie]
        # NOTE(review): reads text.head(350) rather than data — looks
        # unintentional (and can raise if len(data) > 350); confirm.
        if fea not in labelCounts.keys():  # record each distinct split value in the column
            labelCounts[fea] = 0
        labelCounts[fea] += 1

    if t==0:  # add the first-level tree nodes, one per distinct value
        for key in labelCounts:
            key = str(key)
            tree.create_node(key, key, parent='root', data=gailv(data, lie, key))  # insert node; data holds the branch's majority label

    else:  # add the second-level tree nodes
        line  = 0
        lie+=1
        # NOTE(review): this +1 offset (and lie having been reassigned at
        # t==1 above) decides which column fenzi filters on — verify it
        # still points at the first-level split column.
        for subtree in tree.children('root'):
            name = subtree.tag
            newData = fenzi(data,lie,name)
            for i in a:
                s = calcShannonEnt(newData, i)  # entropy of each remaining variable within this branch
                if s > maxS:
                    line = i
                    maxS = s
            maxS = 0
            fenzhi2[line] = 0  # record this branch's second-level split column
            labelCounts = {}

            for featVec in range(0, len(newData)):  # scan every row of the branch subset
                fea = newData.iloc[featVec, line]
                if fea not in labelCounts.keys():
                    labelCounts[fea] = 0
                labelCounts[fea] += 1
            for key in labelCounts:  # split each first-level node again
                key = str(key)
                tree.create_node(subtree.tag+"  "+key, subtree.tag+"  "+key, parent=subtree.tag, data=gailv(newData, line, key))  # insert node; treelib requires globally unique ids, so the second-level id is "<parent tag>  <value>"

for i in range(0,len(test)):  # evaluate on the test set
    tag1 = str(test.iloc[i,fenzhi1+1])
    # NOTE(review): the +1 here and the -1 below are manual column offsets
    # between the training and test frames — fragile; confirm alignment.
    for key in fenzhi2:
        tag2 = str(test.iloc[i,key-1])
        if (tree.nodes.get(tag1+"  "+tag2,0)):  # row falls inside the learned tree -> compare prediction
            if(tree.nodes[tag1+"  "+tag2].data==test.iloc[i,-1]):
                correct += 1

            break
correct =str(correct/len(test))  # accuracy = correct / total test rows
print("准确率:"+ correct)
#tree.show()




