# -*- coding: utf-8 -*-
"""
Created on Tue May  7 13:25:21 2019

@author: Soly Liang
"""
import numpy as np

def AnalysisData(X):
    """Inspect a 2-D sample table X (a list of rows).

    Returns (DataType, X1):
      DataType[j][0] is the Python type of column j (taken from row 0);
      for str columns the remaining entries list the distinct values in
      order of first appearance, so numeric codes start at 1.
      X1 is X with every str entry replaced by its code in DataType[j];
      entries of any other type are copied through unchanged.
    """
    n_cols = len(X[0])
    DataType = []
    for j in range(n_cols):
        col_info = [type(X[0][j])]
        if col_info[0] is str:
            # collect distinct string values in first-appearance order
            for row in X:
                if row[j] not in col_info:
                    col_info.append(row[j])
        DataType.append(col_info)
    # numeric encoding: str value -> its position in col_info (>= 1)
    X1 = [
        [DataType[j].index(v) if DataType[j][0] is str else v
         for j, v in enumerate(row)]
        for row in X
    ]
    return DataType, X1

def most(Y):
    """Return the most frequent label in Y.

    Ties are broken in favour of the value that appears first in Y,
    matching the original manual-count implementation.

    Raises:
        ValueError: if Y is empty (the original swallowed this with a
            bare except, printed debug state, then crashed anyway).
    """
    if len(Y) == 0:
        raise ValueError('most() needs at least one label')
    counts = {}
    for y in Y:
        counts[y] = counts.get(y, 0) + 1
    # dicts preserve insertion (first-occurrence) order and max() keeps the
    # first key reaching the top count, so ties resolve as before.
    return max(counts, key=counts.get)

def Index(Y, rule='InfoGain'):
    """Impurity of a label collection Y.

    rule='InfoGain' returns the Shannon entropy (bits);
    rule='Gini' returns the Gini index 1 - sum(p_k^2).

    An empty Y is defined as entropy 0 / Gini 1, so empty branches
    (weighted by size 0 in the callers) contribute nothing.
    """
    if len(Y) == 0:
        if rule == 'InfoGain':
            return 0
        elif rule == 'Gini':
            return 1
    # class frequencies in first-occurrence order
    counts = {}
    for y in Y:
        counts[y] = counts.get(y, 0) + 1
    pk = np.array(list(counts.values())) / len(Y)
    if rule == 'InfoGain':
        return sum(-pk * np.log2(pk))
    elif rule == 'Gini':
        return 1 - sum(pk ** 2)

def score(X, Y, considerfeature, DataType, rule='InfoGain'):
    """Evaluate one feature as a split candidate.

    Args:
        X: numpy array of numerically coded samples (rows = samples).
        Y: numpy array of labels.
        considerfeature: column index of the feature to score.
        DataType: per-column info from AnalysisData; DataType[f][0] is str
            for discrete features, whose values are coded 1..len-1.
        rule: 'InfoGain' (higher is better) or 'Gini' (lower is better).

    Returns:
        [value] for a discrete feature, or [value, threshold] for a
        continuous feature, where threshold is the best split point.
    """
    f = considerfeature
    m = len(Y)
    if DataType[f][0] == str:  # discrete feature
        Gain = Index(Y)  # start from the parent entropy, subtract children
        Gini = 0
        for v in range(1, len(DataType[f])):
            Yv = Y[X[:, f] == v]
            Gain -= Index(Yv) * len(Yv) / m
            Gini += Index(Yv, 'Gini') * len(Yv) / m
        Gain = [Gain]
        Gini = [Gini]
    else:  # continuous feature: candidate thresholds are midpoints
        values = np.sort(np.array(list(set(X[:, f]))))  # distinct, sorted
        t = (values[:-1] + values[1:]) / 2
        if len(t) == 0:
            # Only one distinct value: this feature cannot split the data.
            # Report the worst possible score so choose() never prefers it.
            # (The original crashed here: max()/min() of an empty list.)
            if rule == 'InfoGain':
                return [float('-inf'), values[0]]
            return [float('inf'), values[0]]
        base = Index(Y)  # hoisted: parent impurity is loop-invariant
        Gain = []
        Gini = []
        for tt in t:
            Y1 = Y[X[:, f] <= tt]
            Y2 = Y[X[:, f] > tt]
            Gain.append(base - Index(Y1) * len(Y1) / m - Index(Y2) * len(Y2) / m)
            Gini.append(Index(Y1, 'Gini') * len(Y1) / m + Index(Y2, 'Gini') * len(Y2) / m)
        Gain = [max(Gain), t[Gain.index(max(Gain))]]
        Gini = [min(Gini), t[Gini.index(min(Gini))]]
    if rule == 'InfoGain':
        return Gain
    if rule == 'Gini':
        return Gini

def choose(X, Y, FeatureName, considerfeature, DataType, rule='InfoGain', equalchoose=False):
    """Pick the best split feature among `considerfeature`.

    Scores every candidate with score(); maximizes information gain or
    minimizes the Gini index depending on `rule`.  With equalchoose=True
    and several equally good features, the user is prompted to pick one.

    Returns:
        [feature_index] for a discrete winner, or
        [feature_index, threshold] for a continuous one.
    """
    scores = [score(X, Y, f, DataType, rule) for f in considerfeature]
    values = [s[0] for s in scores]
    if rule == 'InfoGain':
        bestvalue = max(values)
    if rule == 'Gini':
        bestvalue = min(values)

    # Interactive tie-break when several features score the same.
    if values.count(bestvalue) != 1 and equalchoose:
        # BUGFIX: the original wrote `np.arange(...)[values==bestvalue]`,
        # but `values` is a plain list, so `values == bestvalue` is the
        # scalar False -> an empty (0, n) index array -> a crash when the
        # user's choice was applied.  Collect tied positions explicitly.
        tied = [i for i, v in enumerate(values) if v == bestvalue]
        print('现有%d个属性所得信息增益/基尼系数相等,请从中选择一个：' % len(tied))
        for k, i in enumerate(tied, start=1):
            print('%d:%s' % (k, FeatureName[considerfeature[i]]), end=', ')
        # int() instead of eval(): evaluating raw user input is unsafe.
        index = tied[int(input()) - 1]
    else:
        index = values.index(bestvalue)

    if len(scores[index]) > 1:  # continuous winner: also return its threshold
        return [considerfeature[index], scores[index][1]]
    else:
        return [considerfeature[index]]

def precut(Xt, Yt, Xv, Yv, feature, DataType, equalcut=True):
    """Pre-pruning test for a discrete split on `feature`.

    Compares validation accuracy of keeping this node as a leaf (which
    predicts the training-set majority class) against splitting it into
    one branch per feature value.

    Returns truthy when the node should be PRUNED, i.e. the caller keeps
    the leaf and does not split (this is how CreatTree uses the result);
    falsy means the split helps and should go ahead.  With equalcut=True
    ties are pruned as well.
    """
    leaf_correct = sum(Yv == most(Yt))   # validation hits without splitting
    split_correct = 0                    # validation hits after splitting
    for code in range(1, len(DataType[feature])):
        branch_train = Yt[Xt[:, feature] == code]
        branch_valid = Yv[Xv[:, feature] == code]
        if len(branch_valid):
            split_correct += sum(branch_valid == most(branch_train))
    if leaf_correct > split_correct:
        return True
    return bool(equalcut) and leaf_correct == split_correct

def aftercut(tree,Xt,Yt,Xv,Yv,FeatureName,LabelName,DataType,equalcut=False):
    """Post-pruning (reduced-error pruning), implemented recursively.

    Args (as used below):
        tree: nested dict {node_text: {branch_value: subtree_or_label}}.
        Xt, Yt: training samples/labels that reached this node.
        Xv, Yv: validation samples/labels that reached this node.
        FeatureName: feature display names; node text is '<name>=?'.
        LabelName: dict mapping label code -> display name.
        DataType: per-column value lists from AnalysisData (codes 1..).
        equalcut: also prune when pruning leaves accuracy unchanged.

    Returns:
        (pruned subtree or a leaf label, validation-correct count).

    NOTE(review): branch keys are looked up in DataType[f], which only
    works for discrete splits; a continuous node ('<=x'/'>x' branches)
    would raise ValueError here -- confirm only discrete trees reach this.
    """
    #--------------correct count AFTER pruning-------------
    right1=sum(Yv==most(Yt))  # prune = collapse to a leaf predicting the training majority class
    #--------------correct count BEFORE pruning-------------
    right0=0
    NodeText=list(tree.keys())[0]              # NodeText: this node's text, e.g. 'color=?'
    f=FeatureName.index(NodeText.strip('=?'))  # f: index of the feature tested here
    subs=tree[NodeText]                        # subs: branch value -> subtree or leaf label
    for key in subs.keys():
        k=DataType[f].index(key)   # convert branch value 'key' back to its numeric code k
        Xtk=Xt[Xt[:,f]==k,:]
        Ytk=Yt[Xt[:,f]==k]
        Xvk=Xv[Xv[:,f]==k,:]
        Yvk=Yv[Xv[:,f]==k]
        if type(subs[key])==dict: # branch holds a subtree: post-prune it first, keep its result and correct count
            CuttedTree,right=aftercut(subs[key],Xtk,Ytk,Xvk,Yvk,FeatureName,LabelName,DataType,equalcut)
            tree[NodeText][key]=CuttedTree
            right0+=right
        else:# branch is already a leaf
            # map the leaf's display name back to its label code
            y=list(LabelName.keys())[list(LabelName.values()).index(subs[key])]
            right0+=sum(Yvk==y)

    #print(NodeText,'   before pruning:',right0,'after pruning:',right1)
    if (right1>right0)|((right1==right0) and equalcut): # pruning improved accuracy, or kept it and equalcut is set
        return LabelName[most(Yt)],right1
        
    else:
        return tree,right0

def CreatTree(Xt,Yt,Xv,Yv,FeatureName,LabelName,considerfeature='all',equalchoose=False,DataType=None,rule='InfoGain',cut=None,equalcut=True,deep=0):
    """Train a decision tree and return it as a nested dict.

    Args:
        Xt, Yt: training samples (list of rows on the first call) and labels.
        Xv, Yv: validation samples/labels, used only for pruning.
        FeatureName: list of feature display names.
        LabelName: dict mapping label value -> display name used at leaves.
        considerfeature: 'all' or a list of feature indices still usable.
        equalchoose: ask the user to break ties between equally good features.
        DataType: internal; None on the first call, then filled by AnalysisData.
        rule: split criterion, 'InfoGain' or 'Gini'.
        cut: None, 'pre' (pre-pruning) or 'after' (post-pruning at the root).
        equalcut: prune when pruning leaves validation accuracy unchanged.
        deep: internal recursion depth (0 = root call).

    Returns:
        A nested dict {node_text: {branch: subtree_or_label}} or a single
        leaf label from LabelName.
    """
    #-------------initialisation--------------
    if considerfeature=='all':   # first call: consider every feature
        considerfeature=list(range(len(FeatureName)))
    else:
        # Work on a copy so the in-place remove() below can never mutate
        # a list the caller passed in (the original leaked this mutation
        # back to the top-level caller).
        considerfeature=list(considerfeature)
    if DataType is None:         # first call: analyse the data and encode X numerically
        mt=len(Yt)
        DataType,X=AnalysisData(Xt+Xv)
        X=np.array(X)
        Xt=X[:mt,:]
        Xv=X[mt:,:]
        Yt=np.array(Yt)
        Yv=np.array(Yv)
    #---------special stopping cases---------
    if (Yt==Yt[0]).all():        # every sample has the same class
        return LabelName[Yt[0]]
    if len(considerfeature)==0:  # no features left to test
        return LabelName[most(Yt)]
    if (Xt[:,considerfeature]==Xt[0,considerfeature]).all():  # samples identical on all remaining features
        return LabelName[most(Yt)]
    #--------best split feature and pre-pruning-------
    BestFeature=choose(Xt,Yt,FeatureName,considerfeature,DataType,rule,equalchoose)
    bf=BestFeature[0]
    if cut=='pre' and precut(Xt,Yt,Xv,Yv,bf,DataType,equalcut):
        # pre-pruning says the split does not help on the validation set
        return LabelName[most(Yt)]
    #------------branching-----------------
    if len(BestFeature)==1:  # discrete feature
        nt=FeatureName[bf]+'=?'  # nt = node text
        tree={nt:{}}
        considerfeature.remove(bf)  # a discrete feature is used at most once per path
        for k in range(1,len(DataType[bf])):
            mask_t=Xt[:,bf]==k
            mask_v=Xv[:,bf]==k
            Xtk=Xt[mask_t,:]; Ytk=Yt[mask_t]
            Xvk=Xv[mask_v,:]; Yvk=Yv[mask_v]
            if len(Ytk)==0:  # empty training branch: fall back to the parent majority class
                tree[nt][DataType[bf][k]]=LabelName[most(Yt)]
            else:
                tree[nt][DataType[bf][k]]=CreatTree(Xtk,Ytk,Xvk,Yvk,FeatureName,LabelName,considerfeature.copy(),equalchoose,DataType,rule,cut,equalcut,deep+1)
    else: # continuous feature: binary split at threshold t
        nt=FeatureName[bf]  # nt = node text
        tree={nt:{}}
        t=BestFeature[1]  # split threshold
        le_t=Xt[:,bf]<=t; le_v=Xv[:,bf]<=t
        #----------'<=t' branch-----------
        tree[nt]['<=%.3f'%t]=CreatTree(Xt[le_t,:],Yt[le_t],Xv[le_v,:],Yv[le_v],FeatureName,LabelName,considerfeature.copy(),equalchoose,DataType,rule,cut,equalcut,deep+1)
        #----------'>t' branch------------
        tree[nt]['>%.3f'%t]=CreatTree(Xt[~le_t,:],Yt[~le_t],Xv[~le_v,:],Yv[~le_v],FeatureName,LabelName,considerfeature.copy(),equalchoose,DataType,rule,cut,equalcut,deep+1)

    if cut=='after' and deep==0: # post-prune once, from the root
        tree,right=aftercut(tree,Xt,Yt,Xv,Yv,FeatureName,LabelName,DataType,equalcut)
    return tree

