# -*- coding: utf-8 -*-
"""
Created on Tue May 14 11:31:28 2019

@author: Soly Liang
"""
import TrainTree as tt
from DrawTree import draw
import numpy as np
from sklearn import datasets
from verify import *
'''
==================================================
                导入数据集
    其中数据集1,2从sklearn模块导入,3,4从文件导入
==================================================
'''
#---------------- 1. Iris flower dataset ----------------
# 4 features, all continuous (sepal/petal length & width)
# 3 classes (iris species), 150 samples (50 per class)
iris = datasets.load_iris()
X1, Y1 = iris["data"], iris["target"]
FeatureName1 = ['萼片长(cm)', '萼片宽(cm)', '花瓣长(cm)', '花瓣宽(cm)']
LabelName1 = {0: '1.setosa', 1: '2.versicolor', 2: '3.virginica'}
#---------------- 2. Wine dataset ----------------
# 13 features, all continuous
# 3 classes (wine category), samples: 59+71+48
wine = datasets.load_wine()
X2, Y2 = wine["data"], wine["target"]
FeatureName2 = list(wine['feature_names'])
LabelName2 = {0: '0类', 1: '1类', 2: '2类'}

#---------------- 3. Ionosphere dataset ----------------
# 34 continuous numeric features, 2 classes
# loaded from a local data file via verify.load_ionosphere
path = 'ionosphere.data'
ionosphere = load_ionosphere(path)
X3, Y3 = ionosphere["data"], ionosphere["target"]
FeatureName3 = ionosphere['feature_names']
LabelName3 = ionosphere['label_names']

#---------------- 4. Yeast protein-localization dataset ----------------
# 8 continuous features, 10 classes
# loaded from a local data file via verify.load_yeast
path = 'yeast.data'
yeast = load_yeast(path)
X4, Y4 = yeast["data"], yeast["target"]
FeatureName4 = yeast['feature_names']
LabelName4 = yeast['label_names']

'''
==================================================
   检验对率回归作为属性划分选择时不同设置下的结果
        归一化方式：None,min-max,z-score
        规则化系数：0.1,0.01,0.0001
        总共9种情况
        剪枝方法统一采用不剪枝
==================================================
'''

#------------------ parameter grid (experiment 1) --------------------
# Pruning fixed to post-pruning; splitting rule fixed to logistic regression.
# Vary normalisation (3 options) x regularisation coefficient (3 values) = 9 configs.
# NOTE: 'lamuda' is the project's spelling of the hold_out keyword; do not rename.
rule = ['LogisticReg']
cut = ['after']
lamuda = [0.1, 0.01, 0.0001]
normal = [None, 'min-max', 'z-score']

#------------------ dataset 1: iris ------------------
# hold_out evaluates every configuration in the grid with a 24-repetition
# 70/30 hold-out split; erro_to_order converts error rates into rank values.
print('=============\n考察第1个数据集：鸢尾花\n=============')
erro1 = hold_out(X1, Y1, FeatureName1, LabelName1, 24, 0.7,
                 rule=rule, cut=cut, normal=normal, lamuda=lamuda)
order1 = erro_to_order(erro1)

#------------------ dataset 2: wine ------------------
print('=============\n考察第2个数据集：酒类\n=============')
erro2 = hold_out(X2, Y2, FeatureName2, LabelName2, 24, 0.7,
                 rule=rule, cut=cut, normal=normal, lamuda=lamuda)
order2 = erro_to_order(erro2)

#------------------ dataset 3: ionosphere ------------------
print('=============\n考察第3个数据集：电离层\n=============')
erro3 = hold_out(X3, Y3, FeatureName3, LabelName3, 24, 0.7,
                 rule=rule, cut=cut, normal=normal, lamuda=lamuda)
order3 = erro_to_order(erro3)

#------------------ dataset 4: yeast ------------------
print('=============\n考察第4个数据集：蛋白质定位\n=============')
erro4 = hold_out(X4, Y4, FeatureName4, LabelName4, 24, 0.7,
                 rule=rule, cut=cut, normal=normal, lamuda=lamuda)
order4 = erro_to_order(erro4)


# Report per-dataset rank (and error %) for each algorithm configuration,
# then run the Friedman test over the rank matrix.
print('不同算法设置、不同数据集下的错误率排序结果')
print(['算法'+str(key) for key in order1.keys()])
print('数据集1:')
for rank, err in zip(order1.values(), erro1.values()):
    print('%d(%.1f%%)' % (rank, err*100), end=',')
print('\n数据集2:')
for rank, err in zip(order2.values(), erro2.values()):
    print('%d(%.1f%%)' % (rank, err*100), end=',')
print('\n数据集3:')
# BUG FIX: original iterated range(len(order1)) here and for dataset 4,
# which only worked because all grids happened to have the same size.
for rank, err in zip(order3.values(), erro3.values()):
    print('%d(%.1f%%)' % (rank, err*100), end=',')
print('\n数据集4:')
for rank, err in zip(order4.values(), erro4.values()):
    print('%d(%.1f%%)' % (rank, err*100), end=',')

# Rank matrix: one row per dataset, one column per configuration.
order = np.array([list(order1.values()), list(order2.values()),
                  list(order3.values()), list(order4.values())])
mean_order = np.mean(order, 0)
print('平均序值：', list(mean_order))

# Friedman test statistic (textbook eq. 2.34):
#   Tx2 = 12N/(k(k+1)) * (sum(r_i^2) - k(k+1)^2/4)
#   TF  = (N-1)Tx2 / (N(k-1) - Tx2)   (F-distributed variant)
N, k = order.shape  # N datasets, k configurations
Tx2 = 12*N/k/(k+1)*(sum(mean_order**2)-k*(k+1)**2/4)
TF = (N-1)*Tx2/(N*(k-1)-Tx2)
print('计算得到的TF值为:', TF)

'''
==================================================
        比较三种属性选择规则，三种剪枝下的结果
        划分方式：InfoGain,Gini,LogisticReg
        剪枝方式：不剪枝，预剪枝，后剪枝
        总共9种情况
        对于LogisticReg，统一采用normal=None,lamuda=0.01的设置
==================================================
'''

#------------------ parameter grid (experiment 2) --------------------
# Vary splitting rule (3 options) x pruning strategy (3 options) = 9 configs.
# LogisticReg runs with min-max normalisation and lamuda=0.01 fixed.
lamuda = [0.01]
normal = ['min-max']
rule = ['InfoGain', 'Gini', 'LogisticReg']
cut = [None, 'pre', 'after']

#------------------ dataset 1: iris ------------------
# Same 24-repetition 70/30 hold-out protocol as experiment 1,
# now over the rule x pruning grid.
print('=============\n考察第1个数据集：鸢尾花\n=============')
erro1 = hold_out(X1, Y1, FeatureName1, LabelName1, 24, 0.7,
                 cut=cut, rule=rule, normal=normal, lamuda=lamuda)
order1 = erro_to_order(erro1)

#------------------ dataset 2: wine ------------------
print('=============\n考察第2个数据集：酒类\n=============')
erro2 = hold_out(X2, Y2, FeatureName2, LabelName2, 24, 0.7,
                 cut=cut, rule=rule, normal=normal, lamuda=lamuda)
order2 = erro_to_order(erro2)

#------------------ dataset 3: ionosphere ------------------
print('=============\n考察第3个数据集：电离层\n=============')
erro3 = hold_out(X3, Y3, FeatureName3, LabelName3, 24, 0.7,
                 cut=cut, rule=rule, normal=normal, lamuda=lamuda)
order3 = erro_to_order(erro3)

#------------------ dataset 4: yeast ------------------
print('=============\n考察第4个数据集：蛋白质定位\n=============')
erro4 = hold_out(X4, Y4, FeatureName4, LabelName4, 24, 0.7,
                 cut=cut, rule=rule, normal=normal, lamuda=lamuda)
order4 = erro_to_order(erro4)


# Report per-dataset rank (and error %) for each algorithm configuration,
# then run the Friedman test over the rank matrix.
print('不同算法设置、不同数据集下的错误率排序结果')
print(['算法'+str(key) for key in order1.keys()])
print('数据集1:')
for rank, err in zip(order1.values(), erro1.values()):
    print('%d(%.1f%%)' % (rank, err*100), end=',')
print('\n数据集2:')
for rank, err in zip(order2.values(), erro2.values()):
    print('%d(%.1f%%)' % (rank, err*100), end=',')
print('\n数据集3:')
# BUG FIX: original iterated range(len(order1)) here and for dataset 4,
# which only worked because all grids happened to have the same size.
for rank, err in zip(order3.values(), erro3.values()):
    print('%d(%.1f%%)' % (rank, err*100), end=',')
print('\n数据集4:')
for rank, err in zip(order4.values(), erro4.values()):
    print('%d(%.1f%%)' % (rank, err*100), end=',')

# Rank matrix: one row per dataset, one column per configuration.
order = np.array([list(order1.values()), list(order2.values()),
                  list(order3.values()), list(order4.values())])
mean_order = np.mean(order, 0)
print('平均序值：', list(mean_order))

# Friedman test statistic (textbook eq. 2.34):
#   Tx2 = 12N/(k(k+1)) * (sum(r_i^2) - k(k+1)^2/4)
#   TF  = (N-1)Tx2 / (N(k-1) - Tx2)   (F-distributed variant)
N, k = order.shape  # N datasets, k configurations
Tx2 = 12*N/k/(k+1)*(sum(mean_order**2)-k*(k+1)**2/4)
TF = (N-1)*Tx2/(N*(k-1)-Tx2)
print('计算得到的TF值为:', TF)