# -*- coding: utf-8 -*-
"""
Find the optimal split using information entropy
Created on Thu Apr 26 16:46:03 2018

@author: Allen
"""
import numpy as np
import matplotlib.pyplot as plt

from sklearn import datasets
iris = datasets.load_iris()
X = iris.data[:, 2:]
y = iris.target

### Simulate entropy-based splitting

def split( X, y, d, value ):
    index_a = ( X[:, d] <= value )
    index_b = ( X[:, d] > value )
    return X[index_a], X[index_b], y[index_a], y[index_b]

from collections import Counter
def entropy( y ):
    counter = Counter( y )
    res = 0.
    for num in counter.values():
        p = num / len( y )
        res += -p * np.log( p )
    return res

def try_split( X, y ):
    best_entropy = float( 'inf' )
    best_d, best_v = -1, -1
    for d in range( X.shape[1] ):
        sorted_index = np.argsort( X[:, d] )
        for i in range( 1, len( X ) ):
            if X[sorted_index[i-1], d] != X[sorted_index[i], d]:
                v = (X[sorted_index[i-1], d] + X[sorted_index[i], d]) / 2
            X_l, X_r, y_l, y_r = split( X, y, d, v )
            e = entropy( y_l ) + entropy( y_r )
            if e < best_entropy:
                best_entropy, best_d, best_v = e, d, v
    return best_entropy, best_d, best_v


best_entropy, best_d, best_v = try_split( X, y ) # (0.69314718055994529, 0, 2.4500000000000002)
'''
信息熵：0.69314718055994529
维度：0
决策边界：2.4500000000000002
结果表明：在第0维，数值在2.24，信息熵最小
'''
X1_l, X1_r, y1_l, y1_r = split( X, y, best_d, best_v )

best_entropy2, best_d2, best_v2 = try_split( X1_l, y1_l )
'''
信息熵：0.0
维度：0
决策边界：1.05
结果表明：信息熵为0，无需再分类
'''
entropy( y1_l ) # 0.0
entropy( y1_r ) # 0.69314718055994529

best_entropy3, best_d3, best_v3 = try_split( X1_r, y1_r )
'''
信息熵：0.41322788993619042
维度：1
决策边界：1.75
结果表明：
'''
x2_l, X2_r, y2_l, y2_r = split( X1_r, y1_r, best_d3, best_v3 )
print( entropy( y2_l ) ) # 0.308495450831
print( entropy( y2_r ) ) # 0.104732439105