#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: zzx
# Create: '2018/4/6'

import operator
from collections import Counter
from math import log

import pandas as pd

from src.decision_tree import treePlotter


# Compute the Shannon entropy of a data set whose last column is the class label.
def calcShannonEnt(dataSet):
    """Return the Shannon entropy (base 2, in bits) of the class-label column.

    :param dataSet: sequence of rows; each row's last element is the class label
    :return: entropy as a float; 0.0 for an empty data set
    """
    numEntries = len(dataSet)
    if numEntries == 0:
        # An empty partition carries no information; also avoids a
        # ZeroDivisionError when a split produces an empty side.
        return 0.0
    # Tally how often each class label (last column) occurs.
    labelCounts = Counter(featVec[-1] for featVec in dataSet)
    shannonEnt = 0.0
    for count in labelCounts.values():
        prob = count / float(numEntries)
        shannonEnt -= prob * log(prob, 2)
    return shannonEnt


def splitDateSet(data_set, axis, value):
    """Select the rows of *data_set* whose feature at column *axis* equals *value*.

    Intended for discrete-valued features only.

    :param data_set: 2-D array supporting boolean-mask row indexing (numpy-style)
    :param axis: column index of the feature to split on
    :param value: feature value the kept rows must match
    :return: the matching rows (same array type as the input)
    """
    mask = data_set[:, axis] == value
    return data_set[mask]

# For a continuous feature, compute the conditional entropy of splitting the
# data set at a candidate threshold (rows <= value vs. rows > value).
# Used to pick the best split point (threshold) for that feature.
def cal_continue_data_set_info(dataSet, axis, value):
    below = dataSet[dataSet[:, axis] <= value]
    above = dataSet[dataSet[:, axis] > value]
    weight = len(below) / float(len(dataSet))
    left_part = weight * calcShannonEnt(below)
    right_part = (1 - weight) * calcShannonEnt(above)
    return left_part + right_part




# Choose the data-set split (feature, and threshold for continuous features)
# that yields the largest information gain.
def chooseBestFeatureToSplit(dataSet):
    """Pick the best feature to split *dataSet* on by information gain.

    Reads the module-level ``dtype`` dict ({column name: type}) to decide
    whether each feature is continuous (float) or discrete.
    NOTE(review): assumes ``dtype``'s key insertion order matches the column
    order of ``dataSet`` — confirm against how the CSV is loaded.

    :param dataSet: 2-D array; each row is a sample, last column is the label
    :return: (bestFeature, best_feature_unique); ``best_feature_unique`` is
        the chosen threshold (float) for a continuous feature, or the list of
        distinct values for a discrete one. ``bestFeature`` is -1 when no
        split improves on the base entropy.
    """
    numFeature = len(dataSet[0]) - 1
    baseEntropy = calcShannonEnt(dataSet)
    bestInfoGain = 0.0
    bestFeature = -1
    best_feature_unique = set()
    best_threshold = 0.0

    for i in range(numFeature):
        featList = [example[i] for example in dataSet]
        is_continuous = len(dtype) > 0 and dtype[list(dtype.keys())[i]] == float
        if is_continuous:
            # Continuous feature: candidate thresholds are the midpoints of
            # adjacent distinct values; keep the one with minimal conditional
            # entropy.  Start from +inf so a feature with a single distinct
            # value (no candidate split) can never be selected — the original
            # 0.0 start gave such features a bogus infoGain of baseEntropy.
            newEntropy = float('inf')
            uniqueVals = sorted(set(featList))
            for lo, hi in zip(uniqueVals, uniqueVals[1:]):
                value = (lo + hi) / 2.0
                entropy = cal_continue_data_set_info(dataSet, i, value)
                # Strict minimum tracking; the original test
                # "entropy < newEntropy or newEntropy <= 0" let a later
                # candidate overwrite a perfect (zero-entropy) split.
                if entropy < newEntropy:
                    newEntropy = entropy
                    best_threshold = value
        else:
            # Discrete feature: entropy weighted over every distinct value.
            newEntropy = 0.0
            uniqueVals = list(set(featList))
            for value in uniqueVals:
                subDataSet = splitDateSet(dataSet, i, value)
                prob = len(subDataSet) / float(len(dataSet))
                newEntropy += prob * calcShannonEnt(subDataSet)

        infoGain = baseEntropy - newEntropy

        # Keep the feature with the best information gain so far.
        if infoGain > bestInfoGain:
            bestInfoGain = infoGain
            bestFeature = i
            if is_continuous:
                best_feature_unique = best_threshold
            else:
                best_feature_unique = uniqueVals
    return bestFeature, best_feature_unique


# Majority vote: return the most frequent class label, used for leaf nodes.
def majorityCnt(classList):
    """Return the class label occurring most often in *classList*.

    :param classList: non-empty sequence of class labels
    :return: the label with the highest count (ties broken arbitrarily)
    """
    # Bug fix: the original sorted and returned INSIDE the counting loop, so
    # it always answered after seeing only the first label.  Counter tallies
    # every vote before picking the winner.
    return Counter(classList).most_common(1)[0][0]


# Recursively build the decision tree (ID3-style; handles both discrete and
# continuous features).
def createTree(dataSet, labels):
    """Build a decision tree as nested dicts.

    :param dataSet: 2-D array; each row is a sample, last column is the class label
    :param labels: feature names aligned with the feature columns of dataSet
    :return: nested dict {feature label: {branch key: subtree or class label}},
        or a bare class label when the node is pure
    """
    classList = [example[-1] for example in dataSet]
    # Stop splitting once every remaining sample carries the same class.
    if classList.count(classList[0]) == len(classList):
        return classList[0]
    # All features consumed: fall back to majority vote.
    # NOTE(review): feature columns are never removed from dataSet in this
    # implementation, so this guard looks unreachable — confirm.

    if len(dataSet[0]) == 1:
        return majorityCnt(classList)

    # bestFeat: index of the optimal feature to split on.
    # uniqueVals: value range of that feature — a collection of values for a
    # discrete feature, or a single float threshold (<= / > it) for a continuous one.
    bestFeat, uniqueVals = chooseBestFeatureToSplit(dataSet)
    bestFeatLabel = labels[bestFeat]
    myTree = {bestFeatLabel: {}}

    if isinstance(uniqueVals, float):  # continuous feature
        left_data_set = dataSet[dataSet[:, bestFeat] <= uniqueVals]
        right_data_set = dataSet[dataSet[:, bestFeat] > uniqueVals]
        left_key = '<='+str(round(uniqueVals, 3))
        right_key = '>'+str(round(uniqueVals, 3))
        uniqueVals = {left_key:left_data_set,right_key:right_data_set}
        for value in uniqueVals.keys():
            retDataSet = uniqueVals[value]
            tree = createTree(retDataSet, labels)
            # If the subtree re-splits on the same feature, merge its branches
            # into this node instead of nesting a duplicate label.
            if isinstance(tree, dict) and bestFeatLabel in tree.keys():
                myTree[bestFeatLabel].update(tree[bestFeatLabel])
            else:
                myTree[bestFeatLabel][value] = tree
    else:
        # Discrete feature: one branch per distinct value.
        for value in uniqueVals:
            retDataSet = splitDateSet(dataSet, bestFeat, value)
            myTree[bestFeatLabel][value] = createTree(retDataSet, labels)

    return myTree


# Load a data file and split it into a value matrix and feature labels.
def createDataSet(filename, dtype):
    """Load *filename* via loadData and return its values and feature names.

    :param filename: path of the CSV file to read
    :param dtype: dict mapping column names to column types
    :return: (value matrix of all columns, list of feature-column names —
        every column except the last, which is the class label)
    """
    frame = loadData(filename, dtype)
    labels = frame.columns.tolist()[:-1]
    print(labels)
    return frame.values, labels


def loadData(filename, dtype):
    """Read a CSV file into a pandas DataFrame.

    :param filename: path of the CSV file
    :param dtype: dict mapping column names to data types (forwarded to read_csv)
    :return: the loaded DataFrame
    """
    return pd.read_csv(filename, dtype=dtype)


if __name__ == '__main__':
    # NOTE(review): `global` at module top level is a no-op — `dtype` becomes
    # a module-level name simply by the assignment below, and the functions
    # above read it as a global.
    global dtype # data type of each column

    # Discrete-valued data set
    # filename = "test_id3.csv"
    # dtype = {}

    # Continuous-valued data set (iris)
    filename = "../../data/iris_all.csv"
    dtype = {'Sepal.Length': float, 'Sepal.Width': float, 'Petal.Length': float, 'Petal.Width': float}

    dataSet, labels = createDataSet(filename,dtype)
    # Work on a copy so tree construction cannot mutate the original list.
    labels_tmp = labels[:]
    decisionTree = createTree(dataSet, labels_tmp)
    treePlotter.createPlot(decisionTree)
