#! /usr/bin/python
# _*_ coding:utf-8 _*_
import numpy as np
from operator import itemgetter

class_value = ["yes", "no"]


# Count how many samples belong to each class.
def sample_count_of_class(sample_value):
    """Return a dict mapping class label -> number of samples.

    sample_value: iterable of rows whose first element is the class label
    (typically an (m, 1) numpy array of labels).
    """
    classcount = {}
    for item in sample_value:
        # dict.get with a default folds the membership test and the
        # increment into a single pass.
        classcount[item[0]] = classcount.get(item[0], 0) + 1
    return classcount


# Decide which class has the most samples in the data set and return it.
def majority(sample_value):
    """Return the class label that occurs most often in sample_value.

    Fix: the original used dict.iteritems(), which only exists on
    Python 2; max() over items() works on both and avoids sorting the
    whole dict just to read the first entry.
    """
    classcount = sample_count_of_class(sample_value)
    # itemgetter(1) keys the comparison on the per-class count.
    return max(classcount.items(), key=itemgetter(1))[0]


# Compute the information (Shannon) entropy of a label column.
def info_entropy(sample_value):
    """Return the Shannon entropy (base 2) of the class labels.

    sample_value: (m, 1) numpy array of class labels.

    Fix: the original built np.array(classcount.values()), which under
    Python 3 wraps a dict view in a 0-d object array and crashes on
    reshape. np.unique with return_counts gives the per-class counts
    directly and works on both Python 2 and 3.
    """
    m = sample_value.shape[0]
    _, counts = np.unique(sample_value[:, 0], return_counts=True)
    rates = counts / (m * 1.0)  # per-class frequencies
    return float(-np.sum(rates * np.log2(rates)))


# Compute the information gain of splitting on one attribute.
# siamples: sample feature matrix (one row per sample)
# siample_values: (m, 1) array of class labels for those samples
# property_index: column index of the attribute to evaluate (0-based)
def info_gain(siamples, siample_values, property_index):
    """Return entropy(parent) - weighted entropy of the child subsets."""
    m = siamples.shape[0]
    column = siamples[:, property_index]
    weighted_child_entropy = 0.0
    # One child subset per distinct value of the attribute.
    for value in set(column):
        rows = np.where(column == value)[0]  # row indices with this value
        weight = len(rows) / (m * 1.0)       # subset frequency
        weighted_child_entropy += weight * info_entropy(siample_values[rows, :])
    return info_entropy(siample_values) - weighted_child_entropy


# Choose the best attribute to split on.
def chose_best_feature_to_split(samples, sample_values, feature):
    """Return the index (into feature) with the highest information gain.

    Ties and an all-zero-gain case both fall back to index 0, matching
    the strict '>' comparison below.
    """
    best_index = 0
    best_gain = 0.0
    for index in range(len(feature)):
        current = info_gain(samples, sample_values, index)
        if current > best_gain:
            best_gain = current
            best_index = index
    return best_index


# Partition the data set by one value of the chosen attribute.
def split_samples(samples, samples_values, best_feature_index, best_feature_value):
    """Return (sub_samples, sub_samples_values): the rows of samples (and
    their labels) whose best_feature_index column equals best_feature_value.
    """
    mask = samples[:, best_feature_index] == best_feature_value
    selected = np.where(mask)[0]  # row indices matching the value
    return samples[selected, :], samples_values[selected, :]


# Build the decision tree (ID3).
def tree_generate(samples, sample_values, feature):
    """Recursively build an ID3 decision tree.

    samples: (m, n) feature matrix; sample_values: (m, 1) class labels;
    feature: list of n attribute names, one per column of samples.
    Returns either a class name (leaf) or a nested dict
    {attribute_name: {attribute_value: subtree, ...}}.

    Fixes over the original:
    - The original del'd the chosen label from `feature` but never removed
      the corresponding column from `samples`, so in recursive calls the
      attribute indices no longer lined up with the data columns and
      deeper splits used the wrong attribute. The used column is now
      removed with np.delete so labels and columns stay aligned.
    - `del(feature[best_feat])` mutated the caller's list; the remaining
      labels are now built without mutating the argument.
    """
    m = samples.shape[0]
    # Leaf: every sample in this subset has the same class.
    if np.sum(sample_values == sample_values[0, 0]) == m:
        return class_value[int(sample_values[0, 0])]
    # Leaf: no attributes left — return the majority class.
    if len(feature) == 0:
        return class_value[int(majority(sample_values))]

    # Pick the attribute with the highest information gain.
    best_feat = chose_best_feature_to_split(samples, sample_values, feature)
    best_feat_label = feature[best_feat]
    tree = {best_feat_label: {}}  # dict-of-dicts tree representation
    # Remaining attribute labels, without mutating the caller's list.
    remaining = feature[:best_feat] + feature[best_feat + 1:]

    # Recurse into one subtree per observed value of the chosen attribute.
    for item in set(samples[:, best_feat]):
        sub_samples, sub_sample_values = split_samples(
            samples, sample_values, best_feat, item)
        # Drop the used column so column indices match `remaining`.
        sub_samples = np.delete(sub_samples, best_feat, axis=1)
        tree[best_feat_label][int(item)] = tree_generate(
            sub_samples, sub_sample_values, remaining)
    return tree


if __name__ == "__main__":
    # Load the training set: each row is feature columns followed by a
    # class label in the last column.
    train_set = np.loadtxt("prepare_data.txt", delimiter=",")
    (m, n) = train_set.shape
    samples = train_set[:, 0: n-1]                   # feature matrix
    samples_value = train_set[:, -1].reshape(m, 1)   # label column vector

    # Attribute names: one space-separated line. `with` closes the file
    # even on error, and the name no longer shadows the `file` builtin.
    # strip() removes the trailing newline the original left glued onto
    # the last attribute name.
    with open("property.txt", "r") as prop_file:
        feature = prop_file.readline().strip().split(" ")

    tree = tree_generate(samples, samples_value, feature)
    # print(...) works on both Python 2 and 3; the original `print tree`
    # statement is a syntax error under Python 3.
    print(tree)
