from math import log
from copy import deepcopy
from collections import Counter
from pprint import PrettyPrinter

train_path = "./ID3/traindata.txt"
test_path = "./ID3/testdata.txt"


def _load_dataset(path):
    """Read a whitespace-separated dataset from *path*.

    Each valid line holds 5 fields (4 attributes + class label); lines with
    any other field count (blank lines, headers, garbage) are skipped.
    Returns a list of rows, each a list of 5 floats.
    """
    rows = []
    with open(path, 'r') as f:
        for line in f:
            fields = line.split()
            if len(fields) == 5:  # ignore malformed / empty lines
                rows.append(list(map(float, fields)))
    return rows


# Previously the same parsing loop was duplicated for both files.
train_list = _load_dataset(train_path)
test_list = _load_dataset(test_path)


def informationEntropy(s):
    """Return the Shannon entropy (base 2) of the class labels in *s*.

    s: list of examples; each example is a sequence whose last element
       is the class label.
    Returns 0 for an empty example set (matching the original behavior,
    where the loop body never ran).
    """
    if not s:
        return 0
    total = len(s)
    h = 0
    # Count every label in one pass instead of re-scanning s once per
    # distinct label (O(n) instead of O(n * k)).
    for count in Counter(item[-1] for item in s).values():
        p = count / total
        h -= p * log(p, 2)
    return h


def getSubSet(attr_sub_s, s, index):
    """Return the examples in *s* whose value at column *index* is in *attr_sub_s*.

    attr_sub_s: iterable of accepted attribute values.
    Order of the surviving examples is preserved.
    """
    allowed = set(attr_sub_s)  # O(1) membership instead of a list scan per item
    return [item for item in s if item[index] in allowed]


def getBestNode(s, sorted_index=None):
    """Choose the best attribute to split the example set *s* on (ID3).

    Each candidate attribute is binarized at the median of its distinct
    values (values below the median vs. at-or-above it), and the split
    with the LOWEST conditional entropy is chosen — minimizing H(S|A) is
    equivalent to maximizing information gain H(S) - H(S|A), since H(S)
    is the same for every candidate.

    s:            list of examples (attributes..., label).
    sorted_index: indices of attributes already used on this path.
    Returns a node dict, or None when s is empty or no attribute is left.
    """
    # Empty subsets have no attribute values to split on; treat them as
    # leaves (the caller falls back to getBestLabel / a leaf label).
    if not s:
        return None
    # None default avoids Python's shared-mutable-default-argument pitfall.
    if sorted_index is None:
        sorted_index = []

    node_list = []
    # Attribute count is derived from s itself, not from the module-level
    # train_list, so the function works on any example set.
    for attr_index in range(len(s[0]) - 1):
        if attr_index in sorted_index:
            continue

        sorted_index_new = sorted(sorted_index + [attr_index])

        # Distinct values of this attribute, ascending.
        attr_set = sorted(set(item[attr_index] for item in s))
        # Split at the middle distinct value: strictly-less vs. greater-or-equal.
        split_index = len(attr_set) // 2
        attr_set_lt = attr_set[:split_index]
        attr_set_egt = attr_set[split_index:]
        # Partition the examples accordingly.
        set_lt = getSubSet(attr_set_lt, s, attr_index)
        set_egt = getSubSet(attr_set_egt, s, attr_index)
        # Conditional entropy H(S|A) = sum over parts of |part|/|S| * H(part).
        conditional_entropy = (
            (len(set_lt) / len(s)) * informationEntropy(set_lt)
            + (len(set_egt) / len(s)) * informationEntropy(set_egt)
        )

        node_list.append(
            {'sort_index': attr_index,          # attribute chosen for this split
             'sorted_index': sorted_index_new,  # attributes used so far (sorted)
             'split_attr': attr_set[split_index],  # threshold value (median)
             'set_lt': set_lt,                  # examples with value < threshold
             'set_egt': set_egt,                # examples with value >= threshold
             'conditional_entropy': conditional_entropy
             })

    # All attributes already used on this path: leaf node.
    if not node_list:
        return None

    # BUG FIX: the original returned the node with the MAXIMUM conditional
    # entropy, i.e. the least informative split. ID3 must minimize it.
    return min(node_list, key=lambda node: node['conditional_entropy'])


def getBestLabel(s):
    """Return the most frequent class label among the examples in *s*.

    Ties resolve to the label whose first occurrence comes earliest,
    matching Counter's stable ordering.
    """
    return Counter(example[-1] for example in s).most_common(1)[0][0]


def creatTree(node):
    """Recursively build the decision tree rooted at *node*.

    A branch whose sub-split is None (no attributes left) becomes a leaf
    carrying the majority label of that branch's example subset; otherwise
    the branch holds a recursively built subtree and a None label.
    Returns the tree as a nested dict, or None when *node* is None.
    """
    if not node:
        return None

    branch_lt = getBestNode(node['set_lt'], node['sorted_index'])
    branch_egt = getBestNode(node['set_egt'], node['sorted_index'])

    return {
        'sort_index': node['sort_index'],   # attribute tested at this node
        'split_attr': node['split_attr'],   # threshold for the test
        'tree_lt': creatTree(branch_lt),    # subtree for values < threshold
        'tree_egt': creatTree(branch_egt),  # subtree for values >= threshold
        'label_lt': None if branch_lt else getBestLabel(node['set_lt']),
        'label_egt': None if branch_egt else getBestLabel(node['set_egt']),
    }


def sort(test_item, tree):
    """Classify *test_item* by walking the decision tree; return its label.

    At each node, compare the tested attribute against the split threshold
    to pick the 'lt' (strictly less) or 'egt' (greater-or-equal) branch,
    then recurse into the subtree or return the leaf label.
    """
    branch = 'lt' if test_item[tree['sort_index']] < tree['split_attr'] else 'egt'
    subtree = tree['tree_' + branch]
    if subtree:
        return sort(test_item, subtree)
    return tree['label_' + branch]


# Build the tree from the training data, print it, then score it on the
# held-out test data.
begin_node = getBestNode(train_list)
sort_tree = creatTree(begin_node)
print('sort_tree:')
PrettyPrinter(indent=2).pprint(sort_tree)
print('--------------------')

correct = 0
for item in test_list:
    label_predict = sort(item, sort_tree)
    if label_predict == item[-1]:
        correct += 1
# BUG FIX: accuracy was divided by len(train_list); the loop counts correct
# predictions over the TEST set, so the denominator must be len(test_list).
accuracy = correct / len(test_list)
print('accuracy:', accuracy)
