# -*- encoding: utf-8 -*-
"""
随机森林算法：
1. 读取原始数据
2. 交叉验证: 将数据分为五份，每次选取一份作为测试数据，其余作为训练数据，一共训练五次，产生五个结果
3. 放回式采样训练数据，采样比例为1:1，构建若干个决策树，决策树采用Gini split算法
    3.1 从特征中随机选取15个特征值
    3.2 遍历选取的特征值，根据Gini split算法找出所有选取特征中，最佳的划分点
    3.3 得到决策树的根节点{'index': index, 'value': value, 'group': (left, right)}
        其中left为索引为index的特征小于value的子数据集
           right为索引为index的特征大于value的子数据集
    3.4 循环执行3.1~3.3，递归构建决策树
4. 将每行测试数据分别代入若干个决策树，选出占比最多分类结果的作为最终结果
5. 将分类结果与实际结果比较计算正确率
"""
from csv import reader
from random import randrange
from random import seed
import math


def load_csv(file_path='sonar-all-data.csv'):
    """Load a CSV data set into a list of rows.

    Every column except the last is converted to ``float``; the last column
    is kept unchanged as the class label. Blank lines are skipped.

    :param file_path: path of the CSV file to read; the default preserves
        the original hard-coded behavior, so existing callers are unaffected.
    :return: list of rows shaped ``[float, ..., float, label]``.
    """
    data_set = []
    with open(file_path, 'r') as f:
        for row in reader(f):
            # csv.reader yields [] for blank lines; skip them
            if not row:
                continue
            data_set.append([float(value) for value in row[:-1]] + [row[-1]])
    return data_set


def cross_split(data_set, split_num):
    """Randomly partition data_set into split_num folds for cross-validation.

    Bug fix: the original looped ``range(split_num - 1)`` and never appended
    the final fold, so it returned one fewer fold than requested and silently
    discarded a fold's worth of rows. This version returns exactly
    ``split_num`` folds; the ``len(data_set) % split_num`` leftover rows are
    dropped so all folds have equal size.

    :param data_set: list of rows to partition.
    :param split_num: number of folds to produce.
    :return: list of split_num folds (lists of rows).
    """
    seed(1)  # fixed seed keeps the folds reproducible across runs
    data_set_copy = list(data_set)
    fold_size = len(data_set) // split_num
    data_set_split = []
    for _ in range(split_num):
        fold = []
        while len(fold) < fold_size:
            random_index = randrange(len(data_set_copy))
            fold.append(data_set_copy.pop(random_index))
        data_set_split.append(fold)
    return data_set_split


def sub_sample(train_set):
    """Draw a bootstrap sample: len(train_set) rows picked with replacement."""
    size = len(train_set)
    return [train_set[randrange(size)] for _ in range(size)]


def split_by_value(data_set, index, value):
    """Partition data_set on column ``index``: rows with row[index] < value
    go in the first list, all others (>= value) in the second."""
    below = [row for row in data_set if row[index] < value]
    at_or_above = [row for row in data_set if row[index] >= value]
    return below, at_or_above


def cal_gini_index(data_set):
    """Gini impurity of data_set: 1 - sum(p_c^2) over the class labels
    found in the last column of each row."""
    labels = [row[-1] for row in data_set]
    total = len(labels)
    squared_probs = sum((labels.count(label) / total) ** 2
                        for label in set(labels))
    return 1.0 - squared_probs


def cal_gini_split(less, greater):
    """Weighted Gini impurity of a two-way split: each group's impurity
    weighted by its share of the total row count."""
    total = len(less) + len(greater)
    weighted = 0.0
    for group in (less, greater):
        weighted += (len(group) / total) * cal_gini_index(group)
    return weighted


def get_best_split(data_set, n_features):
    """Pick n_features random features and find the best Gini split point.

    Bug fix: the original appended ``randrange(...)`` without a membership
    check, sampling features WITH replacement — duplicates meant fewer than
    n_features distinct features were actually examined. This version samples
    without replacement, and caps the count at the number of available
    features so the loop always terminates.

    :param data_set: rows of ``[feature..., label]`` (must be non-empty).
    :param n_features: number of distinct candidate features to examine.
    :return: dict ``{'index': i, 'value': v, 'groups': (less, greater)}``
        describing the split with the lowest weighted Gini impurity.
    """
    total_features = len(data_set[0]) - 1  # last column is the label
    features = []
    while len(features) < min(n_features, total_features):
        index = randrange(total_features)
        if index not in features:  # sample without replacement
            features.append(index)
    least_gini_split = 999
    best_index = 999
    best_value = 999
    best_groups = ([], [])
    for index in features:
        # every observed value of the feature is a candidate threshold
        for row in data_set:
            less, greater = split_by_value(data_set, index, row[index])
            gini_split = cal_gini_split(less, greater)
            if gini_split < least_gini_split:
                least_gini_split = gini_split
                best_index = index
                best_value = row[index]
                best_groups = (less, greater)
    return {'index': best_index, 'value': best_value, 'groups': best_groups}


def get_max_classify(data_set):
    """Return the most frequent class label (last column) in data_set."""
    labels = [row[-1] for row in data_set]
    best_label = None
    best_count = -1
    for label in set(labels):
        count = labels.count(label)
        if count > best_count:
            best_count = count
            best_label = label
    return best_label


def split(node, max_depth, min_size, n_features, depth):
    """Recursively grow the decision tree below ``node``.

    Stopping conditions:
        1. one side of the split is empty -> both children become the
           majority label of the combined rows;
        2. ``depth`` has reached ``max_depth`` -> each child becomes the
           majority label of its own rows;
        3. a side has at most ``min_size`` rows -> that side becomes a leaf.
    Otherwise each side gets its own best split and is expanded recursively.
    """
    left, right = node.pop('groups')
    if not left or not right:
        # degenerate split: collapse both branches into one leaf label
        leaf = get_max_classify(left + right)
        node['left'] = leaf
        node['right'] = leaf
        return
    if depth >= max_depth:
        node['left'] = get_max_classify(left)
        node['right'] = get_max_classify(right)
        return
    for key, subset in (('left', left), ('right', right)):
        if len(subset) <= min_size:
            node[key] = get_max_classify(subset)
        else:
            node[key] = get_best_split(subset, n_features)
            split(node[key], max_depth, min_size, n_features, depth + 1)


def build_decision_tree(sub_sample_set, max_depth, min_size, n_features):
    """Build one decision tree: find the root's best split, then grow the
    rest of the tree recursively starting at depth 1."""
    root_node = get_best_split(sub_sample_set, n_features)
    split(root_node, max_depth, min_size, n_features, 1)
    return root_node


def predict(tree, row):
    """Classify ``row`` by walking the tree: go left when the row's feature
    is below the node's threshold, right otherwise, until a leaf label."""
    node = tree
    while True:
        if row[node['index']] < node['value']:
            branch = node['left']
        else:
            branch = node['right']
        if not isinstance(branch, dict):
            return branch  # reached a leaf label
        node = branch


def bagging_predict(trees, row):
    """Run ``row`` through every tree and return the majority-vote label."""
    votes = []
    for tree in trees:
        votes.append(predict(tree, row))
    return max(set(votes), key=votes.count)


def random_forest(train_set, test_set, tree_num, max_depth, min_size, n_features):
    """Train ``tree_num`` trees, each on a fresh bootstrap sample of the
    training set, then classify every test row by majority vote."""
    trees = [
        build_decision_tree(sub_sample(train_set), max_depth, min_size, n_features)
        for _ in range(tree_num)
    ]
    return [bagging_predict(trees, row) for row in test_set]


def cal_current_rate(predicted, actual):
    """Return the classification accuracy of ``predicted`` vs ``actual``
    as a percentage (0.0-100.0).

    Replaces the ``for i in range(len(actual))`` index loop with an
    idiomatic ``zip`` + ``sum``; like the original, the denominator is
    ``len(actual)`` (raises ZeroDivisionError on an empty ``actual``).

    :param predicted: sequence of predicted labels.
    :param actual: sequence of true labels, same length as ``predicted``.
    """
    correct = sum(1 for p, a in zip(predicted, actual) if p == a)
    return correct / len(actual) * 100.0


def main(split_num, tree_num, max_depth, min_size, n_features):
    """Run split_num-fold cross-validation of the random forest.

    Each fold serves once as the test set while the remaining folds are
    flattened into the training set; returns the per-fold accuracy scores.
    """
    data_set = load_csv()
    folds = cross_split(data_set, split_num)
    scores = []
    for fold in folds:
        test_set = list(fold)
        remaining = list(folds)
        remaining.remove(fold)
        # flatten the remaining folds into a single training set
        train_set = [row for part in remaining for row in part]
        predicted = random_forest(train_set, test_set, tree_num,
                                  max_depth, min_size, n_features)
        actual = [row[-1] for row in test_set]
        scores.append(cal_current_rate(predicted, actual))
    return scores


if __name__ == '__main__':
    # Number of cross-validation folds.
    split_num = 5
    # Forest sizes to evaluate (number of trees per run).
    trees_num_list = (1, 10, 20)
    max_depth = 20
    min_size = 1
    n_features = 15  # features randomly sampled per split; should be below the total feature count (a common choice is sqrt of it)
    for tree_num in trees_num_list:
        scores = main(split_num, tree_num, max_depth, min_size, n_features)
        aver_score = sum(scores) / len(scores)
        print(tree_num, ':', scores, '平均分:', aver_score)
