# -*- encoding: utf-8 -*-
"""
基于决策树的Adaptive Boosting算法：
1. 读取样本数据，将分类标签转换为 {1, -1}，其中 R = 1, M = -1
2. 初始化数据的权重1/N，N为数据总数
3. 用决策树求出一个分类器，代入数据回测，求出分类器的误差
4. 根据分类器的误差更新每个数据的权重
5. 循环步骤3、4 K次，不断求出新的弱分类器和更新数据的权重，最后得到K个弱分类器
6. 将数据代入所有的弱分类器，根据分类器的不同权重累加后经过sigmoid function函数，得出最后结果
"""

from csv import reader
from random import randrange
from random import seed
import math


def load_csv(file_path='sonar-all-data.csv'):
    """Load the sonar CSV data set into a list of rows.

    Feature columns are converted to floats; the class label in the last
    column is mapped to an integer: 'R' -> 1, 'M' -> -1.  Empty rows and
    rows with an unknown label are skipped.

    :param file_path: path to the CSV file (default: 'sonar-all-data.csv',
                      generalized from the previously hard-coded path)
    :return: list of rows, each ``[feature_0, ..., feature_n, label]``
    """
    label_map = {'R': 1, 'M': -1}
    data_set = []
    with open(file_path, 'r') as f:
        for row in reader(f):
            # Skip blank lines and rows whose label is not R/M.
            if not row or row[-1] not in label_map:
                continue
            data_set.append([float(r) for r in row[:-1]] + [label_map[row[-1]]])
    return data_set


def init_data_weight(data_set):
    """Give every sample an equal starting weight of 1/N."""
    n = len(data_set)
    return [1 / n for _ in range(n)]


def split_by_value(data_set, index, value):
    """Partition rows into (below, at-or-above) the threshold `value` on column `index`."""
    below = [row for row in data_set if row[index] < value]
    at_or_above = [row for row in data_set if row[index] >= value]
    return below, at_or_above


def cal_gini_index(data_set):
    """Return the Gini impurity: 1 - sum of squared class probabilities."""
    labels = [row[-1] for row in data_set]
    total = len(labels)
    squared_probs = sum(((labels.count(label) / total) ** 2 for label in set(labels)), 0.0)
    return 1 - squared_probs


def cal_gini_split(less, greater):
    """Return the size-weighted average Gini impurity of the two partitions."""
    total = len(less) + len(greater)
    weighted_less = (len(less) / total) * cal_gini_index(less)
    weighted_greater = (len(greater) / total) * cal_gini_index(greater)
    return weighted_less + weighted_greater


def get_best_split(data_set, n_features):
    """Sample `n_features` distinct random features and return the best split.

    Every (feature, value) candidate taken from the data rows is scored with
    the Gini split criterion; the lowest score wins.

    Bug fix: the original sampling loop appended indices without checking for
    duplicates, so the same feature could be scanned several times and fewer
    than `n_features` distinct features were considered.

    :return: dict with 'index' (feature column), 'value' (threshold) and
             'groups' (the (less, greater) partition of `data_set`)
    """
    feature_count = len(data_set[0]) - 1
    # Sample without replacement; cap at the number of available features so
    # the loop cannot run forever when n_features > feature_count.
    features = set()
    while len(features) < min(n_features, feature_count):
        features.add(randrange(feature_count))
    least_gini_split = 999
    best_index = 999
    best_value = 999
    best_groups = ([], [])
    for index in features:
        for row in data_set:
            less, greater = split_by_value(data_set, index, row[index])
            gini_split = cal_gini_split(less, greater)
            if gini_split < least_gini_split:
                least_gini_split = gini_split
                best_index = index
                best_value = row[index]
                best_groups = (less, greater)
    return {'index': best_index, 'value': best_value, 'groups': best_groups}


def get_max_classify(data_set):
    """Return the most frequent class label in the data set."""
    labels = [row[-1] for row in data_set]
    best_label, best_count = None, -1
    for label in set(labels):
        occurrences = labels.count(label)
        if occurrences > best_count:
            best_label, best_count = label, occurrences
    return best_label


def split(node, max_depth, min_size, n_features, depth):
    """Recursively grow the decision tree below `node`.

    Termination:
        1. one partition in node['groups'] is empty -> both children become
           the majority label of the union;
        2. `max_depth` reached -> each side becomes a majority-label leaf;
        3. a side with at most `min_size` rows also becomes a leaf.
    """
    left_rows, right_rows = node.pop('groups')
    # Pure split: nothing left to separate on one side.
    if not left_rows or not right_rows:
        node['left'] = node['right'] = get_max_classify(left_rows + right_rows)
        return
    # Depth budget exhausted: close both branches with majority labels.
    if depth >= max_depth:
        node['left'] = get_max_classify(left_rows)
        node['right'] = get_max_classify(right_rows)
        return
    # Otherwise keep splitting each side that is still large enough.
    for side, rows in (('left', left_rows), ('right', right_rows)):
        if len(rows) <= min_size:
            node[side] = get_max_classify(rows)
        else:
            node[side] = get_best_split(rows, n_features)
            split(node[side], max_depth, min_size, n_features, depth + 1)


def build_decision_tree(sub_sample_set, max_depth, min_size, n_features):
    """Fit one decision tree on `sub_sample_set` and return its root node."""
    # The root is the best split of the whole sample; split() then grows
    # the rest of the tree recursively starting at depth 1.
    root_node = get_best_split(sub_sample_set, n_features)
    split(root_node, max_depth, min_size, n_features, 1)
    return root_node


def divide_data_set(data_set, n):
    """Randomly partition `data_set` into `n` folds, sampling without replacement.

    Each fold holds ceil(len(data_set) / n) rows, except possibly the last,
    which receives whatever remains.

    Bug fix: the original used ``list.remove(data_set_copy[index])``, which
    scans for the *first* row equal to the sampled one — O(n) per removal and
    it removes the wrong element when duplicate rows exist.  ``pop(index)``
    removes exactly the sampled row.

    :return: list of n lists of rows
    """
    pool = list(data_set)
    fold_size = math.ceil(len(pool) / n)
    folds = []
    for _ in range(n):
        fold = []
        while len(fold) < fold_size and pool:
            fold.append(pool.pop(randrange(len(pool))))
        folds.append(fold)
    return folds


def combine_list(data_list):
    """Flatten a list of row lists into a single list of rows."""
    return [row for sub_list in data_list for row in sub_list]


def predict(tree, row):
    """Walk the decision tree for `row` and return the predicted class label.

    Internal nodes are dicts with 'index', 'value', 'left', 'right';
    leaves are plain labels.
    """
    node = tree
    while True:
        branch = node['left'] if row[node['index']] < node['value'] else node['right']
        if not isinstance(branch, dict):
            return branch
        node = branch


def indicator_function(x, y):
    """Return 1 when x and y differ numerically, 0 when they are equal."""
    return int(float(x) != float(y))


def cal_weight_of_classify(predict_list, real_list, weights):
    """Compute the weight (alpha) of the current weak classifier.

    The weighted error is the sum of sample weights on misclassified rows;
    alpha = 0.5 * ln((1 - error) / error).  A perfect classifier
    (error == 0) returns 1 as a sentinel so the caller can stop boosting.
    """
    error = 0.0
    for predicted, actual, weight in zip(predict_list, real_list, weights):
        # Inlined indicator: accumulate the weight only when misclassified.
        if float(predicted) != float(actual):
            error += weight
    if error == 0.0:
        return 1
    return (1 / 2) * math.log((1 - error) / error)


def update_data_weight(train_data, weights, predict_list, real_list, classify_weight):
    """Re-weight the training samples after one boosting round (in place).

    AdaBoost update: w_i <- w_i * exp(-alpha * y_i * h(x_i)) / Z, where Z
    normalizes the new weights to sum to 1.

    Bug fix: the normalizer previously computed
    ``sum(w_i * exp(-alpha * h(x_i)))`` — it omitted the true label y_i, so
    Z disagreed with the numerator and the updated weights did not sum to 1.
    """
    z_normalization = sum(
        w * math.exp(-classify_weight * r * p)
        for w, r, p in zip(weights, real_list, predict_list)
    )
    for i in range(len(train_data)):
        weights[i] = weights[i] * math.exp(-classify_weight * real_list[i] * predict_list[i]) / z_normalization


def sigmoid_function(x):
    """Return the sign of x: 1 when x > 0, otherwise -1.

    Bug fix: the body previously computed the logistic function
    ``1 / (1 + e**-x)``, which lies strictly in (0, 1) and therefore never
    equals the {1, -1} labels it is compared against in main() — the
    reported ensemble accuracy was always 0%.  The docstring (and the
    AdaBoost decision rule sign(sum(alpha_k * h_k(x)))) call for the sign.
    (Name kept for backward compatibility with existing callers.)
    """
    return 1 if x > 0 else -1


def cal_current_rate(predicted, actual):
    """Return the accuracy of `predicted` against `actual`, in percent."""
    hits = sum(1 for p, a in zip(predicted, actual) if p == a)
    return hits / len(actual) * 100.0


def main():
    """Train an AdaBoost ensemble of decision trees on the sonar data and
    print the per-round weak-classifier and ensemble accuracy."""
    # Load the sample data.
    data_set = load_csv()
    # Fix the RNG so runs are reproducible.  NOTE(review): the original
    # comment mentioned 5-fold cross-validation, but no split is performed
    # here — train and test are both the full sample set.
    seed(1)
    weights_list = []
    # Training data and test data are both the whole sample set.
    train_data = list(data_set)
    test_data = list(data_set)
    # print(len(train_data[0]))
    # print(len(test_data[0]))
    # Start every training sample with equal weight 1/N.
    weights = init_data_weight(train_data)
    # print(weights)
    # Build k weak classifiers.
    k = 10
    max_depth, min_size, n_features = 6, 1, 20  # tree depth limit, min rows to keep splitting, features sampled per split
    tree_list = []
    classify_weight_list = []
    for i in range(k):
        tree = build_decision_tree(train_data, max_depth, min_size, n_features)
        tree_list.append(tree)
        # Evaluate this weak classifier and compute its weight (alpha).
        predict_list = [predict(tree, row) for row in test_data]
        real_list = [row[-1] for row in test_data]
        score = cal_current_rate(predict_list, real_list)
        print(score)
        classify_weight = cal_weight_of_classify(predict_list, real_list, weights)
        classify_weight_list.append(classify_weight)
        # print(classify_weight_list)
        # Re-weight the training samples for the next round.
        update_data_weight(train_data, weights, predict_list, real_list, classify_weight)
        # NOTE(review): this appends the same list object every round, so all
        # entries of weights_list alias the latest weights — append a copy
        # (list(weights)) if the per-round history is actually needed.
        weights_list.append(weights)
        current = 0
        # Score the ensemble so far: alpha-weighted vote of all trees.
        for row in test_data:
            predicted_list = [predict(tree, row) for tree in tree_list]
            final_predict = sigmoid_function(sum(list(map(lambda x, y: x * y, predicted_list, classify_weight_list))))
            if final_predict == row[-1]:
                current += 1
        print('current_rate: ', current / len(test_data) * 100.0, '%')
        # cal_weight_of_classify returns exactly 1 as a sentinel for a
        # perfect classifier; stop boosting early in that case.
        if classify_weight == 1:
            break


# Script entry point: run the AdaBoost demo only when executed directly.
if __name__ == '__main__':
    main()
