#! /usr/bin/python
# _*_ coding:UTF-8 _*_
import numpy as np
import matplotlib.pyplot as plt


# Build a tiny hand-crafted training set.
def load_simp_data():
    """Return (data_mat, class_labels): a 5x2 sample matrix and the
    corresponding (5, 1) column matrix of +1/-1 labels."""
    samples = [[1., 2.1],
               [2., 1.1],
               [1.3, 1.],
               [1., 1.],
               [2., 1.]]
    labels = [[1], [1], [-1], [-1], [1]]
    return np.matrix(samples), np.matrix(labels)


# Scatter-plot the data set, one marker style per class.
def vist_data(data_mat, class_labels):
    """Visualize the samples: circles for the +1 class, squares for -1."""
    pos_idx = np.argwhere(class_labels == 1)[:, 0]   # row indices of +1 samples
    neg_idx = np.argwhere(class_labels == -1)[:, 0]  # row indices of -1 samples
    axes = plt.figure().add_subplot(111)
    axes.scatter(data_mat[pos_idx, 0], data_mat[pos_idx, 1], marker='o', s=50)
    axes.scatter(data_mat[neg_idx, 0], data_mat[neg_idx, 1], marker='s', s=50)
    plt.show()


# Plot the samples together with the split found by a single decision stump.
def vist_class_result(data_mat, class_labels, best_stump):
    """Visualize a stump's decision boundary over the data set.

    Circles mark the +1 class, squares the -1 class. The boundary is drawn
    vertical when the stump splits on feature 0, horizontal otherwise.
    """
    pos_idx = np.argwhere(class_labels == 1)[:, 0]   # row indices of +1 samples
    neg_idx = np.argwhere(class_labels == -1)[:, 0]  # row indices of -1 samples
    axes = plt.figure().add_subplot(111)
    axes.scatter(data_mat[pos_idx, 0], data_mat[pos_idx, 1], marker='o', s=50)
    axes.scatter(data_mat[neg_idx, 0], data_mat[neg_idx, 1], marker='s', s=50)

    thresh = best_stump['thresh']
    if best_stump['dim'] == 0:
        plt.plot([thresh, thresh], [0.9, 2.3])  # vertical line: split on x
    else:
        plt.plot([0.9, 2.3], [thresh, thresh])  # horizontal line: split on y

    plt.show()


# Classify samples with a one-level decision stump.
# data        - sample matrix
# dimen       - index of the feature column to split on
# thresh_val  - split threshold
# thresh_ineq - 'lt' marks samples <= threshold as -1; otherwise samples
#               > threshold are marked -1
def stump_classify(data, dimen, thresh_val, thresh_ineq):
    """Return an (m, 1) matrix of +1/-1 predictions for the given stump."""
    n_samples = np.shape(data)[0]
    # Everything starts as +1; the matching side of the split flips to -1.
    result = np.mat(np.ones((n_samples, 1), dtype=np.int8))
    if thresh_ineq == 'lt':
        mask = data[:, dimen] <= thresh_val
    else:
        mask = data[:, dimen] > thresh_val
    result[mask] = -1
    return result


# Build the best single-level decision stump for weighted samples.
# data         - training sample matrix (m x n)
# class_labels - (m x 1) matrix of +1/-1 labels
# D            - (m x 1) matrix of per-sample weights
def build_stump(data, class_labels, D):
    """Exhaustively search every (feature, threshold, inequality) combination
    and return the stump with the lowest weighted error.

    Returns:
        best_stump     - dict with 'dim' (feature index), 'thresh' (threshold)
                         and 'ineq' ('lt'/'gt', i.e. which side maps to -1)
        min_error      - lowest weighted classification error found
        best_class_est - (m x 1) predictions of the best stump
    """
    m, n = np.shape(data)
    num_steps = 10  # number of threshold steps tried per feature
    best_stump = {}
    best_class_est = np.mat(np.zeros((m, 1)))
    min_error = np.inf

    for i in range(n):  # every feature
        range_min = data[:, i].min()
        range_max = data[:, i].max()
        step_size = (range_max - range_min) / (num_steps * 1.0)
        # j runs from -1 to num_steps so thresholds just below the minimum and
        # just above the maximum are also tried.
        for j in range(-1, num_steps + 1):
            for inequal in ['lt', 'gt']:  # which side of the split becomes -1
                thresh_val = range_min + float(j) * step_size
                # Predictions under feature i, this threshold and inequality.
                predicted_val = stump_classify(data, i, thresh_val, inequal)
                # Error vector: 1 where prediction disagrees with the label.
                error = np.mat(np.ones((m, 1)))
                error[predicted_val == class_labels] = 0
                # Weighted error: total weight of the misclassified samples.
                weight_error = D.T * error
                print("split: dim %d, thresh %.2f, thresh inequal: %s, "
                      "the weight error is %.3f"
                      % (i, thresh_val, inequal, float(weight_error)))
                # Keep the best stump seen so far.
                if weight_error < min_error:
                    min_error = weight_error
                    best_class_est = predicted_val.copy()
                    best_stump['dim'] = i
                    best_stump['thresh'] = thresh_val
                    best_stump['ineq'] = inequal
    return best_stump, min_error, best_class_est


# Train an AdaBoost ensemble of decision stumps.
# data        - training sample matrix (m x n)
# class_label - (m x 1) matrix of +1/-1 labels
# num_itear   - maximum number of boosting rounds
def adaboost_train(data, class_label, num_itear=40):
    """Return the list of weak classifiers (stump dicts, each carrying its
    vote weight under the 'alpha' key).

    Training stops early once the combined classifier reaches zero error on
    the training set.
    """
    weak_class_arr = []  # collected weak classifiers
    m = np.shape(data)[0]
    D = np.mat(np.ones((m, 1)) / m)  # sample weights, initially uniform
    agg_class_est = np.mat(np.zeros((m, 1)))  # running weighted vote per sample
    for i in range(num_itear):
        # Fit the best stump for the current sample weights.
        best_stump, error, class_est = build_stump(data, class_label, D)
        print("D: {}".format(D.T))
        # Classifier weight; 1e-16 guards against division by zero.
        alpha = float(0.5 * np.log((1 - error) / max(error, 1e-16)))
        best_stump['alpha'] = alpha
        weak_class_arr.append(best_stump)
        print("class_est: {}".format(class_est.T))
        # Reweight samples: misclassified ones gain weight, correct ones lose.
        expon = (-1) * alpha * np.multiply(class_label, class_est)
        Z = np.sum(np.multiply(D, np.exp(expon)))  # normalization constant
        D = np.multiply((D / Z), np.exp(expon))

        agg_class_est += alpha * class_est  # accumulate the weighted votes
        print("agg_class_est: {}".format(agg_class_est.T))
        # Training error of the combined (signed-vote) classifier so far.
        agg_errors = (np.sign(agg_class_est) != class_label).sum()
        error_rate = agg_errors / (m * 1.0)
        print("total error: {}\n".format(error_rate))

        # Perfect fit on the training set - stop boosting.
        if error_rate == 0.0:
            break
    return weak_class_arr


# AdaBoost classifier: combine the trained stumps by weighted vote.
# data           - sample matrix to classify
# classifier_arr - list of stump dicts produced by adaboost_train
def ada_classify(data, classifier_arr):
    """Return an (m, 1) matrix of +1/-1 predictions for each sample."""
    m = np.shape(data)[0]
    agg_class_est = np.mat(np.zeros((m, 1)))  # accumulated weighted votes
    for stump in classifier_arr:
        class_est = stump_classify(data, stump['dim'],
                                   stump['thresh'], stump['ineq'])
        agg_class_est += stump['alpha'] * class_est
        print(agg_class_est)
    # The sign of the weighted sum is the final class.
    return np.sign(agg_class_est)


# Load a tab-separated data file: last column is the label, the rest features.
# file_name - path to the tab-delimited text file
def load_data_set(file_name):
    """Return (data_mat, label_mat): an (m, n-1) feature matrix and an
    (m, 1) column matrix of labels parsed from *file_name*."""
    data_arr = []
    label_arr = []
    # 'with' guarantees the file handle is closed (the original opened the
    # file twice and never closed either handle).
    with open(file_name) as fr:
        for line in fr:
            cur_line = line.strip().split('\t')
            if cur_line == ['']:
                continue  # skip blank lines instead of crashing on float('')
            data_arr.append([float(v) for v in cur_line[:-1]])
            label_arr.append(float(cur_line[-1]))
    return np.mat(data_arr), np.mat(label_arr).T

