# from sklearn.datasets import load_boston # has been removed from scikit-learn since version 1.2.
from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

# Use the TkAgg GUI backend so plt.show() opens interactive windows.
plt.switch_backend('TkAgg')


# %matplotlib inline

# The Boston dataset was removed from scikit-learn (>= 1.2), so fetch the raw
# text from CMU StatLib instead.  NOTE(review): this performs a network fetch.
data_url = "http://lib.stat.cmu.edu/datasets/boston"
raw_df = pd.read_csv(data_url, sep=r"\s+", skiprows=22,header=None)  # skip the 22-line file header; no row is used as column names

# In the raw file each sample spans two physical rows:
#   even rows hold the first 11 feature columns,
#   odd rows hold the remaining 2 features plus the target (MEDV).
# data = 13 feature columns: even rows stacked with the first two columns of the odd rows.
data = np.hstack([raw_df.values[::2, :], raw_df.values[1::2, :2]])
# target = third column of the odd rows (median home value, MEDV).
target = raw_df.values[1::2, 2]

print(data[:10])

# FIX: `data` and `target` are plain NumPy arrays (built above), not the
# dict-like Bunch the removed sklearn loader returned, so `data['data']`,
# `data['feature_names']` and `data['target']` all raised.  Index the arrays
# directly and supply the Boston feature names explicitly.
FEATURE_NAMES = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE',
                 'DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT']
X = pd.DataFrame(data, columns=FEATURE_NAMES)
y = pd.DataFrame(target, columns=['MEDV'])
# Concatenate features and target column-wise into a single frame.
df = pd.concat([X, y], axis=1)
# Hold out 10% of the rows for testing; fixed seed for reproducibility.
df_train, df_test = train_test_split(df, test_size=0.1, random_state=1)
print('df_train:\n', df_train[:5])
#  (455, 14)
print('df_train.shape:\n', df_train.shape)


# generate tree
def sum_squared_error(y_true, y_pred):
    """Sum of squared residuals between *y_true* and the prediction *y_pred*.

    Returns 0 for an empty *y_true* so callers can score empty partitions.
    """
    if len(y_true) == 0:
        return 0
    # Residual sum of squares: sum((y - y_pred)^2)
    return np.square(y_true - y_pred).sum()


# Visualize the target distribution: 50-bin histogram of median home values.
plt.hist(df['MEDV'], 50)
plt.show()

print('df_train.columns:\n', df_train.columns)
#  <class 'pandas.core.series.Series'>
print('df_train["MEDV"] type:\n', type(df_train['MEDV']))


# 遍历每一列
# for var in df_train.columns[:-1]:
#     # unique: 用于获取Series对象的唯一值。唯一性按出现顺序返回。
#     var_vars = df_train[var].unique()
#     # print(var_vars.shape) # 每列的唯一值数量不同
#     # 对给定的数组的元素进行排序
#     thresh = np.sort(var_vars)
#     # 残差平方
#     # 初始化一个和thersh一样shape的zero矩阵
#     sse = np.zeros_like(thresh)
#     # print(thresh.shape) # 每列的唯一值数量不同
#     # i是sort之后的序号，t是唯一值
#     for i, t in enumerate(thresh):
#         # print(i, t)
#         # df的var列里，所有 <= t 的元素的索引（布尔索引）
#         idx = df[var] <= t
#         # print(idx[:5])
#         # print(idx.shape) # (506,)
#         # 下面根据是否<=t分成两部分
#         # MEDV列，所有df[var]<=t的行 的平均值 -> 预测的值？
#         l_val = df_train.loc[idx, 'MEDV'].mean()
#         # MEDV列，所有df[var]<=t的行 的所有元素 和 l_val 的残差平方
#         sse_l = sum_squared_error(df_train.loc[idx, 'MEDV'], l_val)
#         r_val = df_train.loc[~idx, 'MEDV'].mean()
#         sse_r = sum_squared_error(df_train.loc[~idx, 'MEDV'], r_val)
#         sse[i] = sse_l + sse_r
#
#     # 似乎是显示了残差平方的变化
#     plt.plot(thresh, sse)
#     plt.plot
#     plt.yscale('log')
#     plt.title(var)
#     plt.show()
#
#     # 每一列特征排序后，从小到大，看它们的残差平方最小的点
#     # 然后找出总的残差平方最小的取值？


def get_thresh_per_var(df, var, y_col):
    """Find the best split threshold on column *var* for predicting *y_col*.

    Every unique value t of df[var] is tried as a split point (rows with
    value <= t go left, the rest go right); each split is scored by the sum
    of squared errors around the left/right partition means.

    Returns (best_threshold, best_sse, left_mean, right_mean).
    """
    # Candidate thresholds: the sorted unique values of the column.
    thresh = np.sort(df[var].unique())
    # FIX: force float buffers.  np.zeros_like(thresh) inherits the column
    # dtype, so an integer-valued column would silently truncate the stored
    # means and SSE values.
    sse = np.zeros(len(thresh), dtype=float)
    l_val = np.zeros(len(thresh), dtype=float)
    r_val = np.zeros(len(thresh), dtype=float)
    for i, t in enumerate(thresh):
        idx = df[var] <= t  # boolean mask: rows in the left partition
        # The partition means are the constant predictions for each side.
        l_val[i] = df.loc[idx, y_col].mean()
        r_val[i] = df.loc[~idx, y_col].mean()  # NaN when t == max (empty right side)
        # SSE of each side around its own mean; an empty selection sums to
        # 0.0, matching what sum_squared_error returns for empty input.
        sse_l = np.square(df.loc[idx, y_col] - l_val[i]).sum()
        sse_r = np.square(df.loc[~idx, y_col] - r_val[i]).sum()
        sse[i] = sse_l + sse_r

    best = sse.argmin()  # index of the threshold with the smallest total SSE
    return thresh[best], sse.min(), l_val[best], r_val[best]


def get_thresh(df, y_col):
    """Scan every feature column and return the overall best split.

    Returns (column_name, threshold, left_mean, right_mean) for the split
    with the smallest total SSE across all candidate columns.
    """
    best_sse = float('inf')  # inf so the first candidate always wins
    feature_cols = (c for c in df.columns if c != y_col)
    for col in feature_cols:
        # Best threshold for this single column, the SSE it achieves, and
        # the means of the resulting left/right partitions.
        t, sse, l_val, r_val = get_thresh_per_var(df, col, y_col)
        if sse < best_sse:
            best_sse, best_col, best_thresh = sse, col, t
            best_l_val, best_r_val = l_val, r_val

    return best_col, best_thresh, best_l_val, best_r_val


class Node:
    """A binary regression-tree node.

    val holds the split tuple (column, threshold, left_mean, right_mean).
    """

    def __init__(self, val):
        # Split description for this node.
        self.val = val
        # FIX: child links start as None.  The original code assigned the
        # *class* `Node` itself, which is always truthy and would break any
        # `if node.left:` check on a node whose children were never reassigned.
        self.left = None
        self.right = None


# 分裂
def splitting(df, y_col, depth=1):
    """Recursively grow the regression tree on *df*.

    Returns the subtree root, or None when the node would hold fewer than
    LEFT_THRESH samples or exceed DEPTH_THRESH levels.
    """
    # Stopping criteria: maximum depth exceeded or too few samples to split.
    if depth > DEPTH_THRESH or len(df) < LEFT_THRESH:
        return None
    print('df.shape:\n', df.shape, '\ndepth:\n', depth)

    # Best (column, threshold) split for this node plus the partition means.
    col, thresh, l_val, r_val = get_thresh(df, y_col)
    node = Node((col, thresh, l_val, r_val))

    # Recurse into the two partitions induced by the chosen split.
    left_mask = df[col] <= thresh
    node.left = splitting(df[left_mask], y_col, depth + 1)
    node.right = splitting(df[~left_mask], y_col, depth + 1)

    return node


# Minimum number of samples required to split a node further.
LEFT_THRESH = 20
# Maximum depth of the tree.
DEPTH_THRESH = 3
# Fit the regression tree on the training split.
tree = splitting(df_train, 'MEDV')


def print_tree(tree, tab=''):
    """Pretty-print the tree pre-order, indenting one tab per level."""
    print(tab + str(tree.val))
    child_indent = tab + '\t'
    for child in (tree.left, tree.right):
        if child:
            print_tree(child, child_indent)


print_tree(tree)


def predict(row, tree=None):
    """Predict the target for one sample *row* by walking the fitted tree.

    Parameters
    ----------
    row : mapping/Series exposing the feature columns used as split variables.
    tree : Node, optional
        Subtree to evaluate.  Defaults to the module-level fitted `tree`.

    FIX: the original default `tree=tree` froze the module-level global into
    the signature at definition time (early-binding default pitfall) and made
    the function unusable before/without the global.  Resolve it lazily.
    """
    if tree is None:
        # Late-bind the module-level fitted tree so the default tracks refits.
        tree = globals()['tree']
    # val = (split column, threshold, left partition mean, right partition mean)
    col, thresh, l_val, r_val = tree.val
    if row[col] <= thresh:
        # Descend left while possible; otherwise the left mean is the leaf value.
        return predict(row, tree.left) if tree.left else l_val
    # Descend right while possible; otherwise the right mean is the leaf value.
    return predict(row, tree.right) if tree.right else r_val


from sklearn.metrics import mean_squared_error

# apply with axis=1 calls predict once per test ROW (each row as a Series).
y_pred = df_test.apply(predict, axis=1)
mse = mean_squared_error(y_pred, df_test['MEDV'])
print('mse:\n', mse)  # 15.301130071406932

# Baseline: always predict the training-set mean of MEDV.
mse2 = mean_squared_error(df_test['MEDV'], df_train['MEDV'].mean() * np.ones_like(df_test['MEDV']))
print('mse2:\n', mse2)  # 92.84966098534088

# R^2-style score: fraction of the baseline error explained by the tree.
print(1 - mse / mse2)  # 0.835205321063879
