import numpy as np
import matplotlib.pyplot as plt
from pylab import mpl
from bs4 import BeautifulSoup
import os
from sklearn.linear_model import LinearRegression as SK_LR
from sklearn.linear_model import Ridge, Lasso, ElasticNet, LassoLars
from sklearn.metrics import r2_score
# r2_score_lasso = r2_score(y_test, y_pred_lasso)

mpl.rcParams['font.sans-serif'] = ['SimHei'] # default font (SimHei supports Chinese glyphs)
mpl.rcParams['axes.unicode_minus'] = False # keep '-' from rendering as a box when figures are saved


def load_data(file):
    """
    Load a tab-separated numeric data file.

    Each non-empty line holds float columns; the last column is the label,
    all preceding columns are the features.

    :param file: path to the TSV file
    :return: (data_set, labels) — list of feature rows, list of labels
    """
    with open(file, 'r') as f:
        file_data = f.readlines()

    data_set = []
    labels = []
    for line in file_data:
        # rstrip('\n') instead of [:-1]: the original sliced off the last
        # character unconditionally, corrupting the final line when the file
        # has no trailing newline.
        line = line.rstrip('\n')
        if not line:
            # tolerate blank lines (e.g. a trailing empty line)
            continue
        info_arr = [float(num) for num in line.split('\t')]
        data_set.append(info_arr[: -1])
        labels.append(info_arr[-1])

    return data_set, labels


class LineRegression(object):
    """
    Linear regression toolbox: ordinary least squares, locally weighted
    regression, ridge regression, forward stagewise regression, plus
    evaluation/plotting helpers and an sklearn comparison harness.
    """
    def __init__(self, train_data, train_label):
        # train_data: list of feature rows; train_label: list of target values
        self.train_data = train_data
        self.train_label = train_label
        # weight vector; filled in by the fitting methods below
        self.weight = np.zeros(len(train_data[0]))

    def stand_regres(self):
        """
        Standard linear regression (ordinary least squares).

        Minimize (y - X * w)^(T) * (y - X * w); differentiating w.r.t. w gives
        X^(T) * (y - X * w); setting it to zero yields:
            w = (X^(T) * X)^(-1) * X^(T) * y
        i.e. (inverse of X-transpose times X) times X-transpose times y.

        Background on the matrix inverse:
            A^(-1) = A^(*) / |A|
            inverse = adjugate matrix / determinant
            adjugate of A: the matrix of algebraic cofactors A_ij of each a_ij
            minor: the (n-1)-order matrix left after removing row i and column j
            algebraic cofactor: (-1)^(i+j) * minor

        :return: None; the solution is stored in self.weight.
        """
        x_arr_t = np.mat(self.train_data).T.A
        x_arr_x = np.dot(x_arr_t, self.train_data)
        # determinant check: 0 means X^T X is singular and has no inverse
        if np.linalg.det(x_arr_x) == 0:
            # singular matrix — cannot invert, leave self.weight unchanged
            print('This matrix is singular, can not do inverse')
            return
        """
        1、求解矩阵的逆
         1) np.mat(t).I  # t为列或者数组
         2) np.linalg.inv(t)  # t为矩阵，列表，或者数组均可
         
        2、求解方程A*x = b
          1）np.linalg.solve(A, b)
          2) np.dot(np.linalg.inv(A), b)
        """
        w_t_y = np.dot(x_arr_t, self.train_label)
        self.weight = np.dot(np.linalg.inv(x_arr_x), w_t_y)

    def lwlr(self, test_point, k=1.0):
        """
        Locally weighted linear regression — single-point prediction.

        Drawback: expensive — every prediction re-solves against the whole
        training set.

        w = (X^(T) * W * X)^(-1) * X^(T) * W * y
        where W is a diagonal matrix assigning a weight to each sample.

        Uses a Gaussian kernel (note: the code below uses the SQUARED
        Euclidean distance diff·diff in the exponent):
            w(i, i) = exp(dist² / (-2 * k^2))

        :param test_point: feature row to predict for
        :param k: kernel bandwidth; smaller k weights nearby points more
        :return: predicted value, or None when X^T W X is singular
        """
        m = np.shape(self.train_data)[0]
        point_weights = np.eye(m)
        for j in range(m):
            # squared Euclidean distance between the query and sample j
            diff = np.array(test_point) - np.array(self.train_data[j])
            # diff_distance = np.dot(diff, [[num] for num in diff])
            diff_distance = np.dot(diff, diff)

            # point_weights[j, j] = np.exp(diff_distance[0] / (-2 * np.power(k, 2)))
            point_weights[j, j] = np.exp(diff_distance / (-2 * np.power(k, 2)))

        train_data_t = np.mat(self.train_data).T.A
        train_data_weight = np.dot(point_weights, self.train_data)
        x_t_x = np.dot(train_data_t, train_data_weight)

        # determinant check: 0 means the weighted normal matrix is singular
        if np.linalg.det(x_t_x) == 0:
            print('奇异矩阵，不能求逆')
            return

        train_label_weight = np.dot(point_weights, self.train_label)
        self.weight = np.dot(np.linalg.inv(x_t_x), np.dot(train_data_t, train_label_weight))
        return np.dot(test_point, self.weight)

    def ridge_regres(self, lam=0.2):
        """
        Ridge regression (cf. lasso: the constraint is sum of squared /
        absolute weights < lambda; this regression avoids multicollinearity).

        Motivation:
        1. When #features > #samples the matrix is rank-deficient and cannot
           be inverted.
        2. Adding a penalty term to the data matrix prevents overfitting.

        Standard linear regression: w = (X^(T) * X)^(-1) * X^(T) * y
        Since X may be rank-deficient, adjust with I (the n x n identity):
            w = (X^(T) * X + lambda * I)^(-1) * X^(T) * y

        :param lam: ridge penalty strength
        :return: None; the solution is stored in self.weight.
        """
        # standardize: center features and divide by the (global) variance
        x_var = np.array(self.train_data).var()
        # y_var = np.array(self.train_label).var()
        x = (np.array(self.train_data) - np.array(self.train_data).mean()) / x_var
        # with y also scaled, the coefficients would come out smaller
        # y = (np.array(self.train_label) - np.array(self.train_label).mean()) / y_var
        y = np.array(self.train_label) - np.array(self.train_label).mean()

        # solve the penalized normal equations
        m = np.shape(x)[1]
        x_t_x = np.dot(np.array(x).T, x)
        x_t_x_modify = x_t_x + lam * np.eye(m)

        # determinant check: 0 means still singular — lam needs adjusting
        if np.linalg.det(x_t_x_modify) == 0:
            print('奇异矩阵，不能求逆，请调整参数lam值')
            return

        w_t_y = np.dot(np.array(x).T, y)
        self.weight = np.dot(np.linalg.inv(x_t_x_modify), w_t_y)

    @staticmethod
    def ridge_regres_paramter(train_data, train_label, lam=0.2):
        """
        Static variant of ridge_regres operating on explicit data.

        :param train_data: feature rows
        :param train_label: target values
        :param lam: ridge penalty strength
        :return: weight vector, or None when the penalized matrix is singular
        """
        # standardize: center features and divide by the (global) variance
        x_var = np.array(train_data).var()
        x = (np.array(train_data) - np.array(train_data).mean()) / x_var
        y = np.array(train_label) - np.array(train_label).mean()

        # solve the penalized normal equations
        m = np.shape(x)[1]
        x_t_x = np.dot(np.array(x).T, x)
        x_t_x_modify = x_t_x + lam * np.eye(m)

        # determinant check: 0 means still singular — lam needs adjusting
        if np.linalg.det(x_t_x_modify) == 0:
            print('奇异矩阵，不能求逆，请调整参数lam值')
            return

        w_t_y = np.dot(np.array(x).T, y)
        return np.dot(np.linalg.inv(x_t_x_modify), w_t_y)

    def ridge_regres_test(self, train_data, train_label):
        """
        Ridge regression swept over 30 exponentially spaced lambda values
        (lam = e^(i-10), i = 0..29), returning the MEAN weight vector.

        :param train_data: feature rows
        :param train_label: target values
        :return: 1-D mean weight vector of shape (n_features,)
        """
        num_test_iterm = 30
        test_arr = np.ones((num_test_iterm, np.shape(train_data)[1]))
        for i in range(num_test_iterm):
            mid_weight = self.ridge_regres_paramter(train_data, train_label, lam=np.exp(i - 10))
            test_arr[i] = mid_weight
        return test_arr.mean(axis=0)

    def stage_wise_regres(self, eps=0.01, num_iter=100):
        """
        Forward stagewise linear regression (results similar to the lasso,
        but much simpler to implement).

        Greedy: each iteration nudges the single coefficient (by +/-eps)
        that most reduces the residual sum of squares.

        :param eps: step size applied to one coefficient per iteration
        :param num_iter: number of iterations
        :return: (num_iter, n_features) array, row i = weights after iter i
        """
        # standardize: center features and divide by the (global) variance
        x_var = np.array(self.train_data).var()
        # y_var = np.array(self.train_label).var()
        x = (np.array(self.train_data) - np.array(self.train_data).mean()) / x_var
        # with y also scaled, the coefficients would come out smaller
        # y = (np.array(self.train_label) - np.array(self.train_label).mean()) / y_var
        y = np.array(self.train_label) - np.array(self.train_label).mean()

        # greedy coordinate-wise search
        m, n = np.shape(x)
        ws = np.zeros((n, 1))
        ws_max = ws.copy()
        weight = np.zeros((num_iter, n))
        for i in range(num_iter):
            lowest_error = np.inf
            for j in range(n):
                print('i: {}; j: {}'.format(i, j))
                for sign in [-1, 1]:
                    ws_test = ws.copy()
                    ws_test[j] += eps * sign
                    y_test = np.dot(x, ws_test)
                    rss_error = ((y - y_test) ** 2).sum()
                    if rss_error < lowest_error:
                        lowest_error = rss_error
                        ws_max = ws_test
            ws = ws_max.copy()
            weight[i] = ws.T
        return weight

    def stand_regres_predict(self, test_data):
        """
        Predict with the stored standard linear-regression weights.

        :param test_data: feature row(s)
        :return: predicted value(s)
        """
        return np.dot(self.weight, np.array(test_data).T)

    def lwlr_predict(self, test_data, k=1.0):
        """
        Locally weighted linear regression over a whole test set
        (one lwlr solve per test point).

        :param test_data: feature rows
        :param k: kernel bandwidth passed through to lwlr
        :return: 1-D array of predictions
        """
        m = np.shape(test_data)[0]
        pre_label = np.zeros(m)
        for i in range(m):
            pre_label[i] = self.lwlr(test_point=test_data[i], k=k)
        return pre_label

    def cross_validation(self, num_val=10):
        """
        Cross-validation over the ridge lambda sweep: num_val random
        90/10 train/test splits, error tracked per lambda.

        NOTE(review): ridge_regres_test returns the MEAN weight vector
        (1-D, shape (n,)), but `weight[k, :]` below indexes it as a
        (30, n) matrix — this looks like it will raise IndexError;
        verify against the intended (full-sweep) return value.

        :param num_val: number of random splits
        :return: None; prints the best model found
        """
        m = len(self.train_label)
        index_list = [i for i in range(m)]
        error = np.zeros((num_val, 30))
        for i in range(num_val):
            train_x = []
            train_y = []
            test_x = []
            test_y =[]
            # shuffle indices for a fresh random 90/10 split
            np.random.shuffle(index_list)
            for j in range(m):
                if j < m * 0.9:
                    train_x.append(self.train_data[index_list[j]])
                    train_y.append(self.train_label[index_list[j]])
                else:
                    test_x.append(self.train_data[index_list[j]])
                    test_y.append(self.train_label[index_list[j]])

            weight = self.ridge_regres_test(train_x, train_y)
            for k in range(30):
                var_test_x = np.var(test_x)
                stand_test_x = (np.array(test_x) - np.mean(test_x)) / var_test_x
                # map predictions back to the pre-standardization scale
                pre_y = np.dot(weight[k, :], np.array(stand_test_x).T) + np.mean(test_y)
                error[i, k] = ((pre_y - np.array(test_y)) ** 2).sum()

        error_mean = np.mean(error, 0)
        min_mean = float(min(error_mean))
        best_weight = weight[np.nonzero(error_mean == min_mean)]
        print('the best model from ridge regression is: {}'.format(best_weight / np.var(self.train_data)))
        print('with constant term: {}'.format(-1 * sum(np.multiply(np.mean(self.train_data),
                                                                   best_weight / np.var(self.train_data))) +
                                              np.mean(self.train_label)))

    @staticmethod
    def evaluate_model(pre_label, test_label):
        """
        Evaluate predictions against ground truth: prints the Pearson
        correlation matrix, the standard deviation of the error, and the
        sum of squared errors.

        :param pre_label: predicted values
        :param test_label: true values
        :return: None (prints metrics)
        """
        """
        1、皮尔逊相关系数：线性相关
        r = cov(x, y) / sqrt(var(x) * var(y))

        协方差：
        cov(x, y) = E[(x - x_mean) * (y - y_mean)] = E[x * y] - x_mean * y_mean

        2、Spearman秩相关系数：衡量秩序的相关性
        spocc(x, y) = PLCC(R(x), R(y))

        秩序R(xi) = k：xi是序列x中的第k大/小
        PLCC是Pearson线性相关系数

        3、Kendall(肯德尔)系数：计算有序类别的相关系数
        同序对（concordant pairs）和异序对（discordant pairs）之差与总对数（n*(n-1)/2)的比值定义为Kendall(肯德尔)系数
        """
        corr = np.corrcoef(test_label, pre_label)
        print('Pearson线性相关系数: {}'.format(corr))

        """
        方差
        var(x) = sum((x(i) - x_mean)^2) / len(x)
        
        标准差
        std(x) = sqrt(var(x))
        """
        error = np.array(test_label) - np.array(pre_label)
        std_error = error.std()
        print('标准误差：{}'.format(std_error))

        # prediction error (sum of squared residuals)
        # training error keeps shrinking as the model grows more complex
        # (underfit -> fit -> overfit), while test error falls then rises;
        # test error is lowest where the model generalizes best
        pre_error = (error ** 2).sum()
        print('预测误差： {}'.format(pre_error))

        # self.plt_plot(test_data, test_label, pre_label)

    @staticmethod
    def plt_plot(test_data, test_label, pre_label):
        """
        Plot the evaluation result: scatter of the true labels plus the
        fitted curve (sorted by the second feature column).

        :param test_data: feature rows (column 1 is used as the x-axis)
        :param test_label: true values (green scatter)
        :param pre_label: predicted values (red curve)
        :return: None (shows the figure)
        """
        # fig = plt.figure()
        # ax = fig.add_subplot(111)
        #
        # ax.scatter([num[1] for num in x], y)
        # ax.plot([num[1] for num in x], [num[0] for num in pre_y])

        plt.scatter([num[1] for num in test_data], test_label, c='g')
        # standard linear regression
        # plt.plot([num[1] for num in test_data], pre_label, c='r')

        # locally weighted linear regression: sort by feature 1 so the
        # curve is drawn left-to-right
        test_data = np.mat(test_data)
        str_ind = test_data[:, 1].argsort(0)
        x_sort = test_data[str_ind][:, 0, :]
        plt.plot(x_sort[:, 1], pre_label[str_ind], c='r')

        plt.show()

    def sklearn_info(self, test_data=None, test_label=None):
        """
        Fit the sklearn counterparts (OLS, Ridge, Lasso, ElasticNet,
        LARS-Lasso) on the training data and, when test data is given,
        print evaluation metrics and R^2 for each.

        :param test_data: optional feature rows to evaluate on
        :param test_label: matching true values
        :return: None (prints metrics)
        """
        lr_stand_clf = SK_LR()
        # ridge regression
        lr_ridge = Ridge(alpha=1e-3)
        # Lasso
        lr_lasso = Lasso(alpha=1e-5)
        # ElasticNet
        lr_elasticnet = ElasticNet(alpha=1e-5)
        # LARS Lasso
        lr_lasso_lars = LassoLars(alpha=1e-5)

        lr_stand_clf.fit(self.train_data, self.train_label)
        lr_ridge.fit(self.train_data, self.train_label)
        lr_lasso.fit(self.train_data, self.train_label)
        lr_elasticnet.fit(self.train_data, self.train_label)
        lr_lasso_lars.fit(self.train_data, self.train_label)

        if test_data:
            lr_stand_pre = lr_stand_clf.predict(test_data)
            lr_ridge_pre = lr_ridge.predict(test_data)
            lr_lasso_pre = lr_lasso.predict(test_data)
            lr_elasticnet_pre = lr_elasticnet.predict(test_data)
            lr_lasso_lars_pre = lr_lasso_lars.predict(test_data)

            print('stand')
            self.evaluate_model(pre_label=lr_stand_pre, test_label=test_label)
            print('模型得分：{}\n'.format(r2_score(test_label, lr_stand_pre)))

            print('ridge')
            self.evaluate_model(pre_label=lr_ridge_pre, test_label=test_label)
            print('模型得分：{}\n'.format(r2_score(test_label, lr_ridge_pre)))

            print('Lasso')
            self.evaluate_model(pre_label=lr_lasso_pre, test_label=test_label)
            print('模型得分：{}\n'.format(r2_score(test_label, lr_lasso_pre)))

            print('ElasticNet')
            self.evaluate_model(pre_label=lr_elasticnet_pre, test_label=test_label)
            print('模型得分：{}\n'.format(r2_score(test_label, lr_elasticnet_pre)))

            print('LARS Lasso')
            self.evaluate_model(pre_label=lr_lasso_lars_pre, test_label=test_label)
            print('模型得分：{}\n'.format(r2_score(test_label, lr_lasso_lars_pre)))

        # print(lr_stand_clf.coef_, lr_stand_clf.intercept_)


def load_lego_data():
    """
    Parse saved eBay result pages for six LEGO sets and collect samples.

    Feature row: [year, piece count, new-flag, original price];
    label: the sold price. Only listings that actually sold, at more than
    half the original price, are kept.

    :return: (data_set, label_set) — feature rows and sold prices
    """
    def read_signal_html(file_path, file_name):
        """
        Parse a single saved HTML page.

        :param file_path: directory holding the HTML files
        :param file_name: file name; must be a key of FILE_PARAMETER
        :return: (x, y) — feature rows and sold prices from this page
        """
        x = []
        y = []
        year, num_pce, orig_price = FILE_PARAMETER.get(file_name)

        file = os.path.join(file_path, file_name)
        with open(file, encoding='utf8', errors='ignore') as f:
            # explicit parser avoids bs4's GuessedAtParserWarning and
            # environment-dependent parser selection
            soup = BeautifulSoup(f.read(), 'html.parser')

        # rows are <table r="1">, <table r="2">, ... — walk until missing
        i = 1
        current_row = soup.findAll('table', r="%d" % i)
        while len(current_row) != 0:
            title = current_row[0].findAll('a')[1].text
            lwr_title = title.lower()

            # 'new'/'nisb' in the listing title marks a factory-sealed set
            if (lwr_title.find('new') > -1) or (lwr_title.find('nisb') > -1):
                new_flag = 1.0
            else:
                new_flag = 0.0

            # only collect listings flagged as sold
            soldUnicde = current_row[0].findAll('td')[3].findAll('span')
            if len(soldUnicde) == 0:
                print("item #%d did not sell" % i)
            else:
                # parse the sold price out of the page
                sold_price = current_row[0].findAll('td')[4]
                price_str = sold_price.text
                price_str = price_str.replace('$','') #strips out $
                price_str = price_str.replace(',','') #strips out ,
                if len(sold_price)>1:
                    price_str = price_str.replace('Free shipping', '')
                sell_price = float(price_str)

                # drop incomplete sets (sold far below the original price)
                if sell_price > orig_price * 0.5:
                    x.append([year, num_pce, new_flag, orig_price])
                    y.append(sell_price)
            i += 1
            # single fetch per iteration (the original also re-fetched at
            # the loop top, scanning the DOM twice per row)
            current_row = soup.findAll('table', r="%d" % i)
        return x, y

    # file name -> [release year, piece count, original price]
    FILE_PARAMETER = {
        'lego8288.html': [2006, 800, 49.99],
        'lego10030.html': [2002, 3096, 269.99],
        'lego10179.html': [2007, 5195, 499.99],
        'lego10181.html': [2007, 3428, 199.99],
        'lego10189.html': [2008, 5922, 299.99],
        'lego10196.html': [2009, 3263, 249.99]
    }

    file_path = './data/B/chp8/setHtml'
    files = os.listdir(file_path)

    data_set = []
    label_set = []

    for file in files:
        # skip stray files the metadata table does not know about;
        # previously these crashed on unpacking FILE_PARAMETER.get(...) == None
        if file not in FILE_PARAMETER:
            continue
        x, y = read_signal_html(file_path, file)
        data_set.extend(x)
        label_set.extend(y)

    return data_set, label_set


def run_test():
    """
    Exercise every regression variant on the ex0 sample data set and
    print evaluation metrics plus an R^2 score for each.
    """
    sample_file = './data/chp8/ex0.txt'
    features, targets = load_data(sample_file)

    model = LineRegression(features, targets)

    # ordinary least squares
    print('标准线性回归')
    model.stand_regres()
    ols_pred = np.dot(features, model.weight)
    model.evaluate_model(ols_pred, targets)
    print('模型得分： {}\n'.format(r2_score(targets, ols_pred)))

    # locally weighted linear regression
    print('局部加权线性回归')
    lwlr_pred = model.lwlr_predict(test_data=features, k=0.003)
    model.evaluate_model(lwlr_pred, targets)
    print('模型得分： {}\n'.format(r2_score(targets, lwlr_pred)))

    # ridge regression, sweeping lambda from large to small;
    # standardize once: center/scale features, center targets
    print('岭回归')
    feature_arr = np.array(features)
    std_x = (feature_arr - feature_arr.mean()) / feature_arr.var()
    centered_y = np.array(targets) - np.array(targets).mean()
    for exponent in reversed(range(31)):
        model.ridge_regres(lam=np.exp(exponent - 10))
        ridge_pred = np.dot(model.weight, std_x.T)
        model.evaluate_model(pre_label=ridge_pred, test_label=centered_y)
        print('模型得分： {}'.format(r2_score(centered_y, ridge_pred)))
    print('\n')

    # ridge regression again, averaged over the lambda sweep
    print('另外一种 岭回归')
    averaged_weight = model.ridge_regres_test(features, targets)
    averaged_pred = np.dot(averaged_weight, std_x.T)
    model.evaluate_model(pre_label=averaged_pred, test_label=centered_y)
    print('模型得分： {}\n'.format(r2_score(centered_y, averaged_pred)))

    # sklearn counterparts
    model.sklearn_info(test_data=features, test_label=targets)


def predict_abalone_years():
    """
    Predict abalone age from physical measurements using locally weighted
    and forward stagewise linear regression, then compare with sklearn.
    """
    data_file = './data/chp8/abalone.txt'
    samples, ages = load_data(data_file)

    # standardize once: center/scale features, center targets
    sample_arr = np.array(samples)
    std_samples = (sample_arr - sample_arr.mean()) / sample_arr.var()
    centered_ages = np.array(ages) - np.array(ages).mean()

    model = LineRegression(samples, ages)
    # run both fitters up front
    lwlr_pred = model.lwlr_predict(test_data=samples, k=0.1)
    stagewise_weights = model.stage_wise_regres(eps=0.01, num_iter=200)

    # locally weighted linear regression
    print('局部加权线性回归')
    model.evaluate_model(lwlr_pred, ages)
    print('模型得分： {}\n'.format(r2_score(ages, lwlr_pred)))

    # forward stagewise linear regression
    print('前向逐步线性回归')
    stagewise_pred = np.dot(stagewise_weights, std_samples.T)
    model.evaluate_model(stagewise_pred, centered_ages)
    print('模型得分： {}\n'.format(r2_score(centered_ages, stagewise_pred)))

    # sklearn counterparts
    model.sklearn_info(test_data=samples, test_label=ages)


def run_lego():
    """
    Predict LEGO set resale prices with ordinary least squares and print
    the prediction for the first sample next to its true price.
    """
    features, prices = load_lego_data()
    # prepend a constant-1 column so the model learns an intercept term
    features = [[1] + row for row in features]

    model = LineRegression(features, prices)
    model.stand_regres()
    first_pred = model.stand_regres_predict(test_data=features[0])

    print('{} {}'.format(first_pred, prices[0]))


if __name__ == '__main__':
    # smoke test of all regression variants
    # run_test()
    # predict abalone age
    predict_abalone_years()
    # predict LEGO set prices
    # run_lego()
