"""
广义线性回归 &  逻辑回归
"""
import numpy as np
from sklearn.linear_model import (LogisticRegression, LinearRegression,
                                  TweedieRegressor, TheilSenRegressor,
                                  RANSACRegressor, HuberRegressor)
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_squared_error
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import Pipeline, make_pipeline


class LR:
    """
    Logistic regression demo on synthetically generated data.
    """

    def __init__(self):
        # Build one fixed train/test split so every evaluation below
        # uses the same held-out data.
        x, y = self.load_lr_data()
        self.x_train, self.x_test, self.y_train, self.y_test = \
            train_test_split(x, y, test_size=0.2, random_state=0)

    @staticmethod
    def load_lr_data():
        """
        Generate data for logistic regression.

        The logit is a linear combination of two Gaussian features plus
        a constant bias column; labels come from thresholding the
        sigmoid y = 1 / (1 + e^(-x)) = (1 + tanh(x/2)) / 2 at 0.5.

        :return: tuple (x, y) — x is a (size, 3) ndarray whose last
            column is all ones, y is a list of 0/1 int labels.
        """
        weight = np.array([2, -7, 23])
        size = 10000
        x = np.column_stack([
            np.random.normal(loc=-3, scale=5, size=size),
            np.random.normal(loc=12, scale=7, size=size),
            np.ones(size),  # bias column
        ])
        # Linear response plus unit Gaussian noise — vectorized instead
        # of a per-element Python loop.
        exp_y = x.dot(weight) + np.random.normal(size=size)

        # Sigmoid via the tanh identity, then threshold at 0.5.
        y = (1 + np.tanh(exp_y / 2)) / 2
        y = (y >= 0.5).astype(int).tolist()
        return x, y

    def lr(self):
        """
        Fit a logistic regression classifier and report accuracy three
        ways: the model's own score, hard predictions, and the argmax
        of the predicted probabilities (the last two are equivalent by
        construction and should agree).

        :return: None
        """
        clf = LogisticRegression(random_state=0)
        clf.fit(self.x_train, self.y_train)

        # Mean accuracy on the test set, as reported by the model.
        scores = clf.score(self.x_test, self.y_test)
        print('scores: {}'.format(scores))

        # Accuracy recomputed from hard predictions.
        y_pred = clf.predict(self.x_test)
        error = np.abs(y_pred - np.array(self.y_test))
        print('rate: {}'.format(1 - error.sum() / len(error)))

        # Accuracy from the most probable class per sample.
        y_pred_proba = clf.predict_proba(self.x_test).argmax(axis=1)
        error_proba = np.abs(y_pred_proba - np.array(self.y_test))

        print('概率预测准确率: {}'.format(1 -
                                   error_proba.sum() / len(error_proba)))


class GLR:
    """
    Generalized linear regression demos: polynomial, Tweedie, and
    robust regressors fitted on synthetically generated quadratic data.
    """

    def __init__(self, degree=2):
        """
        :param degree: polynomial degree for the feature expansion
            (default 2, matching the degree of the generating function
            in :meth:`load_data`).
        """
        x, y = self.load_data()
        self.degree = degree
        self.x_train, self.x_test, self.y_train, self.y_test = \
            train_test_split(x, y, test_size=0.2, random_state=0)

    @staticmethod
    def load_data():
        """
        Generate data for generalized linear regression from
        y = a1 * x1 + a2 * x3 + a3 * x1 * x2 + a4 * x2^2 + b
        plus unit Gaussian noise.

        :return: tuple (x, y) — x is a (size, 3) ndarray of the raw
            features, y is a (size,) ndarray of noisy targets.
        """
        a1, a2, a3, a4, b = -5, 13, 27, -9, 25
        size = 10000
        x1 = np.random.normal(loc=2, scale=10, size=size)
        x2 = np.random.normal(loc=15, scale=7, size=size)
        x3 = np.random.normal(loc=-20, scale=32, size=size)

        y = a1 * x1 + a2 * x3 + a3 * x1 * x2 + a4 * x2 ** 2 + b
        # Vectorized noise instead of a per-element Python loop.
        y = y + np.random.normal(size=size)
        x = np.column_stack([x1, x2, x3])
        return x, y

    def polynomial(self):
        """
        Polynomial regression: expand the raw features with
        PolynomialFeatures, then fit ordinary least squares.

        Pipeline notes:
          - steps: list of (name, transform) tuples; the final step is
            the estimator.
          - PolynomialFeatures generates polynomial and interaction
            features up to ``self.degree``; ``interaction_only=False``
            (default) also emits pure powers such as x1^2;
            ``include_bias=True`` (default) adds a constant column.

        :return: None
        """
        clf = Pipeline(steps=[
            ('poly', PolynomialFeatures(degree=self.degree)),
            ('linear', LinearRegression(fit_intercept=True))
        ])
        clf.fit(self.x_train, self.y_train)

        # Model score (R^2 on the test set).
        scores = clf.score(self.x_test, self.y_test)
        print('scores: {}'.format(scores))

        # Same quantity recomputed explicitly from predictions.
        y_pred = clf.predict(self.x_test)
        r2 = r2_score(y_true=self.y_test, y_pred=y_pred)
        print('r2 scores: {}'.format(r2))

    def tweediereg(self, power=0, alpha=0, link='auto', max_iter=100):
        """
        Tweedie (generalized linear) regression.

        :param power:
           default 0: Normal;
              Specific estimators such as Ridge, ElasticNet are generally more
              appropriate in this case.
           1: Poisson; PoissonRegressor is exposed for convenience. However,
              it is strictly equivalent to
              TweedieRegressor(power=1, link='log').
           2: Gamma; Gamma distribution.
               GammaRegressor is exposed for convenience.
               However, it is strictly equivalent to
               TweedieRegressor(power=2, link='log')
           (1, 2): compound Poisson-Gamma distribution
           3: Inverse Gaussian distribution
        :param alpha: L2 regularization strength; 0 means no penalty
            (assumes X has no collinearity).
        :param link:
           'identity': for Normal distribution
           'log': for Poisson, Gamma and Inverse Gaussian distributions
        :param max_iter: The maximal number of iterations for the solver
        :return: None
        """
        clf = TweedieRegressor(power=power, alpha=alpha, link=link,
                               max_iter=max_iter)
        clf.fit(self.x_train, self.y_train)

        # Model score (D^2 / deviance-based for TweedieRegressor).
        scores = clf.score(self.x_test, self.y_test)
        print('scores: {}'.format(scores))

        # R^2 computed explicitly from predictions for comparison.
        y_pred = clf.predict(self.x_test)
        r2 = r2_score(y_true=self.y_test, y_pred=y_pred)
        print('r2 scores: {}'.format(r2))

    def robust_lr(self):
        """
        Robust regression: compare OLS against outlier-robust
        estimators (Theil-Sen, RANSAC, Huber), each fitted on the same
        polynomial feature expansion.

        :return: None
        """
        estimators = [
            ('OLS', LinearRegression()),
            ('Theil-Sen', TheilSenRegressor(random_state=0)),
            ('RANSAC', RANSACRegressor(random_state=0)),
            ('HuberRegressor', HuberRegressor())
        ]

        for name, estimator in estimators:
            # make_pipeline builds a Pipeline from the given estimators
            # with auto-generated step names.
            clf = make_pipeline(PolynomialFeatures(degree=self.degree),
                                estimator)
            clf.fit(self.x_train, self.y_train)
            mse = mean_squared_error(clf.predict(self.x_test), self.y_test)
            scores = clf.score(self.x_test, self.y_test)
            print('name: {}; mse: {}; scores: {}'.format(name, mse, scores))


def run():
    """
    Demo entry point: run the logistic regression demo, then each of
    the generalized linear regression variants in turn.

    :return: None
    """
    # Logistic regression
    print('{} 逻辑回归 {}'.format('=' * 50, '=' * 50))
    LR().lr()

    # Generalized linear regression: one banner per variant, then run it.
    print('{} 广义线性回归 {}'.format('=' * 50, '=' * 50))
    glr = GLR()
    demos = (
        ('tweediereg', glr.tweediereg),
        ('多项式回归', glr.polynomial),
        ('稳健回归', glr.robust_lr),
    )
    for title, demo in demos:
        print('{} {} {}'.format('-' * 25, title, '-' * 25))
        demo()


# Run the demos only when executed as a script (not on import).
if __name__ == '__main__':
    run()
