import numpy as np
from .metrics import r2_score


class LinearRegression1:
    """Univariate linear regression fitted with an explicit Python loop.

    Solves y = a * x + b by the least-squares closed form, accumulating
    the numerator and denominator sample by sample (the scalar analogue
    of the vectorized version elsewhere in this module).
    """

    def __init__(self):
        # Slope (a_) and intercept (b_); populated by fit().
        self.a_ = None
        self.b_ = None

    def fit(self, x_train, y_train):
        """Estimate slope and intercept from 1-D training arrays.

        Parameters
        ----------
        x_train, y_train : 1-D NumPy arrays of equal length.

        Returns
        -------
        self, to allow method chaining.
        """
        assert x_train.shape[0] == y_train.shape[0], "维数必须相同"
        x_mean = np.mean(x_train)
        y_mean = np.mean(y_train)
        # Closed-form least squares: a = sum((x-x̄)(y-ȳ)) / sum((x-x̄)²)
        num = 0.0
        d = 0.0
        for x, y in zip(x_train, y_train):
            num += (x - x_mean) * (y - y_mean)
            d += (x - x_mean) ** 2
        self.a_ = num / d
        self.b_ = y_mean - self.a_ * x_mean
        return self

    def predict(self, x_test):
        """Return predictions for each element of x_test as a plain list."""
        return [self._predict(x) for x in x_test]

    def _predict(self, x):
        # Apply the fitted line to a single sample.
        return self.a_ * x + self.b_

    def __repr__(self):
        return "LinearRegression1"


class LinearRegression2:
    """Univariate linear regression using vectorized NumPy computation."""

    def __init__(self):
        # Slope (a_) and intercept (b_); populated by fit().
        self.a_ = None
        self.b_ = None

    def fit(self, x_train, y_train):
        """Estimate slope and intercept via vectorized dot products.

        Parameters
        ----------
        x_train, y_train : 1-D NumPy arrays of equal length.

        Returns
        -------
        self, to allow method chaining.
        """
        assert x_train.shape[0] == y_train.shape[0], "维数必须相同"
        x_mean = np.mean(x_train)
        y_mean = np.mean(y_train)
        # Closed-form least squares: a = (x-x̄)·(y-ȳ) / (x-x̄)·(x-x̄)
        num = (x_train - x_mean).dot(y_train - y_mean)
        d = (x_train - x_mean).dot(x_train - x_mean)
        self.a_ = num / d
        self.b_ = y_mean - self.a_ * x_mean
        return self

    def predict(self, x_test):
        """Return predictions for x_test as a NumPy array."""
        return np.array([self._predict(x) for x in x_test])

    def _predict(self, x):
        # Apply the fitted line to a single sample.
        return self.a_ * x + self.b_

    def score(self, x_test, y_test):
        """R² score of the model on the given test data.

        NOTE(review): arguments are passed as (predictions, y_test); confirm
        the project's r2_score expects that order (scikit-learn's convention
        is (y_true, y_pred)).
        """
        return r2_score(self.predict(x_test), y_test)

    def __repr__(self):
        return "LinearRegression2"


class LinearRegression3:
    """Multivariate linear regression with three fitting strategies:
    normal equation (fit), batch gradient descent (fit_gd), and
    stochastic gradient descent (fit_sgd).
    """

    def __init__(self):
        # intercept_ is theta[0]; k_ holds the feature coefficients theta[1:].
        self.intercept_ = None
        self.k_ = None
        self._theta = None

    def fit(self, X_train, y_train):
        """Fit via the normal equation (closed-form least squares).

        theta = (X_bᵀ X_b)⁻¹ X_bᵀ y, where X_b is X_train with a leading
        column of ones for the intercept term.

        Returns self, to allow method chaining.
        """
        X_b = np.hstack((np.ones(shape=(len(y_train), 1)), X_train))
        # NOTE: inv() raises LinAlgError when X_bᵀX_b is singular;
        # np.linalg.lstsq/pinv would be more robust, kept as-is to
        # preserve exact behavior for well-posed inputs.
        self._theta = np.linalg.inv(X_b.T.dot(X_b)).dot(X_b.T).dot(y_train)
        self.intercept_ = self._theta[0]
        self.k_ = self._theta[1:]
        return self

    def fit_gd(self, X_train, y_train):
        """Fit via batch gradient descent. Returns self."""

        def J(X_b, y_train, theta):
            """Mean squared error cost; inf when evaluation blows up."""
            try:
                return np.sum((X_b.dot(theta) - y_train) ** 2) / len(X_b)
            except Exception:  # diverging theta can make the cost unevaluable
                return float("inf")

        def dJ(X_b, y_train, theta):
            """Vectorized gradient of J with respect to theta."""
            return 2 * X_b.T.dot(X_b.dot(theta) - y_train) / len(y_train)

        def dJ_debug(X_b, y_train, theta, epsilon=0.01):
            """Numerical gradient by central differences (sanity check for dJ)."""
            res = np.empty(len(theta))
            for i in range(len(theta)):
                theta_1 = theta.copy()
                theta_1[i] += epsilon
                theta_2 = theta.copy()
                theta_2[i] -= epsilon
                # BUGFIX: the whole difference must be divided by 2*epsilon;
                # previously only the second J term was divided.
                res[i] = (J(X_b, y_train, theta_1) - J(X_b, y_train, theta_2)) / (2 * epsilon)
            return res

        def gradient_descent(X_b, y_train, theta, eta=0.01, epsilon=1e-8, n_iter=1e4):
            """Iterate until the cost change drops below epsilon or n_iter steps."""
            m_iter = 0
            while m_iter < n_iter:
                last_theta = theta
                theta = theta - eta * dJ(X_b, y_train, theta)
                if np.absolute(J(X_b, y_train, theta) - J(X_b, y_train, last_theta)) <= epsilon:
                    break
                m_iter += 1
            return theta

        X_b = np.hstack([np.ones(shape=(len(X_train), 1)), X_train])
        theta = np.zeros(X_b.shape[1])
        self._theta = gradient_descent(X_b, y_train, theta)
        self.intercept_ = self._theta[0]
        self.k_ = self._theta[1:]
        return self

    def fit_sgd(self, X_train, y_train, n_iters=5, t0=5, t1=50):
        """Fit via stochastic gradient descent over X_train, y_train.

        n_iters is the number of full passes over the shuffled training set;
        t0/t1 parameterize the decaying learning rate t0 / (t + t1).
        Returns self.
        """
        assert X_train.shape[0] == y_train.shape[0], \
            "the size of X_train must be equal to the size of y_train"
        assert n_iters >= 1

        def dJ_sgd(theta, X_b_i, y_i):
            """Gradient contribution of a single sample."""
            return X_b_i.T.dot((X_b_i.dot(theta) - y_i)) * 2.

        def sgd(X_b, y, initial_theta, n_iters, t0=5, t1=50):

            def learning_rate(t):
                # Decaying schedule so later updates shrink.
                return t0 / (t + t1)

            theta = initial_theta
            m = len(X_b)

            for cur_iter in range(n_iters):
                # Reshuffle each epoch so samples are visited in random order.
                indexes = np.random.permutation(m)
                X_b_new = X_b[indexes]
                y_new = y[indexes]
                for i in range(m):
                    gradient = dJ_sgd(theta, X_b_new[i], y_new[i])
                    theta = theta - learning_rate(cur_iter * m + i) * gradient

            return theta

        X_b = np.hstack([np.ones((len(X_train), 1)), X_train])
        initial_theta = np.random.randn(X_b.shape[1])
        self._theta = sgd(X_b, y_train, initial_theta, n_iters, t0, t1)

        self.intercept_ = self._theta[0]
        self.k_ = self._theta[1:]

        return self

    def predict(self, X_test):
        """Return predictions for the 2-D sample matrix X_test."""
        X_b = np.hstack((np.ones(shape=(len(X_test), 1)), X_test))
        return X_b.dot(self._theta)

    def score(self, X_test, y_test):
        """R² score of the model on the given test data.

        NOTE(review): arguments are passed as (predictions, y_test), matching
        the other classes in this module; confirm the project's r2_score
        expects that order.
        """
        return r2_score(self.predict(X_test), y_test)

    def __repr__(self):
        return "LinearRegression3"
