import numpy as np
from metrics import r2_score

class LinearRegression3:
    """Multivariate linear regression.

    Can be fitted three ways: the closed-form normal equation
    (`fit_normal`), batch gradient descent (`fit_gd`), or stochastic
    gradient descent (`fit_sdg`). After fitting, `coef_` holds the
    feature coefficients (theta_1..theta_n) and `interception_` the
    intercept (theta_0).
    """

    def __init__(self):
        self.coef_ = None          # coefficients theta_1..theta_n
        self.interception_ = None  # intercept theta_0
        self._theta = None         # full parameter vector [theta_0, theta_1, ..., theta_n]

    def fit_normal(self, x_train, y_train):
        """Fit via the normal equation: theta = (X_b^T X_b)^-1 X_b^T y.

        x_train: 2-D array, one sample per row.
        y_train: 1-D array of targets, one per sample.
        Returns self.
        """
        # Each sample must have a corresponding target value.
        assert x_train.shape[0] == y_train.shape[0]
        # Prepend a column of ones so the intercept is absorbed into theta:
        # X_b = [1 | x_train], theta = [theta_0, theta_1, ..., theta_n].
        x_b = np.hstack([np.ones((len(x_train), 1)), x_train])
        # Closed-form least-squares solution.
        self._theta = np.linalg.inv(x_b.T.dot(x_b)).dot(x_b.T).dot(y_train)
        self.interception_ = self._theta[0]  # intercept theta_0
        self.coef_ = self._theta[1:]         # theta_1..theta_n
        return self

    def fit_sdg(self, X_train, y_train, n_iters=5, t0=5, t1=50):
        """Fit via stochastic gradient descent.

        n_iters: number of full passes (epochs) over the training set.
        t0, t1: learning-rate schedule parameters, eta(t) = t0 / (t + t1).
        Returns self.
        """
        assert X_train.shape[0] == y_train.shape[0]
        assert n_iters >= 1

        def dj_sgd(theta, X_b_i, y_i):
            # Gradient estimated from a single sample (one row of X_b and
            # its target value).
            return X_b_i.T.dot(X_b_i.dot(theta) - y_i) * 2

        def sgd(X_b, y, initial_theta, n_iters, t0, t1):
            # BUG FIX: t0 and t1 were unconditionally re-assigned here
            # (t0=5, t1=50), silently ignoring the values passed by the
            # caller. The schedule now honors the parameters.
            def learning_rate(t):
                # Decaying schedule: eta = t0 / (t + t1).
                return t0 / (t + t1)

            theta = initial_theta
            m = len(X_b)
            for cur_iter in range(n_iters):
                # Reshuffle every epoch so samples are visited in random
                # order, improving generalization.
                indexes = np.random.permutation(m)
                X_b_new = X_b[indexes]
                y_new = y[indexes]
                for i in range(m):
                    # One update per sample; the learning rate decays with
                    # the total number of updates so far (epoch*m + i).
                    gradient = dj_sgd(theta, X_b_new[i], y_new[i])
                    theta = theta - learning_rate(cur_iter * m + i) * gradient
            return theta

        X_b = np.hstack([np.ones((len(X_train), 1)), X_train])
        initial_theta = np.random.randn(X_b.shape[1])
        self._theta = sgd(X_b, y_train, initial_theta, n_iters, t0, t1)
        self.interception_ = self._theta[0]
        self.coef_ = self._theta[1:]
        return self

    def fit_gd(self, X_train, y_train, eta=0.01, n_inters=1e4):
        """Fit via batch gradient descent on X_train/y_train.

        eta: fixed learning rate.
        n_inters: maximum number of iterations (name kept for
        backward compatibility).
        Returns self.
        """
        assert X_train.shape[0] == y_train.shape[0]

        def J(theta, X_b, y):
            # Mean-squared-error cost. If theta diverges the computation
            # may overflow; treat that as infinite cost instead of raising.
            # BUG FIX: was a bare `except:` that also swallowed
            # KeyboardInterrupt/SystemExit; narrowed to arithmetic errors.
            try:
                return np.sum((y - X_b.dot(theta)) ** 2) / len(y)
            except (OverflowError, FloatingPointError):
                return float("inf")

        def dJ(theta, X_b, y):
            # Vectorized gradient: (2/m) * X_b^T (X_b theta - y).
            return X_b.T.dot(X_b.dot(theta) - y) * 2. / len(y)

        def gradient_descent(X_b, y, initial_theta, eta, n_iters=1e4, epsilon=1e-8):
            theta = initial_theta
            cur_iter = 0
            while cur_iter < n_iters:
                gradient = dJ(theta, X_b, y)
                last_theta = theta
                theta = theta - eta * gradient
                # Stop early once the cost no longer decreases meaningfully.
                if abs(J(theta, X_b, y) - J(last_theta, X_b, y)) < epsilon:
                    break
                cur_iter = cur_iter + 1
            return theta

        X_b = np.hstack([np.ones((len(X_train), 1)), X_train])
        initial_theta = np.zeros(X_b.shape[1])  # start with all parameters at 0
        self._theta = gradient_descent(X_b, y_train, initial_theta, eta, n_inters)
        self.interception_ = self._theta[0]
        self.coef_ = self._theta[1:]
        return self

    def predict(self, x_predict):
        """Return predicted targets for x_predict (one row per sample)."""
        # The model must be fitted first.
        assert self.coef_ is not None and self.interception_ is not None
        # The number of features must match what the model was trained on.
        assert x_predict.shape[1] == self.coef_.shape[0]
        # Prepend the ones column and apply the learned parameters.
        X_b = np.hstack([np.ones((len(x_predict), 1)), x_predict])
        return X_b.dot(self._theta)

    def score(self, x_test, y_test):
        """Return the R^2 score of the model on a test set."""
        y_predict = self.predict(x_test)
        return r2_score(y_test, y_predict)

    def __repr__(self):
        # BUG FIX: was misspelled `__reper__`, so Python never called it
        # and repr() fell back to the default object representation.
        return "线性回归模型"