# -*- coding: utf-8 -*-
"""
多元线性回归模型
Created on Wed Feb 14 10:30:10 2018

@author: Allen
"""
import numpy as np
from .metrics import r2_score

class LinearRegression(object):
    """Multivariate linear regression model.

    Three training strategies are provided:

    * ``fit_normal`` -- closed-form solution via the normal equation.
    * ``fit_gd``     -- batch gradient descent.
    * ``fit_sgd``    -- stochastic gradient descent with a decaying
      learning rate.

    After fitting, ``intercept_`` holds the bias term and ``coef_`` the
    per-feature weights.
    """

    def __init__(self):
        """Initialize an unfitted model; all parameters start as None."""
        self.intercept_ = None  # bias term (theta[0])
        self.coef_ = None       # feature weights (theta[1:])
        self._theta = None      # full parameter vector [intercept, coefs...]

    def fit_normal(self, X_train, y_train):
        """Fit by solving the normal equation.

        theta = pinv(X_b^T . X_b) . X_b^T . y

        The Moore-Penrose pseudo-inverse (``pinv``) is used instead of
        ``inv`` so that a singular X^T X (e.g. collinear features) no
        longer raises ``LinAlgError``; for invertible matrices the result
        is identical.

        :param X_train: array of shape (n_samples, n_features)
        :param y_train: array of shape (n_samples,)
        :return: self
        """
        # Prepend a column of ones so theta[0] acts as the intercept.
        X_b = np.hstack((np.ones((len(X_train), 1)), X_train))
        self._theta = np.linalg.pinv(X_b.T.dot(X_b)).dot(X_b.T).dot(y_train)
        self.intercept_ = self._theta[0]
        self.coef_ = self._theta[1:]
        return self

    def predict(self, X_test):
        """Predict targets for a test set; one of ``fit_*`` must be called first.

        :param X_test: array of shape (n_samples, n_features)
        :return: array of shape (n_samples,)
        """
        X_b = np.hstack((np.ones((len(X_test), 1)), X_test))
        return X_b.dot(self._theta)

    def score(self, X_test, y_test):
        """Return the R^2 (coefficient of determination) on a test set."""
        y_predict = self.predict(X_test)
        return r2_score(y_test, y_predict)

    def fit_gd(self, X_train, y_train, eta=0.1, n_iters=1e4):
        """Fit with batch gradient descent.

        Bug fix: the original implementation discarded the caller-supplied
        ``eta`` and ``n_iters`` and always ran with a hard-coded eta=0.01
        and the internal default iteration cap; both parameters are now
        honoured.

        :param X_train: array of shape (n_samples, n_features)
        :param y_train: array of shape (n_samples,)
        :param eta: learning rate
        :param n_iters: maximum number of iterations
        :return: self
        """

        def J(theta, X_b, y):
            """MSE loss; +inf when the parameters have numerically diverged."""
            try:
                return np.sum((y - X_b.dot(theta)) ** 2) / len(X_b)
            except OverflowError:  # diverged -- treat as infinite loss
                return float("inf")

        def dJ(theta, X_b, y):
            """Gradient of the MSE loss (vectorized form)."""
            return X_b.T.dot(X_b.dot(theta) - y) * 2.0 / len(y)

        def gradient_descent(X_b, y, initial_theta, eta, n_iters=1e4, epsilon=1e-8):
            """Iterate until the loss change drops below epsilon or n_iters is hit."""
            theta = initial_theta
            i_iter = 0
            while i_iter < n_iters:
                last_theta = theta
                gradient = dJ(theta, X_b, y)
                theta = theta - eta * gradient
                # Stop when the loss has effectively stopped improving.
                if np.abs(J(theta, X_b, y) - J(last_theta, X_b, y)) < epsilon:
                    break
                i_iter += 1
            return theta

        X_b = np.hstack((np.ones((len(X_train), 1)), X_train))
        # theta has one entry per COLUMN of X_b (intercept + features).
        initial_theta = np.zeros(X_b.shape[1])

        self._theta = gradient_descent(X_b, y_train, initial_theta,
                                       eta=eta, n_iters=n_iters)
        self.intercept_ = self._theta[0]
        self.coef_ = self._theta[1:]
        return self

    def fit_sgd(self, X_train, y_train, n_iters=5, t0=5, t1=50):
        """Fit with stochastic gradient descent.

        ``n_iters`` counts full passes (epochs) over the training set; the
        learning rate decays as ``t0 / (t + t1)``.

        Bug fix: ``coef_`` was previously set to ``theta[1]`` (a single
        scalar weight) instead of ``theta[1:]`` (all feature weights),
        inconsistent with ``fit_normal``/``fit_gd``.

        :param X_train: array of shape (n_samples, n_features)
        :param y_train: array of shape (n_samples,)
        :param n_iters: number of epochs
        :param t0: learning-rate schedule numerator
        :param t1: learning-rate schedule offset
        :return: self
        """

        def dJ_sgd(theta, X_b_i, y_i):
            """Gradient of the squared loss at a single sample."""
            return X_b_i.T.dot(X_b_i.dot(theta) - y_i) * 2

        def sgd(X_b, y, initial_theta, n_iters, t0=5, t1=50):

            def learning_rate(t):
                # Decaying step size avoids oscillation near the optimum.
                return t0 / (t + t1)

            theta = initial_theta
            m = len(X_b)

            for cur_iter in range(n_iters):
                # Reshuffle every epoch so each pass sees a new sample order.
                indexes = np.random.permutation(m)
                X_b_new = X_b[indexes]
                y_new = y[indexes]
                for i in range(m):
                    gradient = dJ_sgd(theta, X_b_new[i], y_new[i])
                    theta = theta - learning_rate(cur_iter * m + i) * gradient

            return theta

        X_b = np.hstack([np.ones((len(X_train), 1)), X_train])
        initial_theta = np.zeros(X_b.shape[1])
        self._theta = sgd(X_b, y_train, initial_theta, n_iters, t0, t1)
        self.intercept_ = self._theta[0]
        self.coef_ = self._theta[1:]  # fix: was self._theta[1]
        return self