# -*- coding: utf-8 -*-
"""
LogisticRegression
Created on Sat Apr 21 10:11:31 2018

@author: Allen
"""

import numpy as np
from .metrics import accuracy_score

class LogisticRegression( object ):

    def __init__( self ):
        '''
        Initialize an unfitted model.
        '''
        self.intercept_ = None  # bias term, theta[0] after fitting
        self.coef_ = None       # feature weights, theta[1:] after fitting
        self._theta = None      # full parameter vector [intercept, *coef]

    def predict_proba( self, X_test ):
        '''
        Return the predicted probability of the positive class (label 1)
        for each row of X_test.

        X_test: array of shape (n_samples, n_features); the number of
        features must match what was seen during fit.
        '''
        # Prepend a column of ones so theta[0] acts as the intercept.
        X_b = np.hstack( ( np.ones( ( len( X_test ), 1 ) ), X_test ) )
        return self._sigmoid( X_b.dot( self._theta ) )

    def predict( self, X_test ):
        '''
        Return hard 0/1 class labels using a 0.5 probability threshold.
        '''
        proba = self.predict_proba( X_test )
        return np.array( proba >= 0.5, dtype = "int" )

    def score( self, X_test, y_test ):
        '''
        Return the classification accuracy on (X_test, y_test).

        NOTE: this is accuracy, not R squared — the original docstring
        was misleading.
        '''
        y_predict = self.predict( X_test )
        return accuracy_score( y_test, y_predict )

    def _sigmoid( self, t ):
        '''Elementwise logistic function 1 / (1 + e^(-t)).'''
        return 1. / ( 1 + np.exp( -t ) )

    def fit( self, X_train, y_train, eta = 0.1, n_iters = 1e4 ):
        '''
        Fit theta by batch gradient descent on the cross-entropy loss.

        Parameters
        ----------
        X_train : array of shape (n_samples, n_features)
        y_train : array of 0/1 labels, shape (n_samples,)
        eta : learning rate (previously ignored — it was hard-coded to
              0.01 inside fit; it is now honoured)
        n_iters : maximum number of gradient steps (previously ignored)

        Returns
        -------
        self, with intercept_, coef_ and _theta populated.
        '''
        def J( theta, X_b, y ):
            '''
            Cross-entropy (log) loss averaged over the samples.
            (The original docstring claimed MSE; the formula below is
            the logistic log loss.)
            '''
            y_hat = self._sigmoid( X_b.dot( theta ) )
            try:
                return -np.sum( y * np.log( y_hat ) + ( 1 - y ) * np.log( 1 - y_hat ) ) / len( y )
            except Exception:
                # log(0) and similar numeric blow-ups: report infinite
                # loss so the convergence test simply keeps iterating.
                # (Narrowed from a bare except, which also caught
                # KeyboardInterrupt/SystemExit.)
                return float( "inf" )

        def dJ( theta, X_b, y ):
            '''
            Gradient of the cross-entropy loss w.r.t. theta, in the
            vectorized closed form X_b^T (sigmoid(X_b theta) - y) / m.
            '''
            return X_b.T.dot( self._sigmoid( X_b.dot( theta ) ) - y ) / len( y )

        def gradient_descent( X_b, y, initial_theta, eta, n_iters = 1e4, epsilon = 1e-8 ):
            '''
            Run at most n_iters gradient steps, stopping early once the
            loss changes by less than epsilon between iterations.
            '''
            theta = initial_theta
            # Cache the current cost so J is evaluated once per
            # iteration instead of twice.
            last_cost = J( theta, X_b, y )
            i_iter = 0

            while i_iter < n_iters:
                gradient = dJ( theta, X_b, y )
                theta = theta - eta * gradient

                cost = J( theta, X_b, y )
                if np.abs( cost - last_cost ) < epsilon:
                    break
                last_cost = cost

                i_iter += 1

            return theta

        # Augment the design matrix with a leading column of ones for
        # the intercept; theta therefore has X_b.shape[1] entries
        # (the column count, not the row count).
        X_b = np.hstack( ( np.ones( len( X_train ) ).reshape( -1, 1 ), X_train ) )
        initial_theta = np.zeros( X_b.shape[1] )

        # Bug fix: forward the caller's eta and n_iters. Previously eta
        # was hard-coded to 0.01 and n_iters was never passed, so both
        # keyword arguments of fit() were silently ignored.
        theta = gradient_descent( X_b, y_train, initial_theta, eta, n_iters )
        self._theta = theta
        self.coef_ = theta[1:]
        self.intercept_ = theta[0]
        return self

