# -*- coding: utf-8 -*-
"""
逻辑回归在sklearn中的应用
Created on Mon Apr 23 16:33:34 2018

@author: Allen
"""
import numpy as np
import matplotlib.pyplot as plt

# Build a synthetic binary-classification dataset: 200 samples, 2 features.
# The true boundary is the parabola x1**2 + x2 = 1.5; points below it get class 1.
np.random.seed(666)
X = np.random.normal(0, 1, size=(200, 2))
y = np.array(X[:, 0] ** 2 + X[:, 1] < 1.5, dtype="int")

# Inject label noise: force 20 randomly chosen samples to class 1 so the
# classes are not perfectly separable.  Keep the 20 individual randint()
# calls so the RNG stream under seed 666 is identical to the original.
for _ in range(20):
    y[np.random.randint(200)] = 1

# Visualize the two classes in feature space.
plt.scatter(X[y == 0, 0], X[y == 0, 1])
plt.scatter(X[y == 1, 0], X[y == 1, 1])
plt.show()

# sklearn 中 逻辑回归的运用
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split( X, y, random_state = 666 )

from sklearn.linear_model import LogisticRegression
log_reg = LogisticRegression()
log_reg.fit( X_train, y_train )
print( log_reg.score( X_train, y_train ) ) # 0.793333333333
print( log_reg.score( X_test, y_test ) ) # 0.86

# 引入多项式逻辑回归
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler

def PolynomialLogisticRegression(degree):
    """Return a pipeline: polynomial expansion -> standardization -> logistic regression.

    Parameters
    ----------
    degree : int
        Degree of the polynomial feature expansion.
    """
    steps = [
        ("poly", PolynomialFeatures(degree=degree)),
        ("std_scaler", StandardScaler()),
        ("log_reg", LogisticRegression()),
    ]
    return Pipeline(steps)
    
# Degree-2 polynomial features match the parabolic boundary and do far
# better than a straight line.
poly_log_reg = PolynomialLogisticRegression(2)
poly_log_reg.fit(X_train, y_train)
print(poly_log_reg.score(X_train, y_train))  # 0.913333333333
print(poly_log_reg.score(X_test, y_test))  # 0.94
 
# Degree 20: training score rises but the test score drops -> overfitting.
poly_log_reg = PolynomialLogisticRegression(20)
poly_log_reg.fit(X_train, y_train)
print(poly_log_reg.score(X_train, y_train))  # 0.94
print(poly_log_reg.score(X_test, y_test))  # 0.92

# 使用C，对模型进行正则化
def PolynomialLogisticRegression(degree, C):
    """Polynomial logistic regression with tunable regularization strength.

    Parameters
    ----------
    degree : int
        Degree of the polynomial feature expansion.
    C : float
        Inverse regularization strength of LogisticRegression; a smaller
        C means stronger regularization.
    """
    return Pipeline([
        ("poly", PolynomialFeatures(degree=degree)),
        ("std_scaler", StandardScaler()),
        ("log_reg", LogisticRegression(C=C)),
    ])
    
# Strong regularization (C=0.1) tames the degree-20 model's overfitting:
# training accuracy drops, test accuracy holds.
poly_log_reg = PolynomialLogisticRegression(degree=20, C=0.1)
poly_log_reg.fit(X_train, y_train)
print(poly_log_reg.score(X_train, y_train))  # 0.853333333333
print(poly_log_reg.score(X_test, y_test))  # 0.92


# 使用L2正则项
def PolynomialLogisticRegression( degree, C, penalty = "l2" ):
    """Polynomial logistic regression with a configurable regularization norm.

    Parameters
    ----------
    degree : int
        Degree of the polynomial feature expansion.
    C : float
        Inverse regularization strength; smaller C = stronger regularization.
    penalty : str, default "l2"
        Regularization norm passed to LogisticRegression (e.g. "l1" or "l2").
        NOTE: in recent scikit-learn versions "l1" requires a compatible
        solver such as "liblinear" or "saga".
    """
    # BUG FIX: the original hard-coded penalty="l2" inside the pipeline,
    # silently ignoring the caller-supplied `penalty` argument.  Forward it.
    return Pipeline([
                ( "poly", PolynomialFeatures( degree = degree ) ),
                ( "std_scaler", StandardScaler() ),
                ( "log_reg", LogisticRegression( C = C, penalty = penalty ) )
            ])
    
# Explicitly request the L2 penalty (sklearn's default) together with C=0.1;
# results match the previous C-only run.
poly_log_reg = PolynomialLogisticRegression(degree=20, C=0.1, penalty="l2")
poly_log_reg.fit(X_train, y_train)
print(poly_log_reg.score(X_train, y_train))  # 0.853333333333
print(poly_log_reg.score(X_test, y_test))  # 0.92

'''
实际数据，degree、C、正则等都是超参数，需要使用网格搜索来确定最优参数。
'''