# -*- coding: utf-8 -*-
'''
Gradient descent applied to a linear regression model.
@time 2018-02-21
'''
import numpy as np
import matplotlib.pyplot as plt

# Seed BEFORE drawing random data so the synthetic dataset is reproducible.
# (Bug fix: the original seeded AFTER generation, making the seed a no-op.)
np.random.seed( 666 )

x = np.random.random( size = 100 )
y = x * 3.0 + 4.0 + np.random.normal( size = 100 )  # true model: y = 3x + 4 + Gaussian noise

X = x.reshape( -1, 1 )  # feature matrix, shape (100, 1)

# Visual sanity check of the raw data
plt.scatter( x, y )
plt.show()

def J( theta, X_b, y ):
    '''
    Loss function: mean squared error (MSE).

    theta : parameter vector, shape (n+1,)
    X_b   : design matrix with a leading column of ones, shape (m, n+1)
    y     : target vector, shape (m,)

    Returns the MSE, or +inf when the computation blows up
    (e.g. theta has diverged and the squared residuals overflow).
    '''
    try:
        return np.sum( ( y - X_b.dot( theta ) ) ** 2 ) / len( X_b )
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit still
        # propagate; any numeric failure is treated as a diverged (infinite) loss.
        return float( "inf" )
    
def dJ( theta, X_b, y ):
    '''
    Gradient of the MSE loss J with respect to theta.

    Analytically: dJ = (2/m) * X_b^T (X_b theta - y), with m = len(X_b).

    Bug fix: the original divided by len(theta) (the number of PARAMETERS);
    since J averages over the m SAMPLES, its gradient must divide by len(X_b).
    The per-column Python loop is also replaced with a single vectorized
    matrix product, which covers the intercept column (X_b[:, 0] == 1) too.
    '''
    return X_b.T.dot( X_b.dot( theta ) - y ) * 2.0 / len( X_b )

def gradient_descent( X_b, y, initial_theta, eta, n_iters = 1e4, epsilon = 1e-8 ):
    '''
    Batch gradient descent minimizing the MSE loss J.

    X_b           : design matrix with a leading column of ones, shape (m, n+1)
    y             : target vector, shape (m,)
    initial_theta : starting parameter vector, shape (n+1,)
    eta           : learning rate
    n_iters       : iteration cap (safety net against non-convergence)
    epsilon       : stop once the loss changes by less than this per step

    Returns the fitted theta.
    '''
    theta = initial_theta
    # Cache the current loss so J is evaluated once per iteration
    # (the original recomputed J(last_theta, ...) every step).
    last_J = J( theta, X_b, y )
    i_iter = 0

    while i_iter < n_iters:
        gradient = dJ( theta, X_b, y )  # fixed typo: "gredient" -> "gradient"
        theta = theta - eta * gradient

        cur_J = J( theta, X_b, y )
        if abs( cur_J - last_J ) < epsilon:
            break
        last_J = cur_J

        i_iter += 1

    return theta

# Prepend a column of ones so theta[0] acts as the intercept term
X_b = np.hstack( ( np.ones( len( X ) ).reshape( -1, 1 ), X ) )
initial_theta = np.zeros( X_b.shape[1] ) # note: theta's length must match the number of COLUMNS of X_b, not the rows

theta = gradient_descent( X_b, y, initial_theta, eta = 0.01 )
print( theta ) #[ 4.02412206  3.00954753]


####################################################
#
# Cross-check against the implementation packaged in playML
#
####################################################
from playML.LinearRegression import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit_gd( X, y )
print( lin_reg.coef_, lin_reg.intercept_ ) # [ 3.00954753] 4.02412205867








    
    
    
    
    
    
    
    