# -*- coding: utf-8 -*-
"""
随机梯度下降法
Created on Thu Feb 22 10:41:27 2018

@author: Allen
"""
import numpy as np
import matplotlib.pyplot as plt

# Synthetic 1-D linear-regression data: y = 4x + 3 plus Gaussian noise.
m = 100000                                    # sample count
x = np.random.normal( size = m )              # 1-D feature vector
X = np.reshape( x, ( m, 1 ) )                 # same data as an (m, 1) column matrix
y = x * 4.0 + 3.0 + np.random.normal( 0, 3, size = m )  # noise std-dev = 3

def dJ_sgd( theta, X_b_i, y_i ):
    """Gradient of the squared error for a single sample: 2 * x * (x . theta - y)."""
    residual = X_b_i.dot( theta ) - y_i
    return 2 * X_b_i.T.dot( residual )

def sgd( X_b, y, initial_theta, n_iters ):
    """Run stochastic gradient descent with a decaying learning rate.

    One uniformly random sample is drawn per iteration and a step is taken
    along its gradient. Returns the final parameter vector.
    """
    # Empirically safe constants for the decay schedule t0 / (t + t1).
    t0, t1 = 5, 50

    def learning_rate( t ):
        # Shrinks toward zero as the iteration count t grows, so later
        # (noisier) single-sample steps move theta less.
        return t0 / ( t + t1 )

    theta = initial_theta
    for i in range( n_iters ):
        # Pick one random training sample and step along its gradient.
        idx = np.random.randint( len( X_b ) )
        theta = theta - learning_rate( i ) * dJ_sgd( theta, X_b[ idx ], y[ idx ] )

    return theta

# Prepend an intercept column of ones to form the design matrix X_b.
X_b = np.column_stack( [ np.ones( len( X ) ), X ] )
# NOTE: np.zeros( ( len( X_b ), 1 ) ) would be wrong here -- the parameter
# vector must be 1-D with one entry per feature (intercept + slope).
initial_theta = np.zeros( X_b.shape[ 1 ] )
theta = sgd( X_b, y, initial_theta, n_iters = len( X_b ) )
print( theta )  # e.g. [ 2.9810087  4.0020449 ] -- close to the true (3.0, 4.0)

##############################
#
# NOTE:
#   During stochastic gradient descent the learning rate
#   must gradually decrease over the iterations!
#
#
##############################