# 【案例描述及要求】
# 随机生成500个点，生成代码如下：
# x = np.arange(0, 1, 0.002)
# y = norm.rvs(0, size=500, scale=0.1)
# y = y + x**2
# 将数据等分为训练集和测试集，尝试用2次方的多项式对该数据进行拟合，然后进行正则化
# （注：下方实现实际使用 m=200 个样本、noise scale=0.06，与上述示例参数不同）

import numpy as np
import scipy.stats as stats
import matplotlib.pylab as plt

from pylab import *                             #显示中文
mpl.rcParams['font.sans-serif'] = ['SimHei']  #显示中文

# ---- Synthetic data: m points on [0, 1], quadratic trend + Gaussian noise ----
# (note: m=200 here, not the 500 mentioned in the header description)
m = 200
np.random.seed(3)                        # fixed seed so every run is reproducible
X = np.linspace(0, 1, m)
y = stats.norm.rvs(0, size=m, scale=0.06)   # zero-mean noise, sigma = 0.06

# ---- Split: even indices -> training set, odd indices -> test set ----
train_X, test_X = X[0:m:2], X[1:m:2]
train_y, test_y = y[0:m:2], y[1:m:2]

# Targets = scaled noise + the true quadratic signal x^2
train_y = train_y * 0.5 + train_X ** 2
test_y = test_y * 1.5 + test_X ** 2

# ---- Design matrices [1, x, x^2] for the degree-2 polynomial fit ----
train_X2 = train_X ** 2
train_XX = np.c_[np.ones_like(train_X), train_X, train_X2]
train_y = np.c_[train_y]                 # reshape target to a column vector

test_X2 = test_X ** 2
test_XX = np.c_[np.ones_like(test_X), test_X, test_X2]
test_y = np.c_[test_y]                   # reshape target to a column vector

def costFunction(X, y, theta, lamda):
    """Regularized squared-error cost for linear regression.

    Parameters
    ----------
    X : ndarray, shape (m, f) -- design matrix (first column is the bias column).
    y : ndarray, shape (m, 1) -- target column vector.
    theta : ndarray, shape (f, 1) -- parameter column vector.
    lamda : float -- L2 regularization strength (0 disables regularization).

    Returns
    -------
    float -- J(theta) = 1/(2m)*||X@theta - y||^2 + lamda/(2m)*||theta||^2
    """
    m, f = X.shape
    h = np.dot(X, theta)                            # predictions, shape (m, 1)
    # NOTE(review): the bias weight theta[0] is penalized here too (and in
    # gradDesc); standard practice excludes it, but it is kept for consistency.
    R = lamda / (2 * m) * np.dot(theta.T, theta)    # L2 penalty term
    J = 1.0 / (2.0 * m) * np.dot((h - y).T, (h - y)) + R
    # Fix: return a Python float rather than a (1, 1) array -- assigning a
    # 1-element array into J_history[i] is an error on NumPy >= 1.25.
    return J.item()

def gradDesc(X, y, theta=None, alpha=0.05, iternum=30000, lamda=5):
    """Batch gradient descent for L2-regularized linear regression.

    Parameters
    ----------
    X : ndarray, shape (m, f) -- design matrix.
    y : ndarray, shape (m, 1) -- target column vector.
    theta : ignored -- kept for backward compatibility; the parameters are
        always re-initialized to zeros below, exactly as in the original code.
        (Fix: the old default was a mutable list ``[[0]]``, a classic pitfall.)
    alpha : float -- learning rate.
    iternum : int -- number of gradient steps.
    lamda : float -- L2 regularization strength (0 disables regularization).

    Returns
    -------
    (J_history, theta) -- cost recorded at the start of each step,
        shape (iternum,), and the final parameters, shape (f, 1).
    """
    m, f = X.shape
    J_history = np.zeros(iternum)       # cost trace, one entry per iteration
    theta = np.zeros((f, 1))            # incoming theta is intentionally ignored

    for i in range(iternum):
        # .item() tolerates costFunction returning either a float or a (1,1) array
        J_history[i] = np.asarray(costFunction(X, y, theta, lamda)).item()
        h = np.dot(X, theta)            # current predictions
        # Regularized gradient; the bias row is penalized too (matches costFunction)
        deltatheta = 1.0 / m * np.dot(X.T, (h - y)) + lamda / m * theta
        theta -= alpha * deltatheta

    return J_history, theta

# Fit twice: lamda=0 -> unregularized baseline; default lamda=5 -> regularized
J_history, theta = gradDesc(train_XX, train_y, lamda=0)
J_history_r, theta_r = gradDesc(train_XX, train_y)

# Cost curve of the unregularized fit
plt.figure('代价函数图')
plt.plot(J_history)
plt.show()

# ---- Training-set scatter plus both fitted curves; report R^2 scores ----
pred_train = train_XX.dot(theta)            # unregularized predictions (train)
pred_test = test_XX.dot(theta)              # unregularized predictions (test)

res_train = ((pred_train - train_y) ** 2).sum()
tot_train = ((train_y - train_y.mean()) ** 2).sum()
print('正则化前训练集score=', 1 - res_train / tot_train)

res_test = ((pred_test - test_y) ** 2).sum()
tot_test = ((test_y - test_y.mean()) ** 2).sum()
print('正则化前测试集score=', 1 - res_test / tot_test)

plt.figure('训练集')
plt.title('训练集正则化前后拟合曲线')
plt.scatter(train_X, train_y, marker='x')
plt.plot(train_X, pred_train, c='r', lw=2, label='未正则化')
plt.plot(train_X, train_XX.dot(theta_r), c='y', lw=1, label='正则化')
plt.legend(loc='upper left')


# ---- Test-set scatter plus both fitted curves; report R^2 after regularization ----
h_train = np.dot(train_XX, theta_r)         # regularized predictions (train)
h_test = np.dot(test_XX, theta_r)           # regularized predictions (test)

res_tr = ((h_train - train_y) ** 2).sum()
tot_tr = ((train_y - train_y.mean()) ** 2).sum()
print('正则化后训练集score=', 1 - res_tr / tot_tr)

res_te = ((h_test - test_y) ** 2).sum()
tot_te = ((test_y - test_y.mean()) ** 2).sum()
print('正则化后测试集score=', 1 - res_te / tot_te)

plt.figure('测试集')
plt.title('测试集正则化前后拟合曲线')
plt.scatter(test_X, test_y, marker='x')
plt.plot(test_X, test_XX.dot(theta), c='r', lw=2, label='未正则化')
plt.plot(test_X, test_XX.dot(theta_r), c='y', lw=1, label='正则化')
plt.legend(loc='upper left')
plt.show()