#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import numpy as np
import matplotlib.pyplot as plt
import tools
from sklearn.linear_model import SGDRegressor

# Load the raw dataset via the project-local `tools` helper module.
# NOTE(review): the layout is presumably one sample per row with the target in
# the last column (that is how the slicing below treats it) — confirm in tools.
data = tools.loadData()

# Column-wise z-score standardization: every column ends up with zero mean
# and unit variance.
oriD = np.matrix(data)
(M, N) = oriD.shape
# Vectorized over columns (axis=0) instead of the original per-column Python
# loop: broadcasting subtracts each column's mean and divides by its std in
# one C-level pass, and the result stays an np.matrix so the downstream `*`
# operators remain matrix multiplication.
# NOTE(review): a constant column (std == 0) produces a division-by-zero
# warning and inf/nan values — exactly as the loop version did.
D = (oriD - np.mean(oriD, axis=0)) / np.std(oriD, axis=0)

# Use the first 400 samples as the training set: all columns but the last are
# features, the last column is the target.
m = 400
stdX = D[:m, :-1]
Y = D[:m, -1:]
(m, n) = stdX.shape

# Prepend a bias (intercept) column of ones so theta[0] acts as the offset.
X = np.hstack([np.ones((m, 1)), stdX])

# Closed-form least-squares solution via the project helper.
# NOTE(review): presumably the normal equations — confirm in tools.leastSquare.
thetaLS = tools.leastSquare(X, Y)

# Initialize theta to zeros: (n features + 1 bias) x 1 column vector.
th = np.matrix(np.zeros((n + 1, 1)))

# Batch gradient descent from the zero start.
# NOTE(review): J and ci look like the cost history and the iteration/converge
# info — TODO confirm against tools.gradientDescent; both are unused here.
(thetaGD, J, ci) = tools.gradientDescent(th, X, Y, alpha=0.1, maxIter=1000)

# Fit scikit-learn's SGD regressor on the same training data.
# Y.A1 flattens the (m, 1) matrix to the 1-D array sklearn expects; stdX (no
# bias column) is passed because SGDRegressor fits its own intercept.
clf = SGDRegressor(max_iter=1000)
clf.fit(stdX, Y.A1)

# Use the samples after index 400 (m still holds the training size here)
# as the test set; m and n are then rebound to the test-set dimensions.
stdX = D[m:, 0:-1]
Y = D[m:, -1:]
(m, n) = stdX.shape

# Prepend the bias (intercept) column of ones, mirroring the training setup.
X = np.hstack([np.ones((m, 1)), stdX])

# Test-set predictions (X and theta are np.matrix, so * is matrix multiply).
# least squares
hatLS = X * thetaLS
# gradient descent
hatGD = X * thetaGD
# sklearn SGD: predict on stdX without the bias column — the fitted model
# applies its own intercept.
hatSGD = clf.predict(stdX)

# Plot the ground truth against each model's prediction and save the figure.
plt.plot(Y, label='ori', marker='p')
plt.plot(hatLS, label='ls', marker='o')
plt.plot(hatGD, label='gd', marker='+')
plt.plot(hatSGD, label='sgd', marker='x')
# One legend call after all labeled series are added covers every line; the
# original called plt.legend() after each plot, redrawing it three extra times
# for the same final image.
plt.legend()

plt.savefig('generalize.png')
