from __future__ import print_function
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.font_manager import FontProperties
from sklearn.model_selection import train_test_split
import pandas as pd
font = FontProperties(fname=r"C:\windows\fonts\simsun.ttc", size=14)    # load a local font so plot labels are not garbled under Windows




def linearRegression(X, y, alpha=0.01, num_iters=400):
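    """Train linear regression with batch gradient descent.

    X is the (m, n) raw feature matrix and y the length-m target vector;
    alpha is the learning rate and num_iters the number of iterations.
    Returns the normalization parameters (mu, sigma) and the learned theta.
    """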
    print(u"加载数据...\n")

    m = len(y)  # 总的数据条数
    col = data.shape[1]  # data的列数

    X, mu, sigma = featureNormaliza(X)  # 归一化
    plot_X1_X2(X)  # 画图看一下归一化效果

    X = np.hstack((np.ones((m, 1)), X))  # 在X前加一列1

    print(u"\n执行梯度下降算法....\n")

    theta = np.zeros((X.shape[1], 1))
    y = y.reshape(-1, 1)  # 将行向量转化为列
    theta, J_history = gradientDescent(X, y, theta, alpha, num_iters)

    plotJ(J_history, num_iters)

    return mu, sigma, theta  # 返回均值mu,标准差sigma,和学习的结果theta


# Feature normalization
def featureNormaliza(X):
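    """Z-score normalize each column of X: (x - mean) / std.

    Returns the normalized copy together with the per-column mu and sigma,
    which are needed later to transform test inputs the same way.
    """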
    X_norm = np.array(X, dtype=float)  # work on a float ndarray copy so in-place division is safe

    mu = np.mean(X_norm, 0)  # column-wise mean (axis 0 = columns, 1 = rows)
    sigma = np.std(X_norm, 0)  # column-wise standard deviation
    for i in range(X_norm.shape[1]):  # normalize each column
        X_norm[:, i] = (X_norm[:, i] - mu[i]) / sigma[i]

    return X_norm, mu, sigma


# Scatter plot of the first two features
def plot_X1_X2(X):
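    """Quick visual check that the first two normalized features are centered."""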
    plt.scatter(X[:, 0], X[:, 1])
    plt.show()


# Batch gradient descent
def gradientDescent(X, y, theta, alpha, num_iters):
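    """Run batch gradient descent for num_iters iterations.

    Update rule: theta := theta - (alpha / m) * X.T @ (X @ theta - y).
    Returns the final theta and the per-iteration cost history.
    """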
    m = len(y)

    J_history = np.zeros((num_iters, 1))  # cost recorded at every iteration

    for i in range(num_iters):
        h = np.dot(X, theta)  # hypothesis values, shape (m, 1)
        theta = theta - (alpha / m) * np.dot(np.transpose(X), h - y)  # simultaneous update of all parameters
        J_history[i] = computeCost(X, y, theta)  # record the cost after this update
        print('.', end=' ')  # progress indicator
    return theta, J_history


# Compute the least-squares cost
def computeCost(X, y, theta):
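    """Return J(theta) = (1 / (2m)) * sum((X @ theta - y) ** 2)."""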
    m = len(y)
    error = np.dot(X, theta) - y  # residuals, shape (m, 1)
    J = np.dot(np.transpose(error), error) / (2 * m)  # (1, 1) array holding the cost
    return J.item()  # return a plain Python float


# Plot how the cost changes over the iterations
def plotJ(J_history, num_iters):
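    """Plot the recorded cost history against the iteration index."""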
    x = np.arange(1, num_iters + 1)
    plt.plot(x, J_history)
    plt.xlabel(u"迭代次数", fontproperties=font)  # 注意指定字体，要不然出现乱码问题
    plt.ylabel(u"代价值", fontproperties=font)
    plt.title(u"代价随迭代次数的变化", fontproperties=font)
    plt.show()



# Test the learned model (prediction)
def predict(mu, sigma, theta, X_test):
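    """Normalize X_test with the training mu/sigma and return X_test @ theta."""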
    X_test = np.array(X_test, dtype=float)  # copy so the caller's array is not mutated
    for i in range(X_test.shape[1]):  # apply the same normalization as in training
        X_test[:, i] = (X_test[:, i] - mu[i]) / sigma[i]
    X_test = np.hstack((np.ones((X_test.shape[0], 1)), X_test))  # prepend the intercept column

    return np.dot(X_test, theta)  # predicted targets, shape (m_test, 1)

if __name__ == "__main__":
    data_url = "http://lib.stat.cmu.edu/datasets/boston"
    raw_df = pd.read_csv(data_url, sep="\s+", skiprows=22, header=None)
    data = np.hstack([raw_df.values[::2, :], raw_df.values[1::2, :2]])
    target = raw_df.values[1::2, 2]  # 读取数据

    print(data.shape, data.shape[1])
    X, X_test, y, y_test = train_test_split(data, target, test_size=0.2, random_state=42)
    mu, sigma, theta = linearRegression(X, y,0.01, 400 )
    print("\n计算的theta值为：\n", theta)
    print("\n预测结果为：%f" ,predict(mu, sigma, theta,X_test))