# 导入模块
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Load the training set: column 0 is city population, column 1 is food-truck profit.
path = 'C:\\Users\\WGZZS7\\Desktop\\ex1data1.txt'
data = pd.read_csv(path, header=None, names=['Population', 'Profit'])

x_arr = np.asarray(data['Population'])   # feature values, shape (m,)
y_vals = np.asarray(data['Profit'])      # target values, shape (m,)
m_samples = x_arr.size                   # number of training samples (97 in ex1data1)

# Optional sanity check: scatter-plot the raw data before fitting.
# plt.scatter(x_arr, y_vals)
# plt.show()

# Build matrices for the vectorized cost/gradient computations:
# prepend a column of ones so theta0 acts as the intercept term.
xMatrix = np.matrix(np.column_stack((np.ones(m_samples), x_arr)))   # design matrix, (m, 2)
yMatrix = np.matrix(y_vals).T                                       # targets as a column, (m, 1)
theta = np.matrix(np.array([0, 0]))                                 # initial parameters, (1, 2)


# 利用矩阵运算损失函数
def computeCost(x, y, theta):
    """Return the linear-regression cost J(theta) = sum((x*theta.T - y)^2) / (2m).

    x: (m, 2) design matrix; y: (m, 1) target column; theta: (1, 2) parameters.
    """
    residual = x * theta.T - y                  # (m, 1) prediction errors
    return np.sum(np.power(residual, 2)) / (2 * len(x))


# set X (training data) and y (target variable)
# data.insert(0, 'Ones', 1)
# cols = data.shape[1]
# # [row  , column]
# X = data.iloc[:, 0:cols-1]  # X是所有行，去掉最后一列
# y  = data.iloc[:, cols-1:cols]  # y是所有的列，只显示最后一列
# 观察下 X (训练集) and y (目标变量)是否正确.
# print(X.head())
# print(y.head())


# 梯度下降法优化
# Gradient-descent optimization
def gradientDescent(X, Y, theta, alpha, iters, m):
    """Batch gradient descent for univariate linear regression.

    :param X: (m, 2) design matrix — a column of ones plus the population feature
    :param Y: (m, 1) column vector of profits
    :param theta: (1, 2) initial parameters for h(x) = theta0 + theta1 * x
    :param alpha: learning rate (step size)
    :param iters: number of gradient-descent iterations
    :param m: number of training samples
    :return: (1, 2) matrix of fitted parameters

    Bug fix: the original code had the per-parameter update loop and the
    `theta = result` assignment OUTSIDE the `for i in range(iters)` loop,
    so only a single gradient step was ever applied no matter how many
    iterations were requested.  The update now runs on every iteration.
    """
    # Work on a float copy so an integer initial theta doesn't truncate updates
    # and the caller's matrix is never mutated.
    theta = np.matrix(theta, dtype=float)
    for _ in range(iters):
        error = X * theta.T - Y             # (m, 1) residuals h(x) - y
        gradient = (error.T * X) / m        # (1, 2) gradient of J w.r.t. theta
        theta = theta - alpha * gradient    # simultaneous update of both parameters
    return theta


# --- Run gradient descent and visualize the fit ---
learningRate = 0.01  # gradient-descent step size
iterTimes = 1000     # number of gradient-descent iterations

# Fit theta to the training set.
g = gradientDescent(xMatrix, yMatrix, theta, learningRate, iterTimes, len(x_arr))
finalCost = computeCost(xMatrix, yMatrix, g)  # cost at the fitted parameters (was silently discarded)

# Plot the fitted line over the raw data to inspect the fit visually.
x = np.linspace(data.Population.min(), data.Population.max(), 100)
f = g[0, 0] + (g[0, 1] * x)  # predicted profit: h(x) = theta0 + theta1 * x
fig, ax = plt.subplots(figsize=(10, 8))

# scatter draws the raw samples, plot draws the fitted regression line.
ax.plot(x, f, 'r', label='Prediction')
ax.scatter(data.Population, data.Profit, label='Training Data')  # fixed typo: was 'Traning Data'

ax.legend(loc=2)  # legend in the upper-left corner
ax.set_xlabel('Population')
ax.set_ylabel('Profit')
ax.set_title('Predicted Profit vs. Population Size')

plt.show()