# -*- coding:utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt

# Load the sample points from the CSV file: one "x,y" pair per row.
points = np.genfromtxt('data.csv', delimiter=',')
# Split into the two columns: x (feature) and y (target).
# (The original code had a leftover no-op expression `points[0,0]` here —
# a notebook cell residue with no effect — which has been removed.)
x = points[:, 0]
y = points[:, 1]
# Show the raw data as a scatter plot.
plt.scatter(x, y)
plt.show()

# 定义损失函数
# Mean-squared-error loss of the line y = w*x + b over all sample points.
# NOTE: the name keeps the original "conpute" typo so existing callers
# elsewhere in the file continue to work.
def conpute_cost(w, b, points):
    """Return the mean squared error of the line ``y = w*x + b``.

    Args:
        w: slope of the line.
        b: intercept of the line.
        points: array-like of shape (M, 2); column 0 is x, column 1 is y.

    Returns:
        float: mean of the squared residuals (y - w*x - b)**2.
    """
    points = np.asarray(points)
    xs = points[:, 0]
    ys = points[:, 1]
    # Vectorized equivalent of the per-point loop: one NumPy expression
    # computes every squared residual and averages them.
    return np.mean((ys - w * xs - b) ** 2)

# Hyper-parameters for the gradient-descent fit below.
alpha = 0.0001  # learning rate (step size for each update)
initial_x = 0  # initial guess for the slope w
initial_y = 0  # initial guess for the intercept b
num_iter = 10  # number of gradient-descent iterations

# 定义核心的梯度下降算法
# Core gradient-descent driver: repeatedly apply single-step updates.
def grad_desc(points, initial_x, initial_y, alpha, num_iter):
    """Fit w and b by running ``num_iter`` full-batch gradient-descent steps.

    Args:
        points: (M, 2) array of samples; column 0 is x, column 1 is y.
        initial_x: starting value for the slope w.
        initial_y: starting value for the intercept b.
        alpha: learning rate.
        num_iter: number of update steps to run.

    Returns:
        list: ``[w, b, cost_list]`` where ``cost_list`` holds the loss
        measured *before* each update (used later to plot the descent).
    """
    w, b = initial_x, initial_y
    cost_list = []
    for _ in range(num_iter):
        # Record the current loss first, then take one descent step.
        cost_list.append(conpute_cost(w, b, points))
        w, b = step_grad_desc(w, b, alpha, points)
    return [w, b, cost_list]

def step_grad_desc(now_w, now_b, alpha, points):
    """Perform one gradient-descent update of (w, b) for the MSE loss.

    Args:
        now_w: current slope.
        now_b: current intercept.
        alpha: learning rate.
        points: array-like of shape (M, 2); column 0 is x, column 1 is y.

    Returns:
        tuple: ``(updated_w, updated_b)`` after one descent step.
    """
    points = np.asarray(points)
    xs = points[:, 0]
    ys = points[:, 1]
    M = len(points)
    # Residual of the current line at every point, computed in one
    # vectorized expression instead of the original per-point loop.
    residual = now_w * xs + now_b - ys
    # Gradients of the mean squared error with respect to w and b.
    grad_w = 2 / M * np.sum(residual * xs)
    grad_b = 2 / M * np.sum(residual)
    # Step against the gradient, scaled by the learning rate.
    return now_w - alpha * grad_w, now_b - alpha * grad_b

# Run gradient descent to find the best-fitting w and b, then report them.
w, b, cost_list = grad_desc(points, initial_x, initial_y, alpha, num_iter)
print("w is:", w)
print("b is:", b)
cost = conpute_cost(w, b, points)
print("cost is:", cost)
# Plot how the loss decreased over the iterations.
plt.plot(cost_list)
plt.show()
# Plot the fitted line on top of the scatter of the raw data.
plt.scatter(x, y)
# Predicted y values along the fitted line.
pred_y = w * x + b
plt.plot(x, pred_y, c='r')
plt.show()

