"""
线性回归原理实现：使用 经验-薪资 的单一特征进行实现
"""
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mlp
from matplotlib.font_manager import fontManager
import numpy as np

# Read the salary.csv data file (expected in the working directory)
df = pd.read_csv("salary.csv")
print(df.head())
x = df["YearsExperience"]  # feature: years of experience
y = df["Salary"]  # target: salary (in thousands, per the plot label below)
# Register a CJK-capable font so matplotlib can render the Chinese labels
fontManager.addfont("ChineseFont.ttf")
mlp.rc('font', family='ChineseFont', size=12)


def pred_line(w, b):
    """Plot the observed data and the line w*x + b on top of it.

    Reads the module-level ``x`` / ``y`` series and shows the figure.

    Args:
        w: slope of the prediction line.
        b: intercept of the prediction line.
    """
    predictions = w * x + b
    # Scatter first so the prediction line is drawn on top of the points.
    plt.scatter(x, y, marker="x", color="red", label="实际数据")
    plt.plot(x, predictions, color="green", label="预测线")
    plt.xlabel("经验")
    plt.ylabel("薪资(K)")
    plt.legend()
    plt.xlim([0, 12])
    plt.ylim([1, 140])
    plt.show()


# Draw the line for arbitrarily chosen initial parameters (w=10, b=10)
pred_line(10, 10)


def compute_cost(x, y, w, b):
    """Mean squared error cost for the linear model y = w*x + b.

    Args:
        x: feature values (array-like).
        y: observed target values (array-like, same length as x).
        w: slope parameter.
        b: intercept parameter.

    Returns:
        The mean of the squared residuals between predictions and y.
    """
    # Prediction of the linear model
    y_pred = w * x + b
    # np.mean does the sum-of-squares / sample-count in one vectorized step
    # (the original comment promised np.mean but hand-rolled sum()/len()).
    return np.mean((y - y_pred) ** 2)

# How to find the optimal parameters w and b:
# brute-force every integer (w, b) pair in a range and compute the cost.
ws = np.arange(-100, 101)
bs = np.arange(-100, 101)
costs = np.zeros((len(ws), len(bs)))

# costs[i, j] holds the cost for w = ws[i], b = bs[j].
for i, w in enumerate(ws):
    for j, b in enumerate(bs):
        costs[i, j] = compute_cost(x, y, w, b)

# Scalar indices of the minimum cost. np.unravel_index(np.argmin(...))
# yields plain ints, unlike np.where(costs == costs.min()) which returns
# index *arrays* and would make the print below show [9] [29].
w_, b_ = np.unravel_index(np.argmin(costs), costs.shape)
# Optimum found by brute force
print(ws[w_], bs[b_]) # 9 29
pred_line(ws[w_], bs[b_])

# 3-D surface of the cost over the (w, b) grid
ax = plt.axes(projection="3d")
ax.view_init(30, 60)
ax.xaxis.set_pane_color((0, 0, 0))
ax.yaxis.set_pane_color((0, 0, 0))
ax.zaxis.set_pane_color((0, 0, 0))
# costs was filled as costs[w_index, b_index], but the default meshgrid
# ("xy") indexing lays grids out as [b_index, w_index] — that transposes
# the surface relative to costs. "ij" indexing keeps them aligned.
w_grid, b_grid = np.meshgrid(ws, bs, indexing="ij")
ax.plot_surface(w_grid, b_grid, costs, cmap="Spectral_r", alpha=0.5)
ax.set_title("w,b-cost")
ax.set_xlabel("w")
ax.set_ylabel("b")
ax.set_zlabel("cost")
# Mark the brute-force minimum on the surface
ax.scatter(ws[w_], bs[b_], costs[w_, b_], color="red")
plt.show()


def compute_gradient(x, y, w, b):
    """Partial derivatives of the MSE cost with respect to w and b.

    Args:
        x: feature values (array-like).
        y: observed target values (array-like, same length as x).
        w: current slope.
        b: current intercept.

    Returns:
        Tuple (w_gradient, b_gradient): averaged gradients over the samples.
    """
    residual = w * x + b - y  # signed prediction error per sample
    n = len(x)
    w_gradient = (2 * x * residual).sum() / n
    b_gradient = (2 * residual).sum() / n
    return w_gradient, b_gradient

def gradient_decsent(w, b, times=1000, learning_rate=0.001):
    """Gradient descent over the module-level (x, y) data.

    NOTE(review): the name is a typo for "descent"; kept unchanged so
    existing callers keep working.

    Args:
        w: initial slope.
        b: initial intercept.
        times: number of update iterations.
        learning_rate: step size per update (was hard-coded; now tunable
            with the same default, so behavior is unchanged).

    Returns:
        Tuple (w, b) after `times` gradient updates.
    """
    for _ in range(times):
        w_gradient, b_gradient = compute_gradient(x, y, w, b)
        # Step each parameter opposite to its gradient
        w = w - learning_rate * w_gradient
        b = b - learning_rate * b_gradient
    return w, b


w, b = gradient_decsent(0, 0, 10000)
# Optimum found via gradient descent, rounded to 2 decimal places
print(round(w, 2), round(b, 2)) # 9.14 27.89
pred_line(w, b)

# Fit the same model with scikit-learn for comparison
from sklearn import linear_model

lr = linear_model.LinearRegression()
# sklearn expects a 2-D feature matrix, hence the reshape(-1, 1)
lr.fit(x.values.reshape(-1, 1), y.values)
w = lr.coef_[0]  # fitted slope
b = lr.intercept_  # fitted intercept
print(round(w, 2), round(b, 2)) # 9.12 28.01
pred_line(w, b)
