import math
import numpy as np
import matplotlib.pyplot as plt

def compute_cost(x, y, w, b):
    """Mean-squared-error cost of the linear model f(x) = w*x + b.

    Args:
        x: 1-D numpy array of training inputs, shape (m,).
        y: 1-D numpy array of training targets, shape (m,).
        w: slope parameter (scalar).
        b: intercept parameter (scalar).

    Returns:
        Scalar cost  sum((w*x+b - y)^2) / (2*m)  — the conventional
        1/(2m) factor simplifies the gradient expressions.
    """
    m = x.shape[0]
    f_wb = w * x + b
    # Vectorized: np.sum over the ndarray (the original used the builtin
    # `sum` and a dead `cost = 0` assignment); `**2` replaces np.power.
    return np.sum((f_wb - y) ** 2) / (2 * m)

def compute_gradient(x, y, w, b):
    """Gradient of the 1/(2m)-scaled MSE cost with respect to w and b.

    Args:
        x: 1-D numpy array of training inputs, shape (m,).
        y: 1-D numpy array of training targets, shape (m,).
        w: slope parameter (scalar).
        b: intercept parameter (scalar).

    Returns:
        Tuple (dj_dw, dj_db): partial derivatives of the cost.
    """
    m = x.shape[0]
    # Residual of the current prediction against the targets.
    residual = (w * x + b) - y
    dj_dw = np.dot(x, residual) / m
    dj_db = np.sum(residual) / m
    return dj_dw, dj_db

def gradient_descent(x, y, w, b, alpha, num_iters, cost_function, gradient_function):
    """Run `num_iters` steps of batch gradient descent.

    Args:
        x, y: training inputs and targets (1-D numpy arrays).
        w, b: initial parameter values.
        alpha: learning rate.
        num_iters: number of update steps to perform.
        cost_function: callable (x, y, w, b) -> scalar cost.
        gradient_function: callable (x, y, w, b) -> (dj_dw, dj_db).

    Returns:
        Tuple (w, b, J_history) — final parameters and the cost recorded
        after every update. Progress is printed roughly 10 times.
    """
    J_history = []
    for i in range(num_iters):
        grad_w, grad_b = gradient_function(x, y, w, b)
        # Simultaneous parameter update.
        w = w - alpha * grad_w
        b = b - alpha * grad_b
        J_history.append(cost_function(x, y, w, b))
        # Report ~10 evenly spaced progress lines.
        if i % math.ceil(num_iters / 10) == 0:
            print(f'iteration={i},cost={J_history[-1]}')
    return w, b, J_history

def compute_model_output(x, w, b):
    """Return the linear model prediction w*x + b (element-wise over x)."""
    prediction = b + x * w
    return prediction

# Load the training data: tab-separated numeric columns.
# np.loadtxt replaces the manual readlines/strip/split/np.array pipeline.
# NOTE(review): hard-coded Windows-style relative path — assumes the script
# is run from the project root; adjust per environment.
datas = np.loadtxt('.\\test3\\LinearRegression\\lr2_data.txt', delimiter='\t')
x_train = datas[:, 1]  # feature: second column
y_train = datas[:, 2]  # target: third column

# Fit the linear model with batch gradient descent.
w_init = 0
b_init = 0
w_final, b_final, J_history = gradient_descent(x_train, y_train, w_init, b_init, 1.0e-2, 10000, compute_cost, compute_gradient)

print(f'w={w_final},b={b_final}')

# Visualization: convergence curve, fitted line, and cost-surface contour.
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, constrained_layout=True, figsize=(12, 4))

# Cost vs. iteration.
ax1.plot(J_history)
ax1.set_title('Convergence')
ax1.set_xlabel('Iterations')
ax1.set_ylabel('Cost')

# Training data and the fitted line.
ax2.scatter(x_train, y_train, c='r', label='Actual Values')
ax2.plot(x_train, compute_model_output(x_train, w_final, b_final), c='b', label='Prediction')
ax2.set_title('Linear Regression')
ax2.set_xlabel('x')
ax2.set_ylabel('y')
ax2.legend()

# Cost contour over a (w, b) grid; meshgrid makes w0 vary along columns
# and b0 along rows.
w0, b0 = np.meshgrid(np.arange(-100, 200, 10), np.arange(-200, 100, 10))
# BUG FIX: np.zeros_like(w0) inherited w0's integer dtype (int arange),
# so each cost was silently truncated to an int on assignment.
# Allocate a float array instead.
z = np.zeros(w0.shape, dtype=float)
for i in range(w0.shape[0]):
    for j in range(w0.shape[1]):
        z[i, j] = compute_cost(x_train, y_train, w0[i, j], b0[i, j])
ax3.contour(w0, b0, z)
ax3.set_xlabel('w')
ax3.set_ylabel('b')
plt.show()
