"""
The code here also works for linear regression with a single feature
"""
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

def compute_model_output(x,w,b):
    """Return the linear-model prediction f_wb = x * w + b.

    Works for scalars as well as NumPy matrices; for np.matrix
    operands `*` performs matrix multiplication.
    """
    return x * w + b

def compute_cost(x,y,w,b):
    """Mean squared-error cost J(w, b) = (1/2m) * sum((x*w + b - y)^2).

    x is the (m, n) feature matrix, y the (m, 1) target column vector,
    w the (n, 1) weight column vector and b the scalar bias.
    """
    num_examples = x.shape[0]
    residuals = (x * w + b) - y
    return np.sum(np.power(residuals, 2)) / (2 * num_examples)

def compute_gradient(x,y,w,b):
    """Compute the gradient of the squared-error cost w.r.t. w and b.

    Args:
        x: (m, n) feature matrix (np.matrix, so `*` is matrix multiply)
        y: (m, 1) target column vector
        w: (n, 1) weight column vector
        b: scalar bias

    Returns:
        dj_dw: (n, 1) gradient with respect to w
        dj_db: scalar gradient with respect to b
    """
    # Only the row count is needed; drops the unused `n` from the
    # original `m, n = x.shape` and matches compute_cost's style.
    m = x.shape[0]
    errors = x * w + b - y
    dj_dw = (x.T * errors) / m
    dj_db = np.sum(errors) / m
    return dj_dw,dj_db

def gradient_descent(x,y,w,b,alpha,num_iters,cost_function,gradient_function):
    """Run batch gradient descent and record the cost at every step.

    Args:
        x: (m, n) feature matrix
        y: (m, 1) target column vector
        w: initial (n, 1) weight column vector (left unmodified)
        b: initial scalar bias
        alpha: learning rate
        num_iters: number of iterations to run
        cost_function: callable (x, y, w, b) -> scalar cost
        gradient_function: callable (x, y, w, b) -> (dj_dw, dj_db)

    Returns:
        (w, b, J_history): final parameters plus the per-iteration cost.
    """
    J_history = []
    # Hoist the loop-invariant progress interval (~10 progress lines total).
    interval = math.ceil(num_iters / 10) if num_iters > 0 else 1
    for i in range(num_iters):
        # Bug fix: use the injected callables instead of hard-coding
        # compute_gradient/compute_cost, so the parameters actually matter.
        dj_dw,dj_db = gradient_function(x,y,w,b)
        # Bug fix: rebind instead of `-=`; in-place NumPy subtraction
        # mutated the caller's initial weight matrix (w_init).
        w = w - alpha*dj_dw
        b = b - alpha*dj_db

        J_history.append(cost_function(x,y,w,b))
        if i % interval == 0:
            print(f'iterations={i:4},cost={J_history[-1]:0.2e}')
    return w,b,J_history

# --- Script entry: load the housing data, normalize, train, plot convergence ---

# Windows-style relative path — assumes the script is run from the repo root;
# TODO(review): confirm working directory, otherwise read_csv raises.
data_source = '.\\test3\\ExtraExercise\\MultipleLinearRegression\\ex1data2.txt'
data = pd.read_csv(data_source,header=None,names=['Size', 'Bedrooms' ,'Price'])
# Z-score normalize every column (features AND the target) so gradient
# descent with a small learning rate converges in a reasonable iteration count.
data = (data - data.mean())/data.std()

n = data.shape[1]
# NOTE(review): np.matrix is deprecated in NumPy; the helper functions rely on
# its `*`-is-matrix-multiply semantics, so do not swap in np.array without
# also updating them.
x_train = np.matrix(data.iloc[:,0:n-1])  # all columns except the last are features
y_train = np.matrix(data.iloc[:,n-1]).T  # last column ('Price') as an (m, 1) column vector

w_init = np.matrix(np.zeros(x_train.shape[1])).T  # (n, 1) zero weight vector
b_init = 0

# alpha=1e-3, 10000 iterations; cost/gradient callables are injected.
w_final,b_final,J_history = gradient_descent(x_train,y_train,w_init,b_init,1.0e-3,10000,compute_cost,compute_gradient)

print(f'w={w_final},b={b_final}')
# Plot cost vs. iteration to visually verify the descent converged.
fig,(ax1) = plt.subplots(1,1,constrained_layout = True,figsize = (12,8))
ax1.plot(J_history)
ax1.set_title('Convergence')
ax1.set_xlabel('Iterations')
ax1.set_ylabel('Cost')
plt.show()