import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# Load the housing dataset: each row is (house size, number of bedrooms, price).
data = pd.read_csv('ex1data2.txt', names=['size', 'bedrooms', 'price'])

def normalize_feature(data):
    """Standardize every column: subtract its mean, divide by its sample std."""
    mu = data.mean()
    sigma = data.std()
    return (data - mu) / sigma


# Standardize all columns (features and target) so gradient descent
# converges quickly on comparably-scaled inputs.
data = normalize_feature(data)
# Sample of the normalized data:
#         size  bedrooms     price
# 0   0.130010 -0.223675  0.475747
# 1  -0.504190 -0.223675 -0.084074
# 2   0.502476 -0.223675  0.228626
# 3  -0.735723 -1.537767 -0.867025
# 4   1.257476  1.090417  1.595389

# Visual sanity checks: price against each normalized feature.
data.plot.scatter('size', 'price', label='size')
plt.show()

data.plot.scatter('bedrooms', 'price', label='bedrooms')
plt.show()

# Prepend a column of ones for the intercept (bias) term.
data.insert(0, 'ones', 1)

# X: all columns except the last (ones, size, bedrooms); y: the price column.
X = data.iloc[:, 0:-1]
y = data.iloc[:, -1]

# Convert the DataFrames to NumPy arrays for the matrix math below.
X = X.values
y = y.values
# Reshape y into a column vector; -1 infers the row count from the data
# instead of hard-coding the dataset size (was reshape(47, 1), which would
# break on any input file with a different number of rows).
y = y.reshape(-1, 1)


# 损失函数
# Cost function
def costFunction(X, y, theta):
    """Least-squares cost J(theta) = sum((X @ theta - y)^2) / (2m)."""
    residuals = X @ theta - y
    return (residuals ** 2).sum() / (2 * len(X))

# Start from the all-zeros parameter vector (bias, size, bedrooms weights).
theta = np.zeros(shape=(3, 1))

# Baseline cost before any training step.
cost_int = costFunction(X, y, theta)
print(cost_int)
# 0.4893617021276595

# ================
# 梯度下降
# Gradient descent
def gradientDescent(X, y, theta, alpha, iters):
    """Run `iters` steps of batch gradient descent.

    Returns the learned theta and the list of costs recorded after each step.
    The input theta is not mutated; each step rebinds a new array.
    """
    m = len(X)
    history = []
    for _ in range(iters):
        # Same arithmetic ordering as the classic update rule:
        # theta <- theta - alpha/m * X^T (X theta - y)
        theta = theta - (X.T @ (X @ theta - y)) * alpha / m
        history.append(costFunction(X, y, theta))
    return theta, history


# 不同alpha下的效果
# Compare convergence speed across several candidate learning rates.
candidate_alphas = [0.0003, 0.003, 0.03, 0.0001, 0.001, 0.01]
iters = 2000


fig, ax = plt.subplots()
for lr in candidate_alphas:
    # theta is never mutated by gradientDescent, so reuse is safe.
    _, history = gradientDescent(X, y, theta, lr, iters)
    ax.plot(np.arange(iters), history, label=lr)

ax.legend()
ax.set(xlabel='iters', ylabel='cost', title='cost vs iters')
plt.show()






