import numpy as np
import matplotlib.pyplot as plt


def add_x0(x):
    """Prepend a bias column of ones (x0 = 1) to the feature matrix.

    Args:
        x: (m, k) feature matrix.

    Returns:
        (m, k + 1) design matrix whose first column is all ones.
    """
    bias = np.ones((len(x), 1))
    return np.hstack((bias, x))


def model(XX, theta):
    """Linear hypothesis: return the predictions XX @ theta."""
    return XX @ theta


def gradient_descent(XX, y, theta=None, alpha=0.001, iter=1000):
    """Fit linear-regression parameters by batch gradient descent.

    Args:
        XX: (m, n) design matrix, bias column included.
        y: (m, 1) target vector.
        theta: optional (n, 1) initial parameters; defaults to zeros.
            The caller's array is never modified.
        alpha: learning rate.
        iter: number of iterations. (Name shadows the builtin but is kept
            for backward compatibility with existing keyword callers.)

    Returns:
        Tuple ``(theta, J)``: the learned (n, 1) parameters and the list
        of per-iteration cost values.
    """
    m, n = XX.shape

    if theta is None:
        theta = np.zeros((n, 1))
    else:
        # Work on a float copy: the previous in-place update (theta -= ...)
        # silently mutated the caller's array and raised on integer dtypes.
        theta = np.array(theta, dtype=float)

    J = []
    for _ in range(iter):
        h = XX.dot(theta)                    # (m, 1) predictions
        e = h - y                            # residuals
        # Mean squared error cost; .item() safely extracts the 1x1 scalar.
        cost = e.T.dot(e).item() / (2 * m)
        J.append(cost)
        grad = XX.T.dot(e) / m               # (n, 1) cost gradient
        theta = theta - alpha * grad         # out-of-place update
    return theta, J


def score(h, y):
    """Coefficient of determination, R^2 = 1 - SS_res / SS_tot.

    Args:
        h: (m, 1) predicted values.
        y: (m, 1) true targets.

    Returns:
        1x1 array holding the R^2 score (1.0 means a perfect fit).
    """
    e = h - y
    # BUG FIX: SS_tot is the squared deviation from the MEAN of y;
    # the original used y.min(), which inflates/deflates the score.
    y_mean = y.mean()
    dya = y - y_mean
    u = e.T.dot(e)      # residual sum of squares
    v = dya.T.dot(dya)  # total sum of squares
    return 1 - u / v


# Fixed seed so the shuffle below is reproducible.
np.random.seed(1)
plt.figure(figsize=[12, 5])
# Subplot grid: spr rows x spc columns; spn is the running subplot index.
spr = 1
spc = 3
spn = 0

# Load: m samples, n columns (features plus the target in the last column).
data_loaded = np.loadtxt(r'../data/ex1data2.txt', delimiter=',')  # mxn
m = len(data_loaded)
n = data_loaded.shape[1]

# Shuffle rows before the train/test split.
rand_idx = np.random.permutation(m)
data_loaded = data_loaded[rand_idx]

# Feature scaling: per-column z-score standardization.
# NOTE(review): mean/std are computed on the FULL dataset before splitting,
# so test-set statistics leak into training — confirm this is intended
# for the exercise.
mu_vector = data_loaded.mean(axis=0)  # n,
sigma_vector = data_loaded.std(axis=0)  # n,
data_scaled = (data_loaded - mu_vector) / sigma_vector  # mxn ~ n, => mxn ~ mxn

# Separate features x and target y (last column); add the bias column.
x = data_scaled[:, 0:-1]
y = data_scaled[:, -1].reshape(m, 1)
XX = add_x0(x)

# Split: first 70% of the shuffled rows train, remaining 30% test.
split_point = int(0.7 * m)
train_x, test_x = np.split(x, [split_point])
train_y, test_y = np.split(y, [split_point])
train_XX, test_XX = np.split(XX, [split_point])

# Fit linear regression by batch gradient descent (default alpha/iterations).
theta, J = gradient_descent(train_XX, train_y)
print(f'Theta = {theta}')

# Plot 1: cost history per iteration (should decrease monotonically).
spn += 1
plt.subplot(spr, spc, spn)
plt.plot(J)
plt.title('cost function')
plt.xlabel('iteration')

# Plot 2: training predictions vs. targets (diagonal = perfect fit).
spn += 1
plt.subplot(spr, spc, spn)
train_hy = model(train_XX, theta)
plt.scatter(train_y, train_y)
plt.scatter(train_y, train_hy)
print(f'train score = {score(train_hy, train_y)}')

# Plot 3: same diagnostic on the held-out test set.
spn += 1
plt.subplot(spr, spc, spn)
test_hy = model(test_XX, theta)
plt.scatter(test_y, test_y)
plt.scatter(test_y, test_hy)
print(f'test score = {score(test_hy, test_y)}')

plt.show()
