import numpy as np
import matplotlib.pyplot as plt

# 1. Read in the dataset
# Load the raw data: comma-separated rows; the last column is the target.
data = np.loadtxt('data1.txt', delimiter=',')
m = len(data)  # number of samples
# Standardize every column to zero mean / unit variance.
# NOTE(review): the target column is scaled together with the features, so
# predictions and scores are in standardized units — confirm this is intended.
mu = data.mean(axis=0)
sigma = data.std(axis=0)
data -= mu
data /= sigma
# Shuffle with a fixed seed so the train/test split is reproducible.
np.random.seed(666)
np.random.shuffle(data)
X = np.c_[np.ones(m), data[:, :-1]]  # prepend a bias column of ones
y = data[:, -1]
# 70/30 train/test split
m_train = int(0.7 * m)
X_train, X_test = np.split(X, [m_train])
y_train, y_test = np.split(y, [m_train])


# 2. Implement the linear-regression cost function
# - function definition syntax
# - cost-function formula
# - function parameters written correctly
# - function return written correctly
def model(X, theta):
    """Linear hypothesis: return the predictions ``X @ theta``."""
    return X @ theta


def cost_func(h, y):
    """Half mean-squared-error cost: J = sum((h - y)^2) / (2m)."""
    residual = h - y
    n_samples = len(h)
    return residual.dot(residual) / (2 * n_samples)


# 3. Implement the gradient-descent routine
# - function definition syntax
# - record the cost-function history
# - gradient-descent update rule
# - function returns correctly
def grad(X, y, alpha=0.001, iter0=20000):
    """Fit linear-regression parameters by batch gradient descent.

    Parameters
    ----------
    X : (m, n) design matrix (first column expected to be the bias ones).
    y : (m,) target vector.
    alpha : learning rate.
    iter0 : number of iterations.

    Returns
    -------
    theta : (n,) fitted parameters.
    j_his : (iter0,) cost recorded at the start of each iteration.
    h : (m,) predictions of the final ``theta`` on ``X``.
    """
    m, n = X.shape
    # Print progress roughly 20 times.  max(1, ...) guards iter0 < 20,
    # where the original `iter0 // 20` was 0 and `i % group` raised
    # ZeroDivisionError.
    group = max(1, iter0 // 20)
    theta = np.zeros(n)
    j_his = np.zeros(iter0)  # cost function values over all iterations
    for i in range(iter0):
        h = model(X, theta)
        j = cost_func(h, y)
        if i % group == 0:
            print(f'#{i + 1} cost function value = {j}')
        j_his[i] = j
        # batch gradient of the half-MSE cost
        dt = 1 / m * X.T.dot(h - y)
        theta -= alpha * dt
    # Recompute predictions with the final theta: the loop's last `h` was
    # produced *before* the last parameter update, so it did not match the
    # returned theta.  Also avoids a NameError on `i` when iter0 == 0.
    h = model(X, theta)
    if iter0 > 0 and (iter0 - 1) % group != 0:
        print(f'#{iter0} cost function value = {cost_func(h, y)}')
    return theta, j_his, h


# 4. Train the regression model with gradient descent, then predict on the
#    test set.
# - call the training data correctly to fit the model
# - compute predictions for the test-set data
def score(h, y):
    """Coefficient of determination R^2 of predictions ``h`` vs targets ``y``."""
    ss_res = ((h - y) ** 2).sum()
    ss_tot = ((y - y.mean()) ** 2).sum()
    return 1 - ss_res / ss_tot


# Train the model, plot the learning curve, and evaluate both splits.
alpha=0.001
iter0=20000
theta, j_his, h_train = grad(X_train, y_train, alpha, iter0)
# learning curve: cost value recorded at every iteration
plt.plot(j_his, label='cost function')
plt.xlabel('Iterations')
plt.grid()
plt.legend()
# NOTE(review): the printed "accuracy" is the R^2 coefficient of
# determination, not a classification accuracy.
s_train = score(h_train, y_train)
print(f'训练集准确度为:{s_train}')
h_test = model(X_test, theta)  # predictions on the held-out test split
s_test = score(h_test, y_test)
print(f'测试集准确度为:{s_test}')

# finally show all figures at once
plt.show()
