import numpy as np
import pandas as pd
import matplotlib.pyplot as plt


# Load the training set: one feature (city population) and one target (profit).
data = pd.read_csv('ex1data1.txt', names=['population', 'profit'])

# Quick visual check of the raw data.
data.plot.scatter('population', 'profit', label='population')
plt.show()

# Prepend a column of ones so the intercept folds into theta.
data.insert(0, 'ones', 1)


# Features = every column but the last; target = the last column.
X = data.iloc[:, 0:-1]
y = data.iloc[:, -1]


# dataframe => ndarray
X = X.values
y = y.values
# Column vector; -1 infers the sample count instead of hard-coding 97,
# so this works for any dataset size.
y = y.reshape(-1, 1)


def costFunction(X, y, theta):
    """Half mean-squared-error cost for linear regression.

    X: (m, n) design matrix, y: (m, 1) targets, theta: (n, 1) parameters.
    """
    residuals = X @ theta - y
    m = len(X)
    return np.sum(residuals ** 2) / (2 * m)


# Start both parameters (intercept, slope) at zero.
theta = np.zeros((2, 1))

initial_cost = costFunction(X, y, theta)
print(initial_cost)  # expected: 32.072733877455676


def gradientDescent(X, y, theta, alpha, iters):
    """Batch gradient descent for linear regression.

    Parameters
    ----------
    X : ndarray, shape (m, n) — design matrix (first column of ones).
    y : ndarray, shape (m, 1) — target values.
    theta : ndarray, shape (n, 1) — starting parameters (not mutated).
    alpha : float — learning rate.
    iters : int — number of update steps.

    Returns
    -------
    (theta, costs) — final parameters and the cost after every step.
    """
    costs = []
    step = alpha / len(X)  # loop-invariant scale factor

    for i in range(iters):
        # Simultaneous update: theta -= (alpha/m) * X^T (X @ theta - y)
        theta = theta - step * (X.T @ (X @ theta - y))
        cost = costFunction(X, y, theta)
        costs.append(cost)

        # Periodic progress report (the per-iteration debug prints of
        # X.T, residuals and theta were removed — they flooded stdout).
        if i % 100 == 0:
            print(cost)

    return theta, costs


# Hyper-parameters for gradient descent.
alpha, iters = 0.02, 2000

# Fit the model; costs holds the loss recorded after each update.
theta, costs = gradientDescent(X, y, theta, alpha, iters)


# Learning curve: cost should fall monotonically as training proceeds.
fig, ax = plt.subplots()
ax.plot(range(iters), costs)
ax.set_xlabel('iters')
ax.set_ylabel('cost')
ax.set_title('cost vs iters')
plt.show()


# Evaluate the fitted line over the POPULATION (feature) range.
# Bug fix: the original used y.min()/y.max(), i.e. the profit range,
# as the x-axis span for the line.
x = np.linspace(X[:, -1].min(), X[:, -1].max())
y_ = theta[0, 0] + theta[1, 0] * x


# Training data with the learned regression line overlaid.
fig, ax = plt.subplots()
ax.scatter(X[:, -1], y, label='training data')
ax.plot(x, y_, 'r', label='predict')
ax.legend()
ax.set(xlabel='population', ylabel='profit')
plt.show()



# Normal equation (closed-form least squares)
def normalEquation(X, y):
    """Solve theta = (X^T X)^{-1} X^T y directly.

    Uses np.linalg.solve on the normal equations rather than forming
    the explicit inverse — cheaper and numerically more stable.
    """
    theta = np.linalg.solve(X.T @ X, X.T @ y)
    return theta

# Solve directly with the normal equation; the result should closely
# match the gradient-descent estimate printed above.
theta = normalEquation(X, y)

print(theta)