import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# Load the training data (the file has no header row, so supply column names).
path = '/home/wdd/Desktop/ML_CODE/Linear_Regression/ex1data1.txt'
data = pd.read_csv(path, header=None, names=['Population', 'Profit'])

# Scatter plot of the raw training data.
data.plot.scatter(x='Population', y='Profit', label='Train Data')
# plt.show()

# Build the (m, 1) feature and target column vectors.
x = np.array(data.iloc[:, 0]).reshape((-1, 1))
y = np.array(data.iloc[:, 1]).reshape((-1, 1))
# Prepend a bias column of ones.  Passing the scalar 1 lets NumPy broadcast it
# to however many rows the file actually has; the old `values=[1] * 72` literal
# raised a ValueError whenever the row count differed from 72.
x = np.insert(x, 0, 1, axis=1)
print(x.shape)
# theta = [bias, slope], initialised to zero.  Kept as a plain list because
# computeCost / gradientDescent reshape it themselves.
theta = [0, 0]

print(np.array(x[:, 1]).reshape(([-1, 1])))

# 定义代价函数
def computeCost(x, y, theta):
    """Mean-squared-error cost J(theta) = sum((x @ theta - y)^2) / (2m).

    x     : (m, n) design matrix (first column is typically all ones).
    y     : (m, 1) target column vector.
    theta : length-n parameter sequence (list or array).
    Returns the scalar cost.
    """
    # reshape([-1, 1]) instead of the hard-coded [2, 1] so the cost works for
    # any number of parameters; for the script's 2-parameter theta the result
    # is unchanged.
    residual = np.dot(x, np.array(theta).reshape([-1, 1])) - y
    return np.sum(np.power(residual, 2)) / (2 * len(x))


# Report the cost of the all-zero starting parameters.
initial_cost = computeCost(x, y, theta)
print(initial_cost)


# 梯度下降
def gradientDescent(x, y, theta, alpha, iters):
    """Batch gradient descent for linear regression.

    x     : (m, n) design matrix; the script builds it with an all-ones
            first (bias) column.
    y     : (m, 1) target column vector.
    theta : length-n parameter list; updated IN PLACE and also returned
            (the plotting code below relies on this side effect).
    alpha : learning rate.
    iters : number of iterations.
    Returns (theta, cost) where cost[i] is the cost *before* update i,
    matching the original bookkeeping (the post-final-update cost is not
    recorded).
    """
    m = len(x)
    cost = []
    for _ in range(iters):
        theta_col = np.array(theta, dtype=float).reshape([-1, 1])
        error = np.dot(x, theta_col) - y
        # Cost before this update, computed inline so this function does not
        # depend on computeCost's internals.
        cost.append(np.sum(np.power(error, 2)) / (2 * m))
        # Vectorized simultaneous update: grad_j = x[:, j] . error / m.
        # For the bias column of ones this equals sum(error) / m, which is
        # exactly the old hand-written two-parameter update, but it now
        # handles any number of features instead of hard-coding two.
        grad = np.dot(x.T, error) / m
        for j in range(len(theta)):
            theta[j] = theta[j] - alpha * grad[j, 0]
    return theta, cost


# Train and plot the fitted regression line.
alpha = 0.01
Theta, Cost = gradientDescent(x, y, theta, alpha, 1500)
print(Theta, Cost)

# Use the *returned* Theta rather than relying on gradientDescent mutating
# `theta` in place, and use a fresh name for the plotting grid instead of
# clobbering the training matrix `x`.
xs = np.linspace(data.Population.min(), data.Population.max(), 100)
f = Theta[0] + Theta[1] * xs
plt.plot(xs, f, 'r', label='Prediction')
plt.legend()
plt.show()
