#!D:/CODE/python
# -*- coding: utf-8 -*-
# @Time : 2020/5/28 10:51
# @Author : Alexdong
# @Site : 
# @File : demo.py
# @Software: PyCharm
# Functional description: univariate linear regression via gradient descent

import numpy as np
from matplotlib import pyplot as plt

# Load the training data: column 0 is x, column 1 is y.
# NOTE(review): assumes data.csv exists next to the script — no error handling here.
data = np.genfromtxt('data.csv', delimiter=',')
x_data = data[:, 0]
y_data = data[:, 1]
# plt.scatter(x_data, y_data)
# plt.show()

lr = 0.0001  # learning rate
k = 0       # theta1 (slope); re-assigned by gradient() below
b = 0       # theta0 (intercept); re-assigned by gradient() below
epochs = 10  # number of full gradient-descent passes over the data

def compute_loss(x_data, y_data, b, k):
    """Mean squared error loss of the line y = k*x + b.

    Args:
        x_data: sequence of input values.
        y_data: sequence of target values, same length as x_data.
        b: intercept (theta0).
        k: slope (theta1).

    Returns:
        float: sum((y - (k*x + b))**2) / (2*m). The 1/(2m) factor is the
        convention that cancels the 2 in the gradient.
    """
    x = np.asarray(x_data, dtype=float)
    y = np.asarray(y_data, dtype=float)
    # Vectorized residuals instead of a Python-level loop; also avoids
    # shadowing the builtin `sum` as the original accumulator did.
    residuals = y - (k * x + b)
    return float(np.sum(residuals ** 2)) / (2.0 * len(x))


def gradient(x_data, y_data, k, b, lr, epochs, plot_interval=2):
    """Fit y = k*x + b to the data by batch gradient descent.

    Args:
        x_data: sequence of input values.
        y_data: sequence of target values, same length as x_data.
        k: initial slope (theta1).
        b: initial intercept (theta0).
        lr: learning rate.
        epochs: number of full passes over the data.
        plot_interval: plot the current fit every this many epochs
            (default 2 matches the original behavior); pass None to
            disable plotting, e.g. for headless runs.

    Returns:
        tuple: the fitted (k, b).
    """
    x = np.asarray(x_data, dtype=float)
    y = np.asarray(y_data, dtype=float)
    m = float(len(x))

    for epoch in range(epochs):
        error = (k * x + b) - y
        # Partial derivatives of the 1/(2m) MSE w.r.t. k and b
        # (vectorized; fixes the original's `b_gradiet` typo).
        k_gradient = np.sum(error * x) / m
        b_gradient = np.sum(error) / m

        k -= lr * k_gradient
        b -= lr * b_gradient

        if plot_interval is not None and epoch % plot_interval == 0:
            print(epoch)
            plt.plot(x, y, 'b.')
            plt.plot(x, k * x + b, 'r')
            plt.show()

    return k, b

# Train from k = b = 0 (the literals here, not the module-level k/b above).
k, b = gradient(x_data, y_data, 0, 0, lr, epochs)

# Final fit: fitted line in red over the raw data as blue dots.
plt.plot(x_data, k * x_data + b, 'r')
plt.plot(x_data, y_data, 'b.')
print('loss =:', compute_loss(x_data, y_data, b, k), 'b =:',b, 'k =:', k)
plt.show()