import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import pandas as pd
import time
from IPython import display
import threading


# %matplotlib inline

# Model: z = m*x + m2*y + b
# m and m2 are the slopes for the two features, b is the intercept.
# Error function: mean of the squared differences between predicted and actual z.
def compute_error_for_line_given_points(b, m, m2, points):
    """Return the mean squared error of the plane ``z = m*x + m2*y + b``.

    Args:
        b: intercept of the fitted plane.
        m: coefficient for the first feature (column 0 of ``points``).
        m2: coefficient for the second feature (column 1 of ``points``).
        points: array-like of shape (N, 3); columns are x, y and the
            observed target z.

    Returns:
        float: mean of the squared residuals over all N rows.
    """
    pts = np.asarray(points, dtype=float)
    x, y, z = pts[:, 0], pts[:, 1], pts[:, 2]
    # Vectorized residuals replace the original per-row Python loop.
    residuals = z - (m * x + m2 * y + b)
    return float(np.mean(residuals ** 2))


# Gradient descent step: each call performs one iteration and returns the updated parameters.
def step_gradient(b_current, m_current, m2_current, points, learningRate):
    """Perform one gradient-descent step for the plane ``z = m*x + m2*y + b``.

    Args:
        b_current: current intercept.
        m_current: current slope for column 0 (x).
        m2_current: current slope for column 1 (y).
        points: array-like of shape (N, 3); columns are x, y and the target z.
        learningRate: step size applied to the averaged gradient.

    Returns:
        [new_b, new_m, new_m2]: the updated parameters after one step.
    """
    pts = np.asarray(points, dtype=float)
    N = float(len(pts))
    x = pts[:, 0]
    x2 = pts[:, 1]
    y = pts[:, 2]

    # Residuals of the current model; the gradients below are the negated
    # sums of (residual * feature).  Note the conventional factor 2/N is
    # partially folded away: the /N happens in the update lines and the
    # factor 2 is absorbed into the learning rate, matching the original.
    residual = y - (m_current * x + m2_current * x2 + b_current)
    b_gradient = -np.sum(residual)
    m_gradient = -np.sum(x * residual)
    m2_gradient = -np.sum(x2 * residual)

    new_b = b_current - (learningRate * b_gradient / N)
    new_m = m_current - (learningRate * m_gradient / N)
    new_m2 = m2_current - (learningRate * m2_gradient / N)

    return [new_b, new_m, new_m2]


# Shared state between the descent worker thread (gradient_descent_runner)
# and the plotting loop on the main thread (refreshGraphic).
gM = 0   # current slope for feature x
gM2 = 0  # current slope for feature y
gB = 0   # current intercept

gData = []   # pandas DataFrame with columns x, y, z (set by gradient_descent_runner)
gPoint = []  # raw (N, 3) numpy array of samples (set by run)
fig = plt.figure()
# Fix: `Axes3D(fig)` stopped attaching the axes to the figure in
# matplotlib >= 3.4 (auto_add_to_figure was deprecated, then removed),
# which left the 3D surface invisible.  add_subplot is the supported API.
ax = fig.add_subplot(projection='3d')

def refreshGraphic():
    """Continuously redraw the fitted surface from the shared globals.

    Runs forever on the calling (main) thread while gradient_descent_runner
    updates gB/gM/gM2 from a worker thread; never returns.

    NOTE(review): the status line hard-codes "1000" as the iteration count,
    and plt.clf() clears the whole figure while the module-level `ax` is
    still used afterwards — both look like latent bugs; confirm intended
    behaviour before relying on the plot output.
    """
    while 1:
        # Progress line; `1000` is a fixed label, not the actual iteration.
        print("\rAfter {0} iterations b = {1}, m = {2}, error = {3}"
              .format(1000, gB, gM, compute_error_for_line_given_points(gB, gM, gM2, gPoint)), end='', flush=True)

        # time.sleep(0.5)
        # Yield to the GUI event loop so the window stays responsive.
        plt.pause(0.04)

        plt.clf()

        # plt.plot(gData.x, gData.y, "bo")
        # plt.plot(gData.x, gData.x * gM + gB)
        # display.clear_output(wait=True)


        # NOTE(review): gData.x / gData.y are 1-D Series; plot_surface expects
        # 2-D grids, so this call likely draws nothing useful — verify.
        print(gData.x, gData.y, gData.x * gM + gData.y * gM2 + gB)
        ax.plot_surface(gData.x, gData.y, gData.x * gM + gData.y * gM2 + gB, rstride=1, cstride=1, cmap='GnBu')
        plt.show()


def gradient_descent_runner(data, points, starting_b, starting_m, starting_m2, learning_rate, num_iterations):
    """Run gradient descent, publishing progress through the module globals.

    Args:
        data: DataFrame with columns x, y, z (stored in gData for the plot loop).
        points: (N, 3) array-like of samples passed to step_gradient.
        starting_b: initial intercept guess.
        starting_m: initial slope guess for x.
        starting_m2: initial slope guess for y.
        learning_rate: step size for each gradient update.
        num_iterations: number of descent steps to run.

    Returns:
        [b, m, m2]: the final intercept and both slopes.

    Side effects: updates the globals gB, gM, gM2 (read by refreshGraphic)
    after every iteration, and sleeps briefly so the plot loop can keep up.
    """
    global gM
    global gM2
    global gB
    global gData
    gData = data
    print('gradient_descent_runner')
    print(gData.x, gData.y, gData.x * gM + gData.y * gM2 + gB)

    b = starting_b
    m = starting_m
    m2 = starting_m2

    # Hoisted: the original rebuilt np.array(points) on every iteration.
    pts = np.array(points)
    for i in range(num_iterations):
        b, m, m2 = step_gradient(b, m, m2, pts, learning_rate)
        # Publish progress for the plotting loop on the main thread.
        gB = b
        gM = m
        gM2 = m2
        time.sleep(0.01)

    # Bug fix: the original returned only [b, m], silently dropping m2.
    return [b, m, m2]


def run():
    """Load data.csv, synthesize a z column, and fit the plane z = m*x + m2*y + b.

    Spawns the gradient descent on a worker thread and runs the plotting
    loop on the main thread (matplotlib is not thread-safe, so drawing must
    stay here).  NOTE(review): refreshGraphic never returns, so t.join()
    below is unreachable and the process only exits when the user kills it.
    """
    global gPoint
    points = np.genfromtxt('data.csv', delimiter=",")

    # Synthesize the target z as the mean of the two input columns and append
    # it as a third column (vectorized; replaces the original index loop plus
    # the np.split/np.concatenate dance).
    z = points.mean(axis=1, keepdims=True)
    print(z)
    points = np.concatenate((points, z), axis=1)
    print(points)

    gPoint = points

    data = pd.DataFrame(gPoint, columns=['x', 'y', 'z'])
    print(data)
    print(data.x)
    print(data.y)
    print(data.z)

    learning_rate = 0.000001
    initial_b = 0   # initial intercept guess
    initial_m = 0   # initial slope guess for x
    initial_m2 = 0  # initial slope guess for y
    num_iterations = 10000

    print("Starting gradient descent at b = {0}, m = {1}, error = {2}".format(initial_b, initial_m,
                                                                              compute_error_for_line_given_points(
                                                                                  initial_b, initial_m, initial_m2,
                                                                                  points)))
    print("Running...")
    # Descent runs on a worker thread; the main thread keeps the GUI alive.
    t = threading.Thread(target=gradient_descent_runner,
                         args=[data, points, initial_b, initial_m, initial_m2, learning_rate, num_iterations])
    t.start()
    refreshGraphic()
    t.join()


# Script entry point: run the regression demo when executed directly.
if __name__ == '__main__':
    run()
