import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import train_test_split
from mpl_toolkits.mplot3d import Axes3D

plt.rcParams["font.sans-serif"] = ["SimHei"]  # use SimHei so the CJK plot titles below render
# Load the training data: two feature columns (x1, x2) and one label column (y)
train = pd.read_csv('../data/data2.txt', names=["x1", "x2", "y"])
X = np.array(train, "float32")
X = np.delete(X, [2], axis=1)  # drop column 2 (the label), keeping only the features
y = np.array(train["y"], "float32")

# Hold out 20% of the samples as a test set (fixed seed for reproducibility)
X_train, X_test, y_train, y_test = train_test_split(X, y,
                                                    test_size=0.2, random_state=0)


# 极差标准化方法 预处理数据
def scaler(train, test):
    """Min-max scale both arrays using the TRAINING set's statistics.

    Mutates both arguments in place and returns them.  The test set is
    shifted and divided by the training min/range so both sets live on
    the same [0, 1]-style scale without leaking test-set statistics.
    """
    lo = train.min(axis=0)          # per-column training minimum
    span = train.max(axis=0) - lo   # per-column training range (max - min)
    train -= lo
    train /= span
    test -= lo    # apply the *training* statistics to the test set
    test /= span
    return train, test


def min_max_gap(train):
    """Return the per-column (min, max, max - min) of *train*."""
    lo, hi = train.min(axis=0), train.max(axis=0)
    return lo, hi, hi - lo


# Remember the label statistics so predictions can be mapped back to the
# original scale later (see the de-normalization at the end of the script).
y_min, y_max, y_gap = min_max_gap(y_train)

# Keep an unscaled copy of the features: scaler() mutates its arguments in place.
X_train_copy = X_train.copy()

X_train, X_test = scaler(X_train, X_test)  # min-max normalize the features
y_train, y_test = scaler(y_train, y_test)  # min-max normalize the labels


# 定义一个MSE均方误差函数
def loss_function(X, y, W):
    """Mean-squared-error cost J(W) = sum((X.W^T - y)^2) / (2m).

    X is the (m, k) feature matrix, y the length-m label vector and W the
    (1, k) weight row; returns a scalar cost.
    """
    residual = X.dot(W.T) - y.reshape((len(X), 1))  # prediction minus truth, (m, 1)
    return np.sum(residual ** 2) / (2 * len(X))


# Initial hyper-parameters for gradient descent
iterations = 500  # number of iterations (the original comment wrongly said 300)
alpha = 0.1  # learning rate (the original comment wrongly said 0.5)
weight = np.array([[1, 1], ])  # initial weights, shape (1, 2) — one per feature

print("当前损失 w=1,1  alpha =0.1  迭代500次", loss_function(X_train, y_train, weight))


def gradient_descent(X, y, w, lr, iter):  # noqa: iter shadows the builtin; kept for caller compatibility
    """Batch gradient descent for linear regression without a bias term.

    X    : (m, k) feature matrix
    y    : length-m label vector
    w    : (1, k) initial weight row (not mutated; updates rebind a new array)
    lr   : learning rate
    iter : number of iterations

    Returns (l_history, w_history): the per-iteration cost (recorded at the
    freshly *updated* weights) and the per-iteration weight rows.
    """
    m = len(X)
    n_features = w.shape[1]  # generalized: was hard-coded to 2 columns
    l_history = np.zeros(iter)
    w_history = np.zeros((iter, n_features))
    y_col = y.reshape((m, 1))  # column vector, hoisted out of the loop
    for i in range(iter):
        loss = X.dot(w.T) - y_col               # residual at the current weights
        derivative_w = (X.T.dot(loss) / m).T    # gradient of the MSE cost
        w = w - lr * derivative_w               # step downhill
        # MSE at the updated weights (same formula as loss_function)
        l_history[i] = np.sum((X.dot(w.T) - y_col) ** 2) / (2 * m)
        w_history[i] = w
    return l_history, w_history


def liner_regression(X, y, weight, alpha, iter):
    """Run gradient descent, print the final cost and return both histories."""
    history = gradient_descent(X, y, weight, alpha, iter)
    print("训练最终损失", history[0][-1])  # last entry of the loss history
    return history


# Train the multivariate regression on the normalized data
loss_history, weight_history = liner_regression(X_train, y_train, weight, alpha, iterations)

print("*" * 20)
print("损失值变化记录", loss_history)

print("*" * 20)

print("权重变化记录", weight_history)

# Plot the training loss curve over the iterations
plt.plot(np.arange(iterations), loss_history, 'g--', label='Loss Curve')
plt.xlabel('Iterations')  # x-axis label
plt.ylabel('Loss')  # y-axis label
plt.title('损失函数图形')  # figure title ("loss function curve")
plt.legend()  # show the legend
plt.show()

# Visualize the fitted plane together with the normalized training points.
w1 = weight_history[-1]  # final weight vector, shape (2,)
x1 = np.linspace(X_train[:, 0].min(), X_train[:, 0].max(), 100)
x2 = np.linspace(X_train[:, 1].min(), X_train[:, 1].max(), 100)
x1, x2 = np.meshgrid(x1, x2)
f = w1[0] * x1 + w1[1] * x2  # model surface: y_hat = w1*x1 + w2*x2 (no bias term)
fig = plt.figure()
# FIX: `Axes3D(fig)` stopped auto-attaching the axes to the figure in
# matplotlib 3.4 and that constructor form was removed in 3.7; the
# supported way is add_subplot with a 3d projection.
Ax = fig.add_subplot(projection='3d')
Ax.plot_surface(x1, x2, f, rstride=1, cstride=1, cmap=plt.get_cmap('rainbow'))
Ax.scatter(X_train[:, 0], X_train[:, 1], y_train, c="r")
plt.title('散点拟合图形')  # figure title

plt.show()


def costs_fun(w1, w2):
    """MSE cost of the model y_hat = w1*x1 + w2*x2 on the module-level
    (already normalized) training data; used to draw the loss surface
    over the (w1, w2) weight grid below.
    """
    w = np.array([w1, w2])
    y_hat = X_train.dot(w)  # predictions, shape (m,)
    # BUG FIX: the original reshaped y_hat to (m, 1) and subtracted the 1-D
    # y_train, which broadcast to an (m, m) matrix and inflated the cost by
    # roughly a factor of m.  Both operands are kept 1-D here.
    loss = y_hat - y_train
    cost = np.sum(loss ** 2) / (2 * len(X_train))
    return cost


# Loss surface over a grid of candidate (w1, w2) weights in [0, 1).
theat1 = np.arange(0.0, 1.0, 0.005)
theat2 = np.arange(0.0, 1.0, 0.005)
theat1, theat2 = np.meshgrid(theat1, theat2)
# Evaluate the cost at every grid point, then restore the 2-D grid shape.
f = np.array(list(
    map(lambda t: costs_fun(t[0], t[1]),
        zip(theat1.flatten(), theat2.flatten()))
))
f = f.reshape(theat1.shape[0], -1)
fig = plt.figure()
# FIX: `Axes3D(fig)` stopped auto-attaching the axes to the figure in
# matplotlib 3.4 and was removed in 3.7; use add_subplot instead.
Ax = fig.add_subplot(projection='3d')
Ax.plot_surface(theat1, theat2, f, rstride=1, cstride=1, cmap=plt.get_cmap('rainbow'))
# NOTE(review): this scatters feature points (x1, x2, y) into the (w1, w2)
# parameter space of the surface above — looks unintended; confirm.
Ax.scatter(X_train[:, 0], X_train[:, 1], y_train, c="r")
plt.title('损失函数随着w1 w2的变化图形')  # figure title
plt.show()

# Predict on 1650 synthetic standard-normal samples, normalized with the
# training-set statistics (X_train_copy holds the unscaled features).
# NOTE(review): randn samples are not bounded by the training min/max, so the
# scaled values may fall outside [0, 1] — confirm this is intended.
X_plan = np.random.randn(1650, 2)
X_train, X_plan = scaler(X_train_copy, X_plan)

n = weight_history.shape[0] - 1  # index of the final iteration
t = weight_history[n, :].reshape(2, -1)  # final weights as a (2, 1) column
y_plan = np.dot(X_plan, t)  # predictions in the normalized label space

# Map predictions back to the original label scale (undo the min-max scaling)
y_value = y_plan * y_gap + y_min
print("*" * 20)
print("预测值", y_value.astype(int))
