# 数据预处理
import numpy as np
import pandas as pd
import statsmodels.api
from matplotlib import pyplot as plt
from matplotlib.font_manager import fontManager
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import OneHotEncoder, StandardScaler, PolynomialFeatures
from sklearn.model_selection import train_test_split
import matplotlib as mlp

# --- Preprocessing ---
df = pd.read_csv("salary_multi.csv")
# Ordinal (label) encoding for education: the numeric order is meaningful
# (high school < bachelor < master).
education_rank = {'高中': 0, '本科': 1, '硕士': 2}
df['EducationLevel'] = df['EducationLevel'].map(education_rank)
# One-hot encoding for City: pure categories, no order implied.
city_onehot = OneHotEncoder().fit_transform(df[['City']]).toarray()
df[['CityA', 'CityB', 'CityC']] = city_onehot
# CityA + CityB fully determine CityC, so drop it (along with the raw
# column) to keep the feature count down.
df = df.drop(['City', 'CityC'], axis=1)
print(df.head())

# --- Train/test split ---
y = df['Salary']
X = df.drop(['Salary'], axis=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1)
# Work with plain numpy arrays from here on.
X_train, X_test = np.array(X_train), np.array(X_test)
y_train, y_test = np.array(y_train), np.array(y_test)


def compute_cost(x, y, w, b):
    """Mean squared error of the linear model over all samples.

    Model: y_hat = w1*x1 + w2*x2 + ... + b. The goal of training is to
    find the (w, b) that minimizes this value.

    :param x: 2-D feature matrix, one row per sample
    :param y: 1-D target vector
    :param w: 1-D weight vector, one entry per feature
    :param b: scalar bias
    :return: mean of the squared prediction errors
    """
    predictions = (x * w).sum(axis=1) + b
    squared_errors = (y - predictions) ** 2
    return np.mean(squared_errors)


def compute_gradient(x, y, w, b):
    """Gradients of the (halved) MSE cost with respect to w and b.

    Derivation: with y_pred = w·x + b,
        d/db   (y_pred - y)^2  ∝ (y_pred - y)
        d/dw_i (y_pred - y)^2  ∝ x_i * (y_pred - y)
    The constant factor 2 is folded into the learning rate.

    Improvement over the original: the per-feature Python loop is
    replaced by a single broadcasted numpy expression — identical
    result, computed entirely in C.

    :param x: 2-D feature matrix, one row per sample
    :param y: 1-D target vector
    :param w: 1-D weight vector, one entry per feature
    :param b: scalar bias
    :return: (w_gradient, b_gradient) — 1-D array and scalar
    """
    y_pred = (w * x).sum(axis=1) + b
    errors = y_pred - y
    b_gradient = errors.mean()
    # Broadcast errors down each feature column, then average per column.
    w_gradient = (x * errors[:, np.newaxis]).mean(axis=0)
    return w_gradient, b_gradient


def gradient_decsent(x, y, w, b, times=1000, learning_rate=1.0e-2, report_every=1000):
    """Batch gradient descent for the linear model y = w·x + b.

    (The misspelled name is kept intentionally — existing callers use it.)

    Improvement: the learning rate and the progress-report interval were
    hard-coded inside the body despite the original comment saying they
    should be tunable; both are now keyword parameters whose defaults
    reproduce the original behavior exactly.

    :param x: 2-D feature matrix, one row per sample
    :param y: 1-D target vector
    :param w: initial weight vector
    :param b: initial scalar bias
    :param times: number of update iterations
    :param learning_rate: step size applied to each gradient update
    :param report_every: print progress every this many iterations
    :return: the final (w, b) after ``times`` iterations
    """
    for i in range(times):
        w_gradient, b_gradient = compute_gradient(x, y, w, b)
        w = w - learning_rate * w_gradient
        b = b - learning_rate * b_gradient
        if i % report_every == 0:
            print("i={},w={},b={},cost={}".format(i, w, b, compute_cost(x, y, w, b)))
    return w, b


# Feature scaling (standardization) would speed up gradient descent;
# currently left disabled:
# stander_scaler = StandardScaler()
# stander_scaler.fit(X_train)
# X_train = stander_scaler.transform(X_train)
# X_test = stander_scaler.transform(X_test)
# print(X_train)
# Initial guesses: one weight per feature plus a scalar bias.
# NOTE(review): assumes X_train has exactly 4 feature columns — confirm
# against the columns remaining after preprocessing.
w = np.array([1, 2, 3, 4])
b = 1
w, b = gradient_decsent(X_train, y_train, w, b, 10000)

# Register a CJK-capable font so the configured family renders correctly.
fontManager.addfont("ChineseFont.ttf")
mlp.rc('font', family='ChineseFont', size=12)

# Predictions of the trained model on the training set.
y_pred = np.dot(X_train, w) + b

# Actual-vs-predicted scatter: the closer the points sit to the
# diagonal, the better the fit.
plt.scatter(y_train, y_pred, color='blue', label='Original data vs Predicted data')
diagonal = [y.min(), y.max()]
plt.plot(diagonal, diagonal, linestyle='--', color='red', label='Diagonal line')
plt.xlabel('Actual values')
plt.ylabel('Predicted values')
plt.title('Actual vs Predicted values')
plt.legend()
plt.show()


def draw(X, y, degree=2):
    """Fit a polynomial regression on the first feature of X and plot it.

    Bug fixed: the original used ``degree=X.shape[1]``, which is always 1
    after X is reduced to a single column — so the curve labelled
    "Quadratic fit" was actually a straight line. The degree is now an
    explicit parameter defaulting to 2, matching the plot labels.

    Also dropped the redundant ``statsmodels.api.add_constant`` call:
    ``PolynomialFeatures`` already emits a bias column, so ``add_constant``
    (whose default skips when a constant is present) was a no-op on the
    training matrix and was never applied to the plot matrix anyway.

    :param X: 2-D feature matrix; only column 0 is used
    :param y: target vector
    :param degree: polynomial degree of the fitted curve (default 2)
    """
    X = X[:, 0].reshape(-1, 1)
    # Expand x into [1, x, x**2, ...] up to the requested degree.
    poly = PolynomialFeatures(degree=degree)
    X_poly = poly.fit_transform(X)
    model = LinearRegression()
    model.fit(X_poly, y.ravel())
    # Dense grid over the observed x-range for a smooth curve;
    # keep it 2-D so the same transformer applies.
    x_min, x_max = X.min(), X.max()
    X_plot = np.linspace(x_min, x_max, 100).reshape(-1, 1)
    X_plot_poly = poly.transform(X_plot)
    # Original points plus the fitted polynomial curve.
    plt.scatter(X, y, color='blue', label='Original data')
    plt.plot(X_plot, model.predict(X_plot_poly), color='red', label='Quadratic fit')
    plt.title('Quadratic Polynomial Regression')
    plt.xlabel('X')
    plt.ylabel('y')
    plt.legend()
    plt.show()


draw(X_train, y_train) # too few samples here to produce a meaningful curve
