import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error


# Generate sample data with a known linear relationship:
# y = 2.5 + 3.7*x plus Gaussian noise (sigma = 0.2).
np.random.seed(0)
X = np.random.rand(100, 1)
y = 2.5 + 3.7 * X.squeeze() + np.random.normal(0, 0.2, 100)

# Hold out 20% of the data for evaluation.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

# Fit a linear regression model.
lr = LinearRegression()
lr.fit(X_train, y_train)

# Fit a depth-limited decision tree regressor for comparison.
dt = DecisionTreeRegressor(max_depth=3)
dt.fit(X_train, y_train)

# Report test-set mean squared error for both models BEFORE showing the
# figure: plt.show() blocks until the window is closed, so printing first
# ensures the metrics appear immediately.
print(f"Linear Regression MSE: {mean_squared_error(y_test, lr.predict(X_test))}")
print(f"Decision Tree Regressor MSE: {mean_squared_error(y_test, dt.predict(X_test))}")

# Dense grid over [0, 1] for drawing smooth prediction curves.
X_range = np.linspace(0, 1, 100).reshape(-1, 1)

# Plot the raw data points and both fitted models' predictions.
plt.figure(figsize=(10, 6))
plt.scatter(X, y, c='b', label='Data')
plt.plot(X_range, lr.predict(X_range), c='r', label='Linear Regression')
plt.plot(X_range, dt.predict(X_range), c='g', label='Decision Tree Regressor')
plt.xlabel('X')
plt.ylabel('y')
plt.title('Comparison of Linear Regression and Decision Tree Regressor')
plt.legend()
plt.show()
