import joblib
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.ensemble import RandomForestRegressor, BaggingRegressor, AdaBoostRegressor
from sklearn.linear_model import LinearRegression,Ridge,Lasso
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import StandardScaler


# Load the training data.
data = pd.read_csv("train.csv")

# Drop every column whose missing-value ratio exceeds 30%.
missing_features = data.isnull().mean()
high_missing_features = missing_features[missing_features > 0.3].index.tolist()
data.drop(high_missing_features, axis=1, inplace=True)

# Drop rows where LotFrontage is missing.
# Guard added: if LotFrontage itself were removed by the >30%-missing filter
# above, dropna(subset=...) would raise a KeyError.
if "LotFrontage" in data.columns:
    data = data.dropna(subset=["LotFrontage"], axis=0)
# data = data[data["LotArea"] < 200000]
# Drop any rows that still contain missing values.
data = data.dropna()

# Hand-picked feature columns used for modeling.

important_features = [
    "OverallQual", "GrLivArea", "1stFlrSF",
    "TotRmsAbvGrd", "FullBath", "YearBuilt", "GarageCars",
    "LotArea",
]

X = data[important_features]
# X = data.iloc[:,1:-1]
# X = pd.get_dummies(X)
y = data["SalePrice"]


# Features plus target in one frame, for the (optional) correlation check below.
df_corr = X.assign(SalePrice=y)

# plt.figure(figsize=(12, 10))
# corr_matrix = df_corr.corr()
# sns.heatmap(corr_matrix, annot=True, fmt=".2f", cmap="coolwarm")
# plt.title("feature correlation heatmap")
# plt.show()
# -------------------------------
# Random forest training
# -------------------------------
# 80/20 train/test split, seeded for reproducibility.
split = train_test_split(X, y, test_size=0.2, random_state=42)
x_train, x_test, y_train, y_test = split
#
# param_grid = {
#     "n_estimators": [300, 500],
#     "max_depth": [10, 15, 20, None],
#     "min_samples_split": [2, 5],
#     "min_samples_leaf": [1, 2]
# }
#
# # Initialize the base model
# rf_base = RandomForestRegressor(random_state=42)
#
# # Grid search
# grid_search = GridSearchCV(
#     estimator=rf_base,
#     param_grid=param_grid,
#     cv=5,
#     scoring="neg_mean_squared_error",
# )
#
# grid_search.fit(x_train, y_train)
#
# print("best params:", grid_search.best_params_)
# print("best score (CV RMSE):", np.sqrt(-grid_search.best_score_))


# Random forest regressor; hyperparameters are close to the winners of the
# commented-out grid search above.
rf1 = RandomForestRegressor(
    n_estimators=550,
    max_depth=15,
    min_samples_split=2,
    min_samples_leaf=1,
    random_state=42
)
rf1.fit(x_train, y_train)

# Bagging ensemble of plain linear regressions, each fit on 80% of the rows.
# Renamed from the original misleading "rf" — this is NOT a random forest.
bagged_lr = BaggingRegressor(estimator=LinearRegression(), max_samples=0.8, n_estimators=500, random_state=42)
bagged_lr.fit(x_train, y_train)

# Persist the fitted models (disabled).
# joblib.dump(rf1, "rf.pkl")
# joblib.dump(bagged_lr, "line.pkl")

# Held-out evaluation of the bagged linear model.
# NOTE(review): RMSE/R^2 below come from the bagging model, while the feature
# importances printed later come from the random forest (rf1) — confirm this
# mix of models is intentional.
y_pred = bagged_lr.predict(x_test)
rmse = np.sqrt(mean_squared_error(y_test, y_pred))
r2 = bagged_lr.score(x_test, y_test)

# Predicted vs. actual prices, with the ideal y = x reference diagonal.
fig, ax = plt.subplots(figsize=(8, 6))
ax.scatter(y_test, y_pred, alpha=0.5, color="blue")
lo, hi = y_test.min(), y_test.max()
ax.plot([lo, hi], [lo, hi], color="red", linewidth=2)
ax.set_xlabel("实际房价")
ax.set_ylabel("预测房价")
ax.set_title("预测值 vs 实际值")
plt.show()

# Report held-out metrics and the forest's feature importances.
print(f"测试集 RMSE: {rmse}")
print(f"测试集 R^2: {r2}")
print("特征重要性:", rf1.feature_importances_)

# import pandas as pd
# from sqlalchemy import create_engine
#
# NOTE(review): real database password redacted — never commit credentials.
# engine = create_engine("mysql+pymysql://root:<password>@localhost:3306/hqyj?charset=utf8mb4")
#
# X = data[important_features].copy()
# X["SalePrice"] = y
# X.insert(0, "id", range(1, len(X)+1))
# X.to_sql("house_data", con=engine, if_exists="replace", index=False)
#
# df = pd.read_sql("SELECT * FROM house_data LIMIT 5", engine)
# print(df)
#

