import pandas as pd
from sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV
from sklearn.ensemble import RandomForestRegressor
from sklearn.preprocessing import StandardScaler
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from sklearn.metrics import mean_squared_error
from sklearn.decomposition import PCA

# Load the training and test datasets from fixed local paths.
train_data = pd.read_csv(r"D:\大数据挖掘分析\train.csv")
test_data = pd.read_csv(r"D:\大数据挖掘分析\test.csv")

# Quick look at the data: first rows and summary statistics.
print(train_data.head())
print(train_data.describe())

# Data cleaning: none performed for this dataset.

# Feature engineering: 'medv' is the regression target; every other column
# is used as a feature.
# NOTE(review): the second experiment later in this file drops an 'ID'
# column before training, but here it is kept as a feature — confirm
# whether that is intended.
X_train = train_data.drop('medv', axis=1)
y_train = train_data['medv']

# The test file may lack the 'medv' column; DataFrame.drop raises KeyError
# in that case, so fall back to using all columns with no labels.
try:
    X_test = test_data.drop('medv', axis=1)
    y_test = test_data['medv']
except KeyError:
    X_test = test_data
    y_test = None

# Transformation and reduction: standardize every column, then project the
# scaled data onto two principal components before the regressor.
preprocessor = ColumnTransformer(
    transformers=[('num', StandardScaler(), X_train.columns)]
)
pca = PCA(n_components=2)

# Assemble the modelling pipeline: scaling -> PCA -> random forest.
pipeline_steps = [
    ('preprocessor', preprocessor),
    ('pca', pca),
    ('regressor', RandomForestRegressor(random_state=42)),
]
model_rf = Pipeline(steps=pipeline_steps)

# Cross-validated error estimate: 5-fold negated-MSE scores, turned into RMSE.
neg_mse_folds = cross_val_score(
    model_rf, X_train, y_train, cv=5, scoring='neg_mean_squared_error'
)
fold_rmse = np.sqrt(-neg_mse_folds)
print("交叉验证RMSE均值:", fold_rmse.mean())

# Hyper-parameter search over forest size and tree depth. Parameter names
# are prefixed with 'regressor__' to target the pipeline's regressor step.
param_grid = {
    'regressor__n_estimators': [50, 100, 200],
    'regressor__max_depth': [None, 5, 10, 20]
}

grid_search = GridSearchCV(model_rf, param_grid, cv=5, scoring='neg_mean_squared_error')
grid_search.fit(X_train, y_train)

# With GridSearchCV's default refit=True, the best configuration is already
# retrained on the full training set, so best_estimator_ is fitted and ready
# to use — the original extra best_model.fit(X_train, y_train) call only
# repeated that work and has been removed.
best_model = grid_search.best_estimator_
best_params = grid_search.best_params_
print("最佳参数:", best_params)

# Hold-out evaluation — only possible when the test file included the target.
if y_test is not None:
    test_predictions = best_model.predict(X_test)
    test_mse = mean_squared_error(y_test, test_predictions)
    print("测试集RMSE:", np.sqrt(test_mse))

# Visualise the training data in the two-dimensional principal-component
# space (fits the standalone scaler and PCA objects on the training set).
scaled_train = preprocessor.fit_transform(X_train)
train_components = pca.fit_transform(scaled_train)

plt.figure(figsize=(8, 6))
sns.scatterplot(x=train_components[:, 0], y=train_components[:, 1], color='b')
plt.xlabel('Principal Component 1')
plt.ylabel('Principal Component 2')
plt.title('PCA Analysis')
plt.show()

# Feature-importance visualisation for the tuned pipeline.
# The regressor sits behind a PCA step (n_components=2), so its
# feature_importances_ refer to the PCA components, not the original
# columns — the original length check therefore always failed and only
# printed an error. Label importances appropriately instead.
feat_importances = best_model.named_steps['regressor'].feature_importances_

if len(feat_importances) == len(X_train.columns):
    # Importances line up with the original columns (no dimensionality
    # reduction in effect) — use the column names directly.
    feature_labels = X_train.columns
else:
    # Regressor was trained on PCA components; label by component index.
    feature_labels = [f'PC{i + 1}' for i in range(len(feat_importances))]

feat_importances_df = pd.DataFrame({'feature': feature_labels, 'importance': feat_importances})
feat_importances_df = feat_importances_df.sort_values(by='importance', ascending=False)

plt.figure(figsize=(10, 6))
sns.barplot(x='importance', y='feature', data=feat_importances_df)
plt.xlabel('Feature Importance')
plt.ylabel('Feature')
plt.title('Feature Importance of Random Forest Model')
plt.show()

# --- Second, standalone experiment: plain random forest without PCA ---
# Separate features and target; unlike the pipeline above, this version
# also drops the 'ID' column so it is not used as a predictor.
X = train_data.drop(columns=['ID', 'medv'])
y = train_data['medv']

# Hold out 20% of the data for validation.
# NOTE(review): this rebinds X_train/y_train, shadowing the full training
# set used by the pipeline section above — confirm that is intended.
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=42)

# Random forest with a fixed seed for reproducibility.
model = RandomForestRegressor(n_estimators=100, random_state=42)

# Fit on the training split.
model.fit(X_train, y_train)

# Predict on the held-out validation split.
y_pred = model.predict(X_val)

# Root-mean-squared error (RMSE) helper.
def root_mean_squared_error(y_true, y_pred):
    """Return the RMSE between two equal-shape sequences of values.

    Implemented directly with NumPy instead of routing through
    sklearn.metrics.mean_squared_error: the computation is trivial and
    this keeps the helper self-contained.

    Args:
        y_true: array-like of true target values.
        y_pred: array-like of predicted values, same shape as y_true.

    Returns:
        float: sqrt of the mean squared difference.

    Raises:
        ValueError: if the two inputs have different shapes (matching the
            behaviour of sklearn's metric, which also rejects mismatched
            inputs rather than silently broadcasting).
    """
    actual = np.asarray(y_true, dtype=float)
    predicted = np.asarray(y_pred, dtype=float)
    if actual.shape != predicted.shape:
        raise ValueError("y_true and y_pred must have the same shape")
    return float(np.sqrt(np.mean((actual - predicted) ** 2)))

# Validation-set RMSE for the plain random-forest model.
rmse = root_mean_squared_error(y_val, y_pred)
print("模型在验证集上的RMSE：", rmse)

# Feature-importance analysis: rank the original columns by how heavily the
# forest relied on them.
feature_importance = model.feature_importances_
feature_names = X.columns
feature_importance_df = pd.DataFrame(
    {'Feature': feature_names, 'Importance': feature_importance}
).sort_values(by='Importance', ascending=False)

# Horizontal bar chart of the ranked importances.
plt.figure(figsize=(10, 6))
plt.barh(feature_importance_df['Feature'], feature_importance_df['Importance'])
plt.xlabel('Importance')
plt.ylabel('Feature')
plt.title('Feature Importance')
plt.show()