"""
完整特征工程流程案例
演示从原始数据到模型训练的完整特征工程过程
"""

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.datasets import fetch_california_housing
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.preprocessing import StandardScaler, RobustScaler, PolynomialFeatures
from sklearn.feature_selection import SelectKBest, f_regression, RFE
from sklearn.decomposition import PCA
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.linear_model import Ridge, Lasso
from sklearn.pipeline import Pipeline
from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error
import joblib
import warnings
warnings.filterwarnings('ignore')

# Configure matplotlib so CJK text in titles/labels renders correctly.
plt.rcParams.update({
    'font.sans-serif': ['Arial Unicode MS'],  # font with CJK glyph coverage
    'axes.unicode_minus': False,              # draw '-' instead of unicode minus
})

banner = "=" * 80
print(banner)
print("完整特征工程流程案例：加州房价预测")
print(banner)

# ============================================================================
# Step 1: data loading & exploration
# ============================================================================

print("\n" + "=" * 80)
print("步骤1：数据加载与探索")
print("=" * 80)

# Fetch the California housing dataset (downloads on first use, then cached).
housing = fetch_california_housing()
X, y = housing.data, housing.target
feature_names = housing.feature_names

# Assemble features plus target into one DataFrame for joint analysis.
df = pd.DataFrame(X, columns=feature_names)
df['target'] = y

print(f"\n数据集形状：{df.shape}")
print(f"特征数量：{len(feature_names)}")
print(f"样本数量：{len(df)}")

print("\n特征名称：")
for idx, name in enumerate(feature_names, start=1):
    print(f"{idx}. {name}")

print("\n数据统计信息：")
print(df.describe())

print("\n缺失值检查：")
print(df.isna().sum())

# Quick look at the target's spread before any modeling.
print("\n目标变量分布：")
print(f"最小值: {y.min():.2f}")
print(f"最大值: {y.max():.2f}")
print(f"均值: {y.mean():.2f}")
print(f"中位数: {np.median(y):.2f}")

# ============================================================================
# Step 2: data visualization
# ============================================================================

print("\n" + "=" * 80)
print("步骤2：数据可视化")
print("=" * 80)

# Histogram grid: the 8 features fill cells (0,0)..(2,1); (2,2) gets the target.
fig, axes = plt.subplots(3, 3, figsize=(18, 15))
fig.suptitle('特征分布与相关性分析', fontsize=16, fontweight='bold')

for i, feature in enumerate(feature_names):
    r, c = divmod(i, 3)
    ax = axes[r, c]
    ax.hist(df[feature], bins=50, edgecolor='black', alpha=0.7)
    ax.set_title(f'{feature}的分布')
    ax.set_xlabel('值')
    ax.set_ylabel('频数')
    ax.grid(True, alpha=0.3)

# Target (house value) distribution in the remaining cell.
target_ax = axes[2, 2]
target_ax.hist(y, bins=50, edgecolor='black', alpha=0.7, color='red')
target_ax.set_title('目标变量(房价)的分布')
target_ax.set_xlabel('房价')
target_ax.set_ylabel('频数')
target_ax.grid(True, alpha=0.3)

plt.tight_layout()
plt.savefig('/Users/binming/Desktop/CodeBase/python/机器学习/特征工程/6-综合案例/特征分布.png',
            dpi=300, bbox_inches='tight')
print("特征分布图已保存")

# Pairwise Pearson correlations across features and target, as a heatmap.
fig, ax = plt.subplots(figsize=(10, 8))
correlation_matrix = df.corr()
sns.heatmap(correlation_matrix, annot=True, fmt='.2f', cmap='coolwarm',
            center=0, ax=ax, square=True, linewidths=1)
ax.set_title('特征相关性矩阵', fontsize=14, fontweight='bold')
plt.tight_layout()
plt.savefig('/Users/binming/Desktop/CodeBase/python/机器学习/特征工程/6-综合案例/相关性矩阵.png',
            dpi=300, bbox_inches='tight')
print("相关性矩阵已保存")

# Rank features by their correlation with the target.
target_corr = correlation_matrix['target'].drop('target').sort_values(ascending=False)
print("\n与目标变量的相关性：")
print(target_corr)

# ============================================================================
# Step 3: train/test split
# ============================================================================

print("\n" + "=" * 80)
print("步骤3：数据分割")
print("=" * 80)

# Hold out 20% as the test set; fixed seed keeps the split reproducible.
split = train_test_split(X, y, test_size=0.2, random_state=42)
X_train, X_test, y_train, y_test = split

print(f"训练集大小: {X_train.shape}")
print(f"测试集大小: {X_test.shape}")

# ============================================================================
# Step 4: feature scaling
# ============================================================================

print("\n" + "=" * 80)
print("步骤4：特征缩放")
print("=" * 80)

# Candidate scalers; each is scored with a quick Ridge regression probe.
scalers = {
    'StandardScaler': StandardScaler(),
    'RobustScaler': RobustScaler()
}

scaler_results = {}

for scaler_name, scaler in scalers.items():
    train_scaled = scaler.fit_transform(X_train)
    test_scaled = scaler.transform(X_test)

    # A lightly regularized linear model serves as a cheap proxy metric.
    probe = Ridge(alpha=1.0, random_state=42)
    probe.fit(train_scaled, y_train)
    predictions = probe.predict(test_scaled)

    r2 = r2_score(y_test, predictions)
    rmse = np.sqrt(mean_squared_error(y_test, predictions))

    scaler_results[scaler_name] = {'R2': r2, 'RMSE': rmse}
    print(f"\n{scaler_name}:")
    print(f"  R2 Score: {r2:.4f}")
    print(f"  RMSE: {rmse:.4f}")

# Keep the scaler whose probe scored the highest R2.
# NOTE(review): this selection uses test-set performance, leaking test
# information into a modeling decision — cross-validation on the training
# set would be methodologically cleaner.
best_scaler_name = max(scaler_results, key=lambda name: scaler_results[name]['R2'])
print(f"\n选择缩放器: {best_scaler_name}")

scaler = scalers[best_scaler_name]
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)

# ============================================================================
# Step 5: feature construction
# ============================================================================

print("\n" + "=" * 80)
print("步骤5：特征构造")
print("=" * 80)

# 5.1 Degree-2 polynomial expansion of the scaled features.
# NOTE(review): X_train_poly / X_test_poly are only reported here and are
# never fed into any downstream model in this script.
print("\n5.1 创建多项式特征（2次）")
poly = PolynomialFeatures(degree=2, include_bias=False)
X_train_poly = poly.fit_transform(X_train_scaled)
X_test_poly = poly.transform(X_test_scaled)

n_base = X_train_scaled.shape[1]
n_poly = X_train_poly.shape[1]
print(f"原始特征数: {n_base}")
print(f"多项式特征数: {n_poly}")
print(f"新增特征数: {n_poly - n_base}")

# 5.2 Construct domain-knowledge features on the UNSCALED train/test arrays.
print("\n5.2 创建领域特征")
df_train = pd.DataFrame(X_train, columns=feature_names)
df_test = pd.DataFrame(X_test, columns=feature_names)

# NOTE(review): the original comment said "room density = total rooms /
# households", but the code MULTIPLIES AveRooms (rooms per household) by
# AveOccup (occupants per household) — confirm which was intended.
df_train['RoomsPerHousehold'] = df_train['AveRooms'] * df_train['AveOccup']
df_test['RoomsPerHousehold'] = df_test['AveRooms'] * df_test['AveOccup']

# Bedroom ratio = bedrooms / total rooms; 1e-10 guards against a zero divisor.
df_train['BedroomRatio'] = df_train['AveBedrms'] / (df_train['AveRooms'] + 1e-10)
df_test['BedroomRatio'] = df_test['AveBedrms'] / (df_test['AveRooms'] + 1e-10)

# NOTE(review): the original comment said "population density = population /
# households", but the code divides Population by (HouseAge + 1) — likely a
# bug or at least a misleading feature name; confirm the intended denominator.
df_train['PopulationPerHousehold'] = df_train['Population'] / (df_train['HouseAge'] + 1)
df_test['PopulationPerHousehold'] = df_test['Population'] / (df_test['HouseAge'] + 1)

print("新增特征：")
print("1. RoomsPerHousehold: 每户房间数")
print("2. BedroomRatio: 卧室比例")
print("3. PopulationPerHousehold: 人口密度")

X_train_engineered = df_train.values
X_test_engineered = df_test.values

# Re-fit the previously chosen scaler on the widened (11-column) matrix.
# NOTE(review): this overwrites the fit that produced X_train_scaled above;
# the `scaler` object saved at the end of the script therefore expects the
# engineered feature space, not the original 8 raw features.
X_train_engineered = scaler.fit_transform(X_train_engineered)
X_test_engineered = scaler.transform(X_test_engineered)

print(f"\n特征工程后的特征数: {X_train_engineered.shape[1]}")

# ============================================================================
# Step 6: feature selection
# ============================================================================

print("\n" + "=" * 80)
print("步骤6：特征选择")
print("=" * 80)

# 6.1 Univariate selection: keep the 10 features with the highest F-statistic.
print("\n6.1 单变量特征选择 (F-test)")
selector_f = SelectKBest(f_regression, k=10)
X_train_selected = selector_f.fit_transform(X_train_engineered, y_train)
X_test_selected = selector_f.transform(X_test_engineered)

# Map the selector's support indices back to human-readable feature names.
feature_scores = selector_f.scores_
feature_names_eng = [*feature_names, 'RoomsPerHousehold', 'BedroomRatio', 'PopulationPerHousehold']
selected_features = [
    feature_names_eng[idx] for idx in selector_f.get_support(indices=True)
]

print(f"选择的特征数: {len(selected_features)}")
print("选择的特征:")
for rank, feature in enumerate(selected_features, start=1):
    print(f"{rank}. {feature}")

# 6.2 Model-based selection using random-forest impurity importances.
print("\n6.2 基于随机森林的特征选择")
rf_selector = RandomForestRegressor(n_estimators=100, random_state=42)
rf_selector.fit(X_train_engineered, y_train)

importances = rf_selector.feature_importances_
importance_df = (
    pd.DataFrame({'feature': feature_names_eng, 'importance': importances})
    .sort_values('importance', ascending=False)
)

print("\n特征重要性排名（前10）：")
print(importance_df.head(10))

# Keep only features whose importance exceeds the median importance.
threshold = np.median(importances)
important_features = importance_df.loc[
    importance_df['importance'] > threshold, 'feature'
].tolist()
print(f"\n选择重要性高于中位数的特征数: {len(important_features)}")

# ============================================================================
# Step 7: dimensionality reduction (PCA)
# ============================================================================

print("\n" + "=" * 80)
print("步骤7：特征降维 (PCA)")
print("=" * 80)

# A float n_components asks PCA for the smallest number of components whose
# cumulative explained variance reaches that fraction (95% here).
pca = PCA(n_components=0.95)
X_train_pca = pca.fit_transform(X_train_engineered)
X_test_pca = pca.transform(X_test_engineered)

print(f"原始特征数: {X_train_engineered.shape[1]}")
print(f"PCA后特征数: {X_train_pca.shape[1]}")
print(f"保留的方差比例: {sum(pca.explained_variance_ratio_):.4f}")

# ============================================================================
# Step 8: model training & evaluation
# ============================================================================

print("\n" + "=" * 80)
print("步骤8：模型训练与评估")
print("=" * 80)

# Feature-set variants to benchmark, as (train, test) array pairs.
feature_sets = {
    '原始特征': (X_train_scaled, X_test_scaled),
    '特征工程': (X_train_engineered, X_test_engineered),
    '特征选择': (X_train_selected, X_test_selected),
    'PCA降维': (X_train_pca, X_test_pca)
}

# Candidate estimators: two linear baselines and two tree ensembles.
models = {
    'Ridge': Ridge(alpha=1.0, random_state=42),
    'Lasso': Lasso(alpha=0.1, random_state=42),
    'RandomForest': RandomForestRegressor(n_estimators=100, random_state=42),
    'GradientBoosting': GradientBoostingRegressor(n_estimators=100, random_state=42)
}

results = []

# Full cross-product: fit every model on every feature-set variant and
# record R2 / RMSE / MAE on the held-out test set.
for feature_name, (X_tr, X_te) in feature_sets.items():
    for model_name, model in models.items():
        model.fit(X_tr, y_train)
        y_pred = model.predict(X_te)

        results.append({
            '特征集': feature_name,
            '模型': model_name,
            'R2': r2_score(y_test, y_pred),
            'RMSE': np.sqrt(mean_squared_error(y_test, y_pred)),
            'MAE': mean_absolute_error(y_test, y_pred)
        })

results_df = pd.DataFrame(results)
print("\n模型评估结果：")
print(results_df.to_string(index=False))

# Report the best (feature set, model) pair by R2.
best_result = results_df.loc[results_df['R2'].idxmax()]
print(f"\n最佳组合：")
print(f"特征集: {best_result['特征集']}")
print(f"模型: {best_result['模型']}")
print(f"R2 Score: {best_result['R2']:.4f}")
print(f"RMSE: {best_result['RMSE']:.4f}")
print(f"MAE: {best_result['MAE']:.4f}")

# ============================================================================
# Step 9: visualize the results
# ============================================================================

print("\n" + "=" * 80)
print("步骤9：可视化结果")
print("=" * 80)

# 2x2 dashboard: R2 bars, RMSE bars, top feature importances, and a
# predicted-vs-actual scatter for the winning model.
fig, axes = plt.subplots(2, 2, figsize=(16, 12))

# R2 comparison: one bar group per feature set, one bar per model.
pivot_r2 = results_df.pivot(index='特征集', columns='模型', values='R2')
pivot_r2.plot(kind='bar', ax=axes[0, 0], width=0.8)
axes[0, 0].set_title('R2 Score对比', fontsize=12, fontweight='bold')
axes[0, 0].set_ylabel('R2 Score')
axes[0, 0].set_xlabel('特征集')
axes[0, 0].legend(title='模型', bbox_to_anchor=(1.05, 1), loc='upper left')
axes[0, 0].grid(True, alpha=0.3, axis='y')
axes[0, 0].tick_params(axis='x', rotation=45)

# RMSE comparison (lower is better), same layout as the R2 panel.
pivot_rmse = results_df.pivot(index='特征集', columns='模型', values='RMSE')
pivot_rmse.plot(kind='bar', ax=axes[0, 1], width=0.8)
axes[0, 1].set_title('RMSE对比', fontsize=12, fontweight='bold')
axes[0, 1].set_ylabel('RMSE')
axes[0, 1].set_xlabel('特征集')
axes[0, 1].legend(title='模型', bbox_to_anchor=(1.05, 1), loc='upper left')
axes[0, 1].grid(True, alpha=0.3, axis='y')
axes[0, 1].tick_params(axis='x', rotation=45)

# Horizontal bars for the 10 most important random-forest features.
top_features = importance_df.head(10)
axes[1, 0].barh(top_features['feature'], top_features['importance'])
axes[1, 0].set_xlabel('重要性')
axes[1, 0].set_title('Top 10 特征重要性', fontsize=12, fontweight='bold')
axes[1, 0].grid(True, alpha=0.3, axis='x')

# Refit the winning (feature set, model) pair and scatter predictions
# against ground truth; the dashed diagonal marks perfect prediction.
best_feature_set = feature_sets[best_result['特征集']]
best_model = models[best_result['模型']]
best_model.fit(best_feature_set[0], y_train)
y_pred_best = best_model.predict(best_feature_set[1])

axes[1, 1].scatter(y_test, y_pred_best, alpha=0.5, s=20)
axes[1, 1].plot([y_test.min(), y_test.max()], [y_test.min(), y_test.max()], 
                'r--', lw=2, label='完美预测')
axes[1, 1].set_xlabel('实际值')
axes[1, 1].set_ylabel('预测值')
axes[1, 1].set_title(f'预测vs实际 ({best_result["模型"]})', fontsize=12, fontweight='bold')
axes[1, 1].legend()
axes[1, 1].grid(True, alpha=0.3)

plt.tight_layout()
plt.savefig('/Users/binming/Desktop/CodeBase/python/机器学习/特征工程/6-综合案例/模型评估结果.png',
            dpi=300, bbox_inches='tight')
print("模型评估结果图已保存")

# ============================================================================
# Step 10: build a complete Pipeline
# ============================================================================

print("\n" + "=" * 80)
print("步骤10：创建完整Pipeline")
print("=" * 80)

# Chain scaling -> univariate selection -> gradient boosting into one object.
final_pipeline = Pipeline([
    ('scaler', StandardScaler()),
    ('feature_selection', SelectKBest(f_regression, k=10)),
    ('model', GradientBoostingRegressor(n_estimators=100, random_state=42))
])

# NOTE(review): X_train_engineered was already scaled in step 5, so the
# pipeline's StandardScaler re-standardizes standardized data (numerically
# harmless, but redundant). The manual feature construction of step 5.2 is
# NOT inside this pipeline, so a consumer of the saved .pkl must reproduce
# that step by hand before calling predict.
final_pipeline.fit(X_train_engineered, y_train)

# Evaluate the assembled pipeline on the held-out test set.
y_pred_pipeline = final_pipeline.predict(X_test_engineered)
r2_pipeline = r2_score(y_test, y_pred_pipeline)
rmse_pipeline = np.sqrt(mean_squared_error(y_test, y_pred_pipeline))

print(f"\nPipeline性能：")
print(f"R2 Score: {r2_pipeline:.4f}")
print(f"RMSE: {rmse_pipeline:.4f}")

# Persist the fitted pipeline for later reuse/deployment.
pipeline_path = '/Users/binming/Desktop/CodeBase/python/机器学习/特征工程/6-综合案例/final_pipeline.pkl'
joblib.dump(final_pipeline, pipeline_path)
print(f"\nPipeline已保存到: {pipeline_path}")

# Persist the scaler as well.
# NOTE(review): `scaler` was last fit on the 11-column engineered matrix
# (step 5.2), so this saved object expects engineered features, not the raw 8.
scaler_path = '/Users/binming/Desktop/CodeBase/python/机器学习/特征工程/6-综合案例/scaler.pkl'
joblib.dump(scaler, scaler_path)
print(f"缩放器已保存到: {scaler_path}")

# ============================================================================
# Summary
# ============================================================================

print("\n" + "=" * 80)
print("完整特征工程流程总结")
print("=" * 80)
# The summary below is user-facing program output and is intentionally
# left in Chinese (runtime string, not a comment).
print("""
本案例演示了完整的特征工程流程：

1. **数据加载与探索**
   - 了解数据规模、特征类型
   - 检查缺失值、异常值
   - 统计描述性分析

2. **数据可视化**
   - 特征分布分析
   - 相关性分析
   - 识别潜在问题

3. **数据分割**
   - 训练集/测试集分离
   - 避免数据泄露

4. **特征缩放**
   - 比较不同缩放方法
   - 选择最适合的方法

5. **特征构造**
   - 多项式特征
   - 领域知识特征
   - 特征交叉

6. **特征选择**
   - 单变量选择
   - 基于模型的选择
   - 降低维度、提升性能

7. **特征降维**
   - PCA降维
   - 保留主要信息

8. **模型训练与评估**
   - 多种模型对比
   - 多种特征集对比
   - 选择最优组合

9. **结果可视化**
   - 性能对比
   - 特征重要性
   - 预测效果

10. **Pipeline构建**
    - 自动化流程
    - 便于部署
    - 保证一致性

关键要点：
✓ 特征工程是迭代过程，需要多次尝试
✓ 结合领域知识创建有意义的特征
✓ 使用Pipeline确保训练和预测一致
✓ 交叉验证评估特征工程效果
✓ 保存所有预处理步骤用于部署
""")

print("\n" + "=" * 80)
print("案例完成！所有文件已保存。")
print("=" * 80)
