import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
from sklearn.metrics import mean_squared_error
from xgboost import XGBRegressor

# ---------------------- 1. Data loading ----------------------
train = pd.read_csv('input/train.csv')
test = pd.read_csv('input/test.csv')
print(f"训练集形状: {train.shape}, 测试集形状: {test.shape}")

# Split off the regression target; the test set carries no 'SalePrice'
# column — those values are what the script must predict.
y_train = train['SalePrice']
X_train = train.drop(columns='SalePrice')
X_test = test

# ---------------------- 2. Preprocessing ----------------------
# Numeric predictor columns, hand-classified from data_description.txt.
numeric_features = (
    'LotFrontage LotArea YearBuilt YearRemodAdd MasVnrArea '
    'BsmtFinSF1 BsmtFinSF2 BsmtUnfSF TotalBsmtSF 1stFlrSF '
    '2ndFlrSF LowQualFinSF GrLivArea BsmtFullBath BsmtHalfBath '
    'FullBath HalfBath BedroomAbvGr KitchenAbvGr TotRmsAbvGrd '
    'Fireplaces GarageYrBlt GarageCars GarageArea WoodDeckSF '
    'OpenPorchSF EnclosedPorch 3SsnPorch ScreenPorch PoolArea MiscVal'
).split()
# Categorical predictor columns (includes numerically-coded categories
# such as MSSubClass, OverallQual/Cond, MoSold and YrSold, which are
# treated as discrete levels rather than magnitudes).
categorical_features = (
    'MSSubClass MSZoning Street Alley LotShape LandContour '
    'Utilities LotConfig LandSlope Neighborhood Condition1 '
    'Condition2 BldgType HouseStyle OverallQual OverallCond '
    'RoofStyle RoofMatl Exterior1st Exterior2nd MasVnrType '
    'ExterQual ExterCond Foundation BsmtQual BsmtCond BsmtExposure '
    'BsmtFinType1 BsmtFinType2 Heating HeatingQC CentralAir '
    'Electrical KitchenQual Functional FireplaceQu GarageType '
    'GarageFinish GarageQual GarageCond PavedDrive PoolQC Fence '
    'MiscFeature MoSold YrSold'
).split()

# Column-wise preprocessing:
#   numeric      -> median imputation, then standardization
#   categorical  -> most-frequent imputation, then one-hot encoding
#                   (categories unseen at fit time are ignored, so the
#                   test set cannot crash the transform step)
preprocessor = ColumnTransformer(
    transformers=[
        (
            'num',
            Pipeline([('imputer', SimpleImputer(strategy='median')),
                      ('scaler', StandardScaler())]),
            numeric_features,
        ),
        (
            'cat',
            Pipeline([('imputer', SimpleImputer(strategy='most_frequent')),
                      ('onehot', OneHotEncoder(handle_unknown='ignore'))]),
            categorical_features,
        ),
    ])

# ---------------------- 3. Model training ----------------------
# Gradient-boosted trees (XGBoost) chained after the preprocessor so a
# single fit/predict call handles imputation, scaling and encoding too.
model = Pipeline(steps=[
    ('preprocessor', preprocessor),
    ('regressor', XGBRegressor(
        n_estimators=1000,   # number of boosting rounds
        learning_rate=0.05,  # shrinkage applied to each round
        max_depth=5,         # depth cap per tree
        random_state=42      # reproducible fits
    ))
])

# Hold-out evaluation: fit on 80% of the training rows, score the rest.
X_fit, X_hold, y_fit, y_hold = train_test_split(
    X_train, y_train, test_size=0.2, random_state=42
)
model.fit(X_fit, y_fit)
rmse = np.sqrt(mean_squared_error(y_hold, model.predict(X_hold)))
print(f"验证集RMSE: {rmse:.2f}")  # lower is better

# ---------------------- 4. Predict the test set and save ----------------------
# Refit on ALL training rows before the final prediction — the hold-out
# evaluation above trained on only 80% of the data, and the extra 20%
# generally improves the submitted predictions.
model.fit(X_train, y_train)
test_pred = model.predict(X_test)

# Kaggle submission format: one row per test Id with predicted SalePrice.
submission = pd.DataFrame({
    'Id': test['Id'],
    'SalePrice': test_pred
})
# Relative path, consistent with the 'input/' reads above (the previous
# machine-specific absolute path broke on any other machine and
# contradicted the message printed below).
submission.to_csv('submission.csv', index=False)
print("测试集预测结果已保存至 submission.csv")
