{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "4d8078652b4c85e3",
   "metadata": {},
   "source": [
    "### 1.导入需要使用的库"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "125feae406d269bb",
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import pandas as pd\n",
    "import matplotlib.pyplot as plt\n",
    "import seaborn as sns\n",
    "import warnings\n",
    "warnings.filterwarnings(\"ignore\")\n",
    "pd.set_option('display.max_rows', 500)\n",
    "pd.set_option('display.max_columns', 500)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "e4df91c346559861",
   "metadata": {},
   "source": [
    "### 2.导入数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "eda8e6a4746d425d",
   "metadata": {},
   "outputs": [],
   "source": [
    "Ames  = pd.read_csv('../ames-housing-dataset/AmesHousing.csv')\n",
    "Train = pd.read_csv('../house-prices-advanced-regression-techniques/train.csv')\n",
    "Test  = pd.read_csv('../house-prices-advanced-regression-techniques/test.csv')"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "ed9fa3e933f46b85",
   "metadata": {},
   "source": [
    "### 3.特征对齐"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d0f32cad6a85236b",
   "metadata": {},
   "outputs": [],
   "source": [
    "cols_dic = dict(Ames_cols= Ames.columns,\n",
    "                Train_cols = Train.columns,\n",
    "                Test_cols= Test.columns)\n",
    "cols_df = pd.DataFrame.from_dict(cols_dic, orient='index').transpose()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b98d41a068f7ffea",
   "metadata": {},
   "outputs": [],
   "source": [
    "cols_df"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d535725d3b77a90d",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 可以看到 ames 和后两列数据还是有些差距，需要进行对齐\n",
    "Ames = Ames.drop(['Order', 'PID'], axis = 1)\n",
    "Train = Train.drop('Id', axis = 1)\n",
    "# 查看对齐后结果\n",
    "# cols_dic = dict(Ames_cols= Ames.columns,\n",
    "#                 Train_cols = Train.columns)\n",
    "# cols_df = pd.DataFrame.from_dict(cols_dic, orient='index').transpose()\n",
    "# cols_df"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "82487c75a0a20d1",
   "metadata": {},
   "outputs": [],
   "source": [
     "# 重命名 Ames 的特征\n",
    "cols_dic = dict(zip(Ames.columns, Train.columns))\n",
    "Ames = Ames.rename(columns=cols_dic)\n",
    "df_train = pd.concat([Train, Ames])\n",
    "\n",
    "# 查看重命名后结果\n",
    "# cols_dic = dict(Ames_cols= Ames.columns,\n",
    "#                 Train_cols = Train.columns)\n",
    "# cols_df = pd.DataFrame.from_dict(cols_dic, orient='index').transpose()\n",
    "# cols_df"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "6ab42de4138893c2",
   "metadata": {},
   "source": [
    "### 4.删除重复项和重置索引"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "32016dd1beffaf17",
   "metadata": {},
   "outputs": [],
   "source": [
    "df_train = df_train.drop_duplicates()\n",
    "df_train = df_train.reset_index().drop('index', axis = 1)\n",
    "df_train['Id'] = range(len(df_train))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "73e497a0ea0cbf36",
   "metadata": {},
   "outputs": [],
   "source": [
    "df_train"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "27937b4a",
   "metadata": {},
   "outputs": [],
   "source": [
    "df_test = Test.copy()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "1b6e58a4",
   "metadata": {},
   "source": [
    "### 5.变量关系检查"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "dfa856b1",
   "metadata": {},
   "outputs": [],
   "source": [
    "sns.displot(df_train['SalePrice'], kde = True,)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "58d13679",
   "metadata": {},
   "outputs": [],
   "source": [
    "df_train['SalePrice'] = df_train['SalePrice'].apply(np.log)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f17cbf4c",
   "metadata": {},
   "outputs": [],
   "source": [
    "sns.displot(df_train['SalePrice'], kde = True)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "422dc221",
   "metadata": {},
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a9ac2d03",
   "metadata": {},
   "outputs": [],
   "source": [
    "all_data = pd.concat([df_train.drop('SalePrice', axis = 1), df_test])"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "77615344",
   "metadata": {},
   "source": [
    "### 6.缺失数据检查\n",
    "    0 and None"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b52d16ec",
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "def miss_data(data):\n",
    "    Miss_df = ((data == 0).sum()).to_frame()\n",
    "    Miss_df = Miss_df.rename(columns = {0: 'zeros'})\n",
    "    Miss_df.index.name = 'Feature'\n",
    "    Miss_df['np.nan'] = (data.isnull()).sum()\n",
    "    Miss_df['None'] = (data == \"None\").sum()\n",
    "    Miss_df['total'] = Miss_df['zeros'] + Miss_df['np.nan'] + Miss_df['None']\n",
    "    Miss_df['Percent'] = 100*Miss_df['total']/len(data)\n",
    "    Miss_df['Type'] = [data[i].dtype for i in Miss_df.index ]\n",
    "    return    Miss_df.sort_values(ascending = False, by = 'Percent')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4a91dabf",
   "metadata": {},
   "outputs": [],
   "source": [
    "miss_data(all_data)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "fcc25d0a",
   "metadata": {},
   "source": [
    "### 7.数据清洗"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "f445f0ad",
   "metadata": {},
   "source": [
    "**补全缺失数据**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "55285d13",
   "metadata": {},
   "outputs": [],
   "source": [
    "cat_feat = ['GarageFinish', 'GarageQual', 'GarageCond', 'MSZoning',\n",
    "                  'Utilities', 'Functional', 'Exterior2nd', 'Exterior1st', 'SaleType', 'KitchenQual']\n",
    "num_feat = ['BsmtFullBath', 'BsmtHalfBath', 'GarageCars', 'GarageArea', 'TotalBsmtSF', 'BsmtUnfSF', 'BsmtFinSF2',\n",
    "                  'BsmtFinSF1']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ca39c9fe",
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn.impute import SimpleImputer"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "846e57b6",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 训练数据\n",
    "cat_imputer_train         = SimpleImputer(missing_values = np.nan, strategy = 'most_frequent')\n",
    "df_train.loc[:,cat_feat]  = cat_imputer_train.fit_transform(df_train.loc[:, cat_feat])\n",
    "num_imputer_train         = SimpleImputer(missing_values= np.nan, strategy = 'mean')\n",
    "df_train.loc[:, num_feat] = num_imputer_train.fit_transform(df_train.loc[:, num_feat])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "be3cea64",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 测试数据\n",
    "cat_imputer_test = SimpleImputer(missing_values = np.nan, strategy = 'most_frequent')\n",
    "df_test.loc[:,cat_feat] = cat_imputer_test.fit_transform(df_test.loc[:, cat_feat])\n",
    "num_imputer_test = SimpleImputer(missing_values= np.nan, strategy = 'mean')\n",
    "df_test.loc[:, num_feat] = num_imputer_test.fit_transform(df_test.loc[:, num_feat])"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "9644aaff",
   "metadata": {},
   "source": [
    "**加权构造新的数据特征**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a2a62a5d",
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "df_train['Bath_total'] = df_train['FullBath'] + 0.5*df_train['HalfBath'] + df_train['BsmtFullBath'] + 0.5*df_train['BsmtHalfBath']\n",
    "df_test['Bath_total'] = df_test['FullBath'] + 0.5*df_test['HalfBath'] + df_test['BsmtFullBath'] + 0.5*df_test['BsmtHalfBath']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "67d7795b",
   "metadata": {},
   "outputs": [],
   "source": [
    "df_train['BsmtFinSF'] = df_train['BsmtFinSF1'] + df_train['BsmtFinSF2'] - df_train['BsmtUnfSF']\n",
    "df_test['BsmtFinSF'] = df_test['BsmtFinSF1'] + df_test['BsmtFinSF2'] - df_test['BsmtUnfSF']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c25423a5",
   "metadata": {},
   "outputs": [],
   "source": [
    "df_train['Porch'] = df_train['OpenPorchSF'] + df_train['EnclosedPorch'] + df_train['3SsnPorch'] + df_train['ScreenPorch']\n",
    "df_test['Porch'] = df_test['OpenPorchSF'] + df_test['EnclosedPorch'] + df_test['3SsnPorch'] + df_test['ScreenPorch']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7b39a795",
   "metadata": {},
   "outputs": [],
   "source": [
    "df_train['FinSF'] = df_train['1stFlrSF'] + df_train['2ndFlrSF'] - df_train['LowQualFinSF']\n",
    "df_test['FinSF'] = df_test['1stFlrSF'] + df_test['2ndFlrSF'] - df_test['LowQualFinSF']"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "81943119",
   "metadata": {},
   "source": [
    "**去除数据缺失严重的特征列**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6b60dfc2",
   "metadata": {},
   "outputs": [],
   "source": [
    "cols_to_drop = ['PoolQC', 'PoolArea', 'MiscFeature', 'MiscVal', 'Alley', 'Fence', 'WoodDeckSF',\n",
    "            'FireplaceQu', 'Fireplaces', 'MasVnrType', 'MasVnrArea',\n",
    "            'FullBath', 'HalfBath' , 'BsmtFullBath' ,'BsmtHalfBath',\n",
    "            'BsmtFinSF1' , 'BsmtFinSF2' , 'BsmtUnfSF', 'OpenPorchSF', 'EnclosedPorch', '3SsnPorch', 'ScreenPorch',\n",
    "            '1stFlrSF', '2ndFlrSF', 'LowQualFinSF']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5b9ffaff",
   "metadata": {},
   "outputs": [],
   "source": [
    "df_train = df_train.drop(cols_to_drop, axis = 1)\n",
    "df_test = df_test.drop(cols_to_drop, axis = 1)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "8686699d",
   "metadata": {},
   "source": [
    "**处理np.nan数据**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b4cbdbe7",
   "metadata": {},
   "outputs": [],
   "source": [
    "Cols_to_None = ['BsmtQual','BsmtCond','BsmtExposure', 'BsmtFinType1', 'BsmtFinType2', 'GarageType', \n",
    "                        'GarageFinish', 'GarageQual', 'GarageCond']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6422d272",
   "metadata": {},
   "outputs": [],
   "source": [
    "df_train[Cols_to_None] = df_train.loc[:,Cols_to_None].replace(to_replace = np.nan, value = \"None\", inplace = False)\n",
    "df_test[Cols_to_None] = df_test.loc[:,Cols_to_None].replace(to_replace = np.nan, value = \"None\", inplace = False)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "049e1389",
   "metadata": {},
   "source": [
    "**0填充**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2daf5858",
   "metadata": {},
   "outputs": [],
   "source": [
    "cols_to_zero = ['GarageYrBlt', 'GarageArea', 'GarageCars']\n",
    "df_train[cols_to_zero] = df_train[cols_to_zero].fillna(0)\n",
    "df_test[cols_to_zero] = df_test[cols_to_zero].fillna(0)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "612f7476",
   "metadata": {},
   "source": [
    "**使用XGBoost 回归模型来预测并填充 LotFrontage 这个特征的缺失值**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e16e59a9",
   "metadata": {},
   "outputs": [],
   "source": [
    "df_train_totrain = df_train[df_train['LotFrontage'].isnull() == False]\n",
    "df_train_totest = df_train[df_train['LotFrontage'].isnull() == True]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "cc01018a",
   "metadata": {},
   "outputs": [],
   "source": [
    "from xgboost import XGBRegressor\n",
    "reg = XGBRegressor(n_estimators = 5000, learning_rate = 0.01)\n",
    "reg.fit(df_train_totrain.select_dtypes(exclude = 'object').drop(['LotFrontage', 'SalePrice', 'Id'], axis = 1), \n",
    "        df_train_totrain['LotFrontage'], verbose = False)\n",
    "df_train_totest['LotFrontage'] = reg.predict(df_train_totest.select_dtypes(exclude = 'object').drop(['LotFrontage', 'SalePrice', 'Id'], axis = 1))\n",
    "df_train = pd.concat([df_train_totrain, df_train_totest])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0e044e79",
   "metadata": {},
   "outputs": [],
   "source": [
    "df_test_totrain = df_test[df_test['LotFrontage'].isnull() == False]\n",
    "df_test_totest = df_test[df_test['LotFrontage'].isnull() == True]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "33e62ee0",
   "metadata": {},
   "outputs": [],
   "source": [
    "from xgboost import XGBRegressor\n",
    "reg_test = XGBRegressor(n_estimators = 5000, learning_rate = 0.01)\n",
    "reg_test.fit(df_test_totrain.select_dtypes(exclude = 'object').drop(['LotFrontage', 'Id'], axis = 1), df_test_totrain['LotFrontage'], \n",
    "        verbose = False)\n",
    "df_test_totest['LotFrontage'] = reg_test.predict(df_test_totest.select_dtypes(exclude = 'object').drop(['LotFrontage', 'Id'], axis = 1))\n",
    "df_test = pd.concat([df_test_totrain, df_test_totest])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "00eb93d5",
   "metadata": {},
   "outputs": [],
   "source": [
    "df_train = df_train.sort_values('Id', ascending = True)\n",
    "df_test = df_test.sort_values('Id', ascending = True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9e64b538",
   "metadata": {},
   "outputs": [],
   "source": [
    "miss_data(data = df_train)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "03c83671",
   "metadata": {},
   "outputs": [],
   "source": [
    "miss_data(data = df_test)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "81057d66",
   "metadata": {},
   "source": [
    "**处理类别特征**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4c7be2e0",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 转换日期特征\n",
    "date_cols = ['YearBuilt', 'YearRemodAdd', 'GarageYrBlt']\n",
    "df_train[date_cols] = round(df_train[date_cols]/5)*5\n",
    "df_test[date_cols] = round(df_test[date_cols]/5)*5"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0535409f",
   "metadata": {},
   "outputs": [],
   "source": [
    "def compare_cols(cols):\n",
    "    s=1\n",
    "    for i in cols:\n",
    "        print(f\"feature is: {i}\")\n",
    "        train_unique_count = len(df_train[i].unique())\n",
    "        test_unique_count = len(df_test[i].unique())\n",
    "        print(f\"# of train unique values is: {train_unique_count}\")\n",
    "        print(f\"# of test unique values is: {test_unique_count}\")\n",
    "        all_values = set(df_train[i]).union(set(df_test[i]))\n",
    "        D = pd.DataFrame({'Unique': list(all_values)})\n",
    "        D['train_count'] = [len(df_train[df_train[i] == j]) for j in all_values]\n",
    "        D['train_percent'] = (D['train_count']/ len(df_train)) * 100\n",
    "        D['test_count'] = [len(df_test[df_test[i] == j]) for j in all_values]\n",
    "        D['test_percent'] = (D['test_count']/ len(df_test)) * 100\n",
    "        print(D.sort_values('train_count', ascending = False))\n",
    "        print('--------------------')\n",
    "        print(f\"Total number of navigated features is: {s}\")\n",
    "        s+=1\n",
    "        print('--------------------') "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6c650331",
   "metadata": {},
   "outputs": [],
   "source": [
    "compare_cols(cols = date_cols)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9518564d",
   "metadata": {},
   "outputs": [],
   "source": [
    "df_train['YearBuilt'] = df_train['YearBuilt'].replace({1885.0:1890.0, 1870.0:1880.0, 1875.0:1880.0})\n",
    "df_train['GarageYrBlt'] = df_train['GarageYrBlt'].replace(to_replace= 1905.0, value= 1910.0, inplace = False)\n",
    "df_train['GarageYrBlt'] = df_train['GarageYrBlt'].replace(to_replace= 2205.0, value= 2005.0, inplace = False)\n",
    "df_test['GarageYrBlt'] = df_test['GarageYrBlt'].replace(to_replace= 2205.0, value= 2005.0, inplace = False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ccd1307c",
   "metadata": {},
   "outputs": [],
   "source": [
    "compare_cols(cols = date_cols)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f7b80bed",
   "metadata": {},
   "outputs": [],
   "source": [
    "df_train['MSSubClass'] = df_train['MSSubClass'].astype(str)\n",
    "df_train['YearBuilt'] = df_train['YearBuilt'].astype(str)\n",
    "df_train['YearRemodAdd'] = df_train['YearRemodAdd'].astype(str)\n",
    "df_train['GarageYrBlt'] = df_train['GarageYrBlt'].astype(str)\n",
    "df_train['YrSold'] = df_train['YrSold'].astype(str)\n",
    "df_train['MoSold'] = df_train['MoSold'].astype(str)\n",
    "\n",
    "df_test['MSSubClass'] = df_test['MSSubClass'].astype(str)\n",
    "df_test['YearBuilt'] = df_test['YearBuilt'].astype(str)\n",
    "df_test['YearRemodAdd'] = df_test['YearRemodAdd'].astype(str)\n",
    "df_test['GarageYrBlt'] = df_test['GarageYrBlt'].astype(str)\n",
    "df_test['YrSold'] = df_test['YrSold'].astype(str)\n",
    "df_test['MoSold'] = df_test['MoSold'].astype(str)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0e39df66",
   "metadata": {},
   "outputs": [],
   "source": [
    "compare_cols(cols = df_train.select_dtypes(include= 'object').columns)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d70ef771",
   "metadata": {},
   "outputs": [],
   "source": [
    "for i in df_train.select_dtypes(include= 'object').columns:\n",
    "    Max = max([len(df_train[df_train[i] == j]) for j in df_train[i].unique()])\n",
    "    Percent = round(100*Max/len(df_train))\n",
    "    if Percent >= 90:\n",
    "        print(i)\n",
    "    else:\n",
    "        pass"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "34cb61ba",
   "metadata": {},
   "source": [
    "**删除部分数据严重缺失的列**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "36506a93",
   "metadata": {},
   "outputs": [],
   "source": [
    "cols_to_drop_2 = ['Street', 'LandContour', 'Utilities', 'LandSlope', 'RoofMatl', 'BsmtCond',\n",
    "                  'Heating', 'CentralAir', 'Electrical', 'Functional', 'GarageQual', \n",
    "                  'GarageCond', 'PavedDrive']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a57c611c",
   "metadata": {},
   "outputs": [],
   "source": [
    "df_train = df_train.drop(cols_to_drop_2, axis = 1)\n",
    "df_test  = df_test.drop(cols_to_drop_2, axis = 1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "730ef9ee",
   "metadata": {},
   "outputs": [],
   "source": [
    "df_train['MSZoning'] = df_train['MSZoning'].replace(to_replace=['A (agr)', 'I (all)'], value = 'C (all)', inplace=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "fd568bb7",
   "metadata": {},
   "outputs": [],
   "source": [
    "df_train['Neighborhood'] = df_train['Neighborhood'].replace(to_replace=['Greens', 'GrnHill','Landmrk'], value = 'NAmes', inplace=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c26662ae",
   "metadata": {},
   "outputs": [],
   "source": [
    "df_train['Condition'] = df_train['Condition1'] + '_' + df_train['Condition2']\n",
    "df_test['Condition'] = df_test['Condition1'] + '_' + df_test['Condition2']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4129de8b",
   "metadata": {},
   "outputs": [],
   "source": [
    "df_train = df_train.drop(['Condition1', 'Condition2'], axis =1)\n",
    "df_test = df_test.drop(['Condition1', 'Condition2'], axis =1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8e16b345",
   "metadata": {},
   "outputs": [],
   "source": [
    "condition_unique_vaues_totoal = set(df_train['Condition'].unique()).union(set(df_test['Condition'].unique()))\n",
    "unique_count_train = len(df_train['Condition'].unique())\n",
    "unique_count_test = len(df_test['Condition'].unique())\n",
    "print(f\"No. of train unique values is: {unique_count_train}\")\n",
    "print(f\"No. of test unique values is: {unique_count_test}\")\n",
    "df_count = pd.DataFrame({'Unique': list(condition_unique_vaues_totoal)})\n",
    "df_count['train_count'] = [len(df_train[df_train['Condition'] == j]) for j in condition_unique_vaues_totoal]\n",
    "df_count['train_percent'] = (df_count['train_count']/len(df_train)) * 100\n",
    "df_count['test_count'] = [len(df_test[df_test['Condition'] == j]) for j in condition_unique_vaues_totoal]\n",
    "df_count['test_percent'] = (df_count['test_count']/len(df_test)) * 100\n",
    "df_count.sort_values('train_count', ascending = False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b9eefdd1",
   "metadata": {},
   "outputs": [],
   "source": [
    "to_replace_train = [i for i in df_train['Condition'].unique() if df_train['Condition'].value_counts()[i] < 5]\n",
    "to_replace_test = [i for i in df_test['Condition'].unique() if df_test['Condition'].value_counts()[i] < 5]\n",
    "df_train['Condition'] = df_train['Condition'].replace(to_replace= to_replace_train, value = 'Other', inplace=False)\n",
    "df_test['Condition'] = df_test['Condition'].replace(to_replace = to_replace_test, value = 'Other', inplace = False)\n",
    "df_train['Condition'] = df_train['Condition'].replace(to_replace=['RRAn_Feedr', 'RRNn_Norm', 'RRNe_Norm', 'PosN_PosN', 'Feedr_Feedr']\n",
    "                                                              , value = 'Other', inplace=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e7bfc161",
   "metadata": {},
   "outputs": [],
   "source": [
    "df_train['HouseStyle'] = df_train['HouseStyle'].replace(to_replace=['2.5Fin'], value = '2.5Unf', inplace=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6abde43a",
   "metadata": {},
   "outputs": [],
   "source": [
    "df_train['Exterior1st'] = df_train['Exterior1st'].replace(to_replace=['Stone', 'PreCast', 'ImStucc'], value = 'BrkComm', inplace=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9ccb92c6",
   "metadata": {},
   "outputs": [],
   "source": [
    "df_train['Exterior2nd'] = df_train['Exterior2nd'].replace(to_replace=['PreCast', 'Other'], value = 'CBlock', inplace=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ed7adf49",
   "metadata": {},
   "outputs": [],
   "source": [
    "df_train['BsmtQual'] = df_train['BsmtQual'].replace(to_replace=['Po'], value = 'Fa', inplace=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2547ae67",
   "metadata": {},
   "outputs": [],
   "source": [
    "df_train['KitchenQual'] = df_train['KitchenQual'].replace(to_replace=['Po'], value = 'Fa', inplace=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8c1cc62f",
   "metadata": {},
   "outputs": [],
   "source": [
    "df_train['SaleType'] = df_train['SaleType'].replace(to_replace=['VWD'], value = 'Con', inplace=False)\n",
    "df_train['SaleType'] = df_train['SaleType'].replace(to_replace=['WD '], value = 'WD', inplace=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "056f2f8f",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 再次检查类别特征，确保没有错误\n",
    "compare_cols(cols = df_train.select_dtypes(include= 'object').columns)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "aac0e4ec",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 使用ordinal encodings\n",
    "cols_OE = ['ExterQual', 'ExterCond', 'BsmtQual', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2', 'HeatingQC'\n",
    "           , 'KitchenQual', 'GarageFinish']\n",
    "dic = {'None':0, 'Po': 1, 'Fa': 2, 'TA': 3, 'Gd': 4, 'Ex': 5, 'No': 1, 'Mn': 2, 'Av': 3, 'Unf': 1, 'LwQ': 2, 'Rec': 3, 'BLQ': 4\n",
    "       , 'ALQ': 5, 'GLQ': 6, 'RFn': 2, 'Fin': 3, 'N': 1, 'P': 2, 'Y': 3, 'MnWw': 1, 'GdWo': 2, 'MnPrv': 3, 'GdPrv': 4,'Other':1}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2be36b32",
   "metadata": {},
   "outputs": [],
   "source": [
    "df_train[cols_OE] = df_train[cols_OE].replace(dic, inplace = False)\n",
    "\n",
    "df_test[cols_OE] = df_test[cols_OE].replace(dic, inplace = False)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "2884550d",
   "metadata": {},
   "source": [
    "**对顺序编码后的剩余分类特征进行分类编码**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0f83506f",
   "metadata": {},
   "outputs": [],
   "source": [
    "import category_encoders as ce\n",
    "\n",
    "cols_CE = df_train.select_dtypes(include= 'object').columns\n",
    "cols_CE"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "123cf7ce",
   "metadata": {},
   "outputs": [],
   "source": [
    "cbe = ce.cat_boost.CatBoostEncoder(a = 1)\n",
    "cbe.fit(df_train[cols_CE], df_train['SalePrice'])\n",
    "df_train[cols_CE] = cbe.transform(df_train[cols_CE])\n",
    "df_test[cols_CE] = cbe.transform(df_test[cols_CE])"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "b2d21295",
   "metadata": {},
   "source": [
    "**在扩充数据集上训练模型**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "fa53eaa7",
   "metadata": {},
   "outputs": [],
   "source": [
    "from xgboost import XGBRegressor"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "dba74fa6",
   "metadata": {},
   "outputs": [],
   "source": [
    "model_xgb = XGBRegressor(\n",
    "                        n_estimators = 1000,\n",
    "                        max_depth = 8,\n",
    "                        subsample = 0.6,\n",
    "                        #min_child_weight = 60, \n",
    "                        learning_rate = 0.1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e5d87392",
   "metadata": {},
   "outputs": [],
   "source": [
    "model_xgb.fit(df_train.drop(['Id', 'SalePrice'], axis =1), df_train['SalePrice'],\n",
    "            verbose = False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a00db2ee",
   "metadata": {},
   "outputs": [],
   "source": [
    "y_test = model_xgb.predict(df_test.drop('Id', axis = 1))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ec772340",
   "metadata": {},
   "outputs": [],
   "source": [
    "y_test"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "709fd526",
   "metadata": {},
   "outputs": [],
   "source": [
    "sub = pd.DataFrame({'Id': range(1461,2920), 'SalePrice': np.exp(y_test)})\n",
    "sub = sub.set_index('Id')\n",
    "sub.to_csv('submission.csv')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "be5debf4",
   "metadata": {},
   "outputs": [],
   "source": [
    "model_xgb2 = XGBRegressor(\n",
    "                        n_estimators = 1000,\n",
    "                        max_depth = 10,\n",
    "                        subsample = 0.6,\n",
    "                        #min_child_weight = 60, \n",
    "                        learning_rate = 0.1)\n",
    "model_xgb2.fit(df_train.drop(['Id', 'SalePrice'], axis =1), df_train['SalePrice'],\n",
    "            verbose = False)\n",
    "y_test2 = model_xgb2.predict(df_test.drop('Id', axis = 1))\n",
    "sub2 = pd.DataFrame({'Id': range(1461,2920), 'SalePrice': np.exp(y_test2)})\n",
    "sub2 = sub2.set_index('Id')\n",
    "sub2.to_csv('submission2.csv')"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "42577065",
   "metadata": {},
   "source": [
     "***十一个模型间对比***\n",
    "| 模型名称           | 构造器及参数说明                                                                 |\n",
    "|--------------------|----------------------------------------------------------------------------------|\n",
    "| LinearRegression   | `LinearRegression()`                                                             |\n",
    "| Ridge              | `Ridge()`                                                                        |\n",
    "| Lasso              | `Lasso()`                                                                        |\n",
    "| ElasticNet         | `ElasticNet()`                                                                   |\n",
    "| RandomForest       | `RandomForestRegressor(n_estimators=100, random_state=42)`                      |\n",
    "| GradientBoosting   | `GradientBoostingRegressor(n_estimators=100, random_state=42)`                  |\n",
    "| AdaBoost           | `AdaBoostRegressor(n_estimators=100, random_state=42)`                          |\n",
    "| KNeighbors         | `KNeighborsRegressor()`                                                           |\n",
    "| SVR                | `SVR()`                                                                           |\n",
    "| XGBoost            | `XGBRegressor(n_estimators=1000, max_depth=10, subsample=0.6, learning_rate=0.1, verbosity=0)` |\n",
    "| LightGBM           | `LGBMRegressor(n_estimators=100, random_state=42)`                              |\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4286a8c2",
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "import numpy as np\n",
    "import time\n",
    "import os\n",
    "\n",
    "# Regression models\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score\n",
    "\n",
    "from sklearn.linear_model import LinearRegression, Ridge, Lasso, ElasticNet\n",
    "from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, AdaBoostRegressor\n",
    "from sklearn.svm import SVR\n",
    "from sklearn.neighbors import KNeighborsRegressor\n",
    "\n",
    "from xgboost import XGBRegressor\n",
    "from lightgbm import LGBMRegressor\n",
    "\n",
    "X = df_train.drop(['Id', 'SalePrice'], axis=1)\n",
    "y = df_train['SalePrice']\n",
    "X_test = df_test.drop(['Id'], axis=1)\n",
    "\n",
    "# Split into train / validation sets (validation set is used for evaluation)\n",
    "X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.2, random_state=42)\n",
    "\n",
    "# Model definitions (11 models)\n",
    "models = {\n",
    "    \"LinearRegression\": LinearRegression(),\n",
    "    \"Ridge\": Ridge(),\n",
    "    \"Lasso\": Lasso(),\n",
    "    \"ElasticNet\": ElasticNet(),\n",
    "    \"RandomForest\": RandomForestRegressor(n_estimators=100, random_state=42),\n",
    "    \"GradientBoosting\": GradientBoostingRegressor(n_estimators=100, random_state=42),\n",
    "    \"AdaBoost\": AdaBoostRegressor(n_estimators=100, random_state=42),\n",
    "    \"KNeighbors\": KNeighborsRegressor(),\n",
    "    \"SVR\": SVR(),\n",
    "    # random_state added for reproducibility, consistent with the other tree ensembles\n",
    "    \"XGBoost\": XGBRegressor(n_estimators=1000, max_depth=10, subsample=0.6, learning_rate=0.1, verbosity=0, random_state=42),\n",
    "    \"LightGBM\": LGBMRegressor(n_estimators=100, random_state=42)\n",
    "}\n",
    "\n",
    "# Make sure the output directory exists before any CSV is written\n",
    "os.makedirs(\"./comparison_results\", exist_ok=True)\n",
    "\n",
    "# Collected evaluation results\n",
    "results = []\n",
    "\n",
    "for name, model in models.items():\n",
    "    start = time.time()\n",
    "    model.fit(X_train, y_train)\n",
    "    elapsed_time = time.time() - start\n",
    "\n",
    "    # Predict on the validation set\n",
    "    y_pred = model.predict(X_valid)\n",
    "\n",
    "    # Invert the log transform applied to SalePrice upstream\n",
    "    y_valid_exp = np.exp(y_valid)\n",
    "    y_pred_exp = np.exp(y_pred)\n",
    "\n",
    "    # Metrics; RMSE via np.sqrt because squared=False was deprecated in\n",
    "    # scikit-learn 1.4 and removed in 1.6\n",
    "    rmse = np.sqrt(mean_squared_error(y_valid_exp, y_pred_exp))\n",
    "    mae = mean_absolute_error(y_valid_exp, y_pred_exp)\n",
    "    r2 = r2_score(y_valid_exp, y_pred_exp)\n",
    "\n",
    "    results.append({\n",
    "        \"Model\": name,\n",
    "        \"Train Time (s)\": round(elapsed_time, 3),\n",
    "        \"RMSE\": round(rmse, 2),\n",
    "        \"MAE\": round(mae, 2),\n",
    "        \"R^2\": round(r2, 4)\n",
    "    })\n",
    "\n",
    "    # Predict on the test set and export one submission file per model\n",
    "    y_test_pred = model.predict(X_test)\n",
    "    y_test_pred_exp = np.exp(y_test_pred)\n",
    "\n",
    "    sub = pd.DataFrame({\n",
    "        \"Id\": df_test[\"Id\"],\n",
    "        \"SalePrice\": y_test_pred_exp\n",
    "    })\n",
    "    sub.to_csv(f\"./comparison_results/submission_{name}.csv\", index=False)\n",
    "\n",
    "# Save the comparison table, best RMSE first\n",
    "results_df = pd.DataFrame(results).sort_values(by=\"RMSE\").reset_index(drop=True)\n",
    "results_df.to_csv(\"./comparison_results/model_comparison_results.csv\", index=False)\n",
    "print(results_df)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "4896c2ac",
   "metadata": {},
   "source": [
    "***模型间对比可视化***"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f82af9ae",
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "import matplotlib.pyplot as plt\n",
    "import seaborn as sns\n",
    "\n",
    "# Publication-style plot settings\n",
    "sns.set(style=\"whitegrid\", font_scale=1.2)\n",
    "plt.rcParams[\"font.family\"] = [\"Times New Roman\",\"SimHei\"]  # SimHei fallback renders the Chinese labels below\n",
    "plt.rcParams[\"axes.unicode_minus\"] = False\n",
    "plt.rcParams[\"figure.dpi\"] = 300\n",
    "\n",
    "# Load the comparison table written by the previous model-comparison cell\n",
    "df = pd.read_csv(\"./comparison_results/model_comparison_results.csv\")\n",
    "\n",
    "# One color per model\n",
    "colors = sns.color_palette(\"viridis\", len(df))\n",
    "\n",
    "# 1. Grouped bar chart: RMSE and MAE per model\n",
    "plt.figure(figsize=(12, 6))\n",
    "bar_width = 0.35\n",
    "x = range(len(df))\n",
    "\n",
    "plt.bar(x, df['RMSE'], width=bar_width, label='RMSE', color=colors)\n",
    "plt.bar([p + bar_width for p in x], df['MAE'], width=bar_width, label='MAE', color=sns.color_palette(\"magma\", len(df)))\n",
    "\n",
    "plt.xticks([p + bar_width/2 for p in x], df['Model'], rotation=45, ha='right')\n",
    "plt.ylabel(\"误差\")\n",
    "plt.title(\"各回归模型的 RMSE 与 MAE 对比\")\n",
    "plt.legend()\n",
    "plt.tight_layout()\n",
    "plt.savefig(\"./comparison_results/rmse_mae_comparison.png\", bbox_inches='tight')\n",
    "plt.show()\n",
    "\n",
    "# 2. Line plot: R-squared score per model\n",
    "plt.figure(figsize=(10, 5))\n",
    "sns.lineplot(data=df, x=\"Model\", y=\"R^2\", marker='o', color=\"teal\", linewidth=2.5)\n",
    "plt.xticks(rotation=45, ha='right')\n",
    "plt.ylabel(\"R²\")\n",
    "plt.title(\"各模型 R² 得分\")\n",
    "plt.tight_layout()\n",
    "plt.savefig(\"./comparison_results/r2_comparison.png\", bbox_inches='tight')\n",
    "plt.show()\n",
    "\n",
    "# 3. Bar chart: training time per model\n",
    "plt.figure(figsize=(10, 5))\n",
    "sns.barplot(data=df, x=\"Model\", y=\"Train Time (s)\", palette=\"Blues_d\")\n",
    "plt.xticks(rotation=45, ha='right')\n",
    "plt.ylabel(\"训练时间（秒）\")\n",
    "plt.title(\"各模型训练时间对比\")\n",
    "plt.tight_layout()\n",
    "plt.savefig(\"./comparison_results/train_time_comparison.png\", bbox_inches='tight')\n",
    "plt.show()\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "dca5543d",
   "metadata": {},
   "source": [
    "***模型内不同超参数对比***"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "52052851",
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "import numpy as np\n",
    "import time\n",
    "import os\n",
    "import itertools # For generating hyperparameter combinations\n",
    "import shutil # For cleaning up directories if needed\n",
    "\n",
    "# Regression models\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score\n",
    "\n",
    "from sklearn.linear_model import LinearRegression, Ridge, Lasso, ElasticNet\n",
    "from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, AdaBoostRegressor\n",
    "from sklearn.svm import SVR\n",
    "from sklearn.neighbors import KNeighborsRegressor\n",
    "\n",
    "from xgboost import XGBRegressor\n",
    "from lightgbm import LGBMRegressor\n",
    "\n",
    "# Load the feature matrix and target (SalePrice was log-transformed upstream)\n",
    "X = df_train.drop(['Id', 'SalePrice'], axis=1)\n",
    "y = df_train['SalePrice']\n",
    "X_test_submission = df_test.drop(['Id'], axis=1)\n",
    "\n",
    "# Split into train / validation sets (validation set is used for evaluation)\n",
    "X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.2, random_state=42)\n",
    "\n",
    "\n",
    "# 📊 Base Model Definitions (these will also be used as templates for tuning)\n",
    "models = {\n",
    "    \"LinearRegression\": LinearRegression(),\n",
    "    \"Ridge\": Ridge(random_state=42),\n",
    "    \"Lasso\": Lasso(random_state=42, max_iter=2000), # Increased max_iter\n",
    "    \"ElasticNet\": ElasticNet(random_state=42, max_iter=2000), # Increased max_iter\n",
    "    \"RandomForest\": RandomForestRegressor(random_state=42),\n",
    "    \"GradientBoosting\": GradientBoostingRegressor(random_state=42),\n",
    "    \"AdaBoost\": AdaBoostRegressor(random_state=42),\n",
    "    \"KNeighbors\": KNeighborsRegressor(),\n",
    "    \"SVR\": SVR(),\n",
    "    \"XGBoost\": XGBRegressor(random_state=42, verbosity=0), # verbosity=0 to suppress XGBoost training output\n",
    "    \"LightGBM\": LGBMRegressor(random_state=42, verbose=-1) # verbose=-1 to suppress LightGBM training output\n",
    "}\n",
    "\n",
    "# 🛠️ Hyperparameter Grids for Tuning (add more models/params as needed)\n",
    "# Keep grids small for faster demonstration\n",
    "param_grids = {\n",
    "    \"Ridge\": {\"alpha\": [0.1, 0.2, 0.5, 1.0, 2, 5, 10.0]},\n",
    "    \"Lasso\": {\"alpha\": [0.001, 0.005, 0.01, 0.05, 0.1, 0.2, ]},\n",
    "    \"ElasticNet\": {\"alpha\": [0.01, 0.05, 0.1, 0.2], \"l1_ratio\": [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]},\n",
    "    \"RandomForest\": {\"n_estimators\": [50, 100, 200, 300, 400, 500], \"max_depth\": [5, 6, 7, 8, 9, 10, None]},\n",
    "    \"GradientBoosting\": {\"n_estimators\": [50, 100, 200, 300, 400, 500], \"learning_rate\": [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0], \"max_depth\": [3, 4, 5, 6, 7, 8, 9, 10]},\n",
    "    \"AdaBoost\": {\"n_estimators\": [50, 100, 200, 300, 400, 500], \"learning_rate\": [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]}, # Can be slow\n",
    "    \"KNeighbors\": {\"n_neighbors\": [3,4, 5, 6, 7, 8, 9], \"weights\": ['uniform', 'distance']},\n",
    "    #\"SVR\": {\"C\": [0.1, 1, 10], \"kernel\": ['linear', 'rbf']}, # SVR can be slow\n",
    "    \"XGBoost\": {\"n_estimators\": [50, 100, 200, 300, 400, 500], \"max_depth\": [3, 4, 5, 6], \"learning_rate\": [0.05, 0.1]},\n",
    "    \"LightGBM\": {\"n_estimators\": [50, 100, 200, 300, 400, 500], \"learning_rate\": [0.02 , 0.04, 0.06, 0.08, 0.1], \"num_leaves\": [20, 22, 24, 26, 28, 30]}\n",
    "}\n",
    "\n",
    "# 📁 Create directories for results\n",
    "output_base_dir = \"./comparison_results\"\n",
    "hyperparam_tuning_dir = os.path.join(output_base_dir, \"hyperparameter_tuning\")\n",
    "submission_dir = os.path.join(output_base_dir, \"submissions_best_tuned\") # Submissions from best tuned models\n",
    "\n",
    "# Clean up previous results if you want a fresh run\n",
    "# if os.path.exists(output_base_dir):\n",
    "#     shutil.rmtree(output_base_dir)\n",
    "\n",
    "os.makedirs(hyperparam_tuning_dir, exist_ok=True)\n",
    "os.makedirs(submission_dir, exist_ok=True)\n",
    "\n",
    "# 📈 Overall model comparison results (includes defaults and best tuned)\n",
    "overall_comparison_results = []\n",
    "\n",
    "for model_name, model_template in models.items():\n",
    "    print(f\"\\n--- Processing Model: {model_name} ---\")\n",
    "\n",
    "    # 1. Evaluate the default model (as in your original script)\n",
    "    print(f\"  Evaluating default {model_name}...\")\n",
    "    start_default = time.time()\n",
    "    # Ensure all base models have their defined parameters (e.g., random_state, verbosity)\n",
    "    # NOTE(review): fitting model_template mutates the instance stored in `models`;\n",
    "    # harmless here because each template is fitted only once per run\n",
    "    current_eval_model = model_template\n",
    "    current_eval_model.fit(X_train, y_train)\n",
    "    elapsed_time_default = time.time() - start_default\n",
    "\n",
    "    y_pred_default = current_eval_model.predict(X_valid)\n",
    "    y_valid_exp = np.exp(y_valid) # Inverse transform validation target\n",
    "    y_pred_default_exp = np.exp(y_pred_default) # Inverse transform predictions\n",
    "\n",
    "    # RMSE via np.sqrt: squared=False was deprecated in scikit-learn 1.4, removed in 1.6\n",
    "    rmse_default = np.sqrt(mean_squared_error(y_valid_exp, y_pred_default_exp))\n",
    "    mae_default = mean_absolute_error(y_valid_exp, y_pred_default_exp)\n",
    "    r2_default = r2_score(y_valid_exp, y_pred_default_exp)\n",
    "\n",
    "    default_model_stats = {\n",
    "        \"Model\": model_name,\n",
    "        \"Type\": \"Default\",\n",
    "        \"Hyperparameters\": \"Default\",\n",
    "        \"Train Time (s)\": round(elapsed_time_default, 3),\n",
    "        \"RMSE\": round(rmse_default, 2),\n",
    "        \"MAE\": round(mae_default, 2),\n",
    "        \"R^2\": round(r2_default, 4)\n",
    "    }\n",
    "    overall_comparison_results.append(default_model_stats)\n",
    "    print(f\"  Default {model_name} - RMSE: {rmse_default:.2f}, R^2: {r2_default:.4f}\")\n",
    "\n",
    "    # --- Hyperparameter Tuning for the current model ---\n",
    "    model_specific_tuning_results = []\n",
    "    best_tuned_model_for_submission = None # Store the best model instance\n",
    "    best_rmse_for_submission = float('inf')\n",
    "\n",
    "    if model_name in param_grids:\n",
    "        grid = param_grids[model_name]\n",
    "        param_keys = list(grid.keys())\n",
    "        # Generate all combinations of hyperparameter values\n",
    "        param_value_combinations = list(itertools.product(*[grid[pk] for pk in param_keys]))\n",
    "        \n",
    "        print(f\"  Tuning {model_name} with {len(param_value_combinations)} combinations...\")\n",
    "\n",
    "        for i, combo_values in enumerate(param_value_combinations):\n",
    "            current_params = dict(zip(param_keys, combo_values))\n",
    "            \n",
    "            # Merge with model_template's initial params, then override with current_params\n",
    "            # This ensures things like random_state, verbosity are kept if not in grid\n",
    "            merged_params = model_template.get_params()\n",
    "            merged_params.update(current_params)\n",
    "\n",
    "            # Create a new model instance with the current combination of hyperparameters\n",
    "            tuned_model = model_template.__class__(**merged_params)\n",
    "            \n",
    "            # print(f\"    Trying params: {current_params}\") # For detailed logging\n",
    "            try:\n",
    "                start_tune = time.time()\n",
    "                tuned_model.fit(X_train, y_train)\n",
    "                elapsed_time_tune = time.time() - start_tune\n",
    "\n",
    "                y_pred_tune = tuned_model.predict(X_valid)\n",
    "                y_pred_tune_exp = np.exp(y_pred_tune) # Inverse transform\n",
    "\n",
    "                # np.sqrt(MSE) instead of the removed squared=False flag\n",
    "                rmse_tune = np.sqrt(mean_squared_error(y_valid_exp, y_pred_tune_exp))\n",
    "                mae_tune = mean_absolute_error(y_valid_exp, y_pred_tune_exp)\n",
    "                r2_tune = r2_score(y_valid_exp, y_pred_tune_exp)\n",
    "\n",
    "                result_row = {\n",
    "                    \"Model\": model_name,\n",
    "                    \"Train Time (s)\": round(elapsed_time_tune, 3),\n",
    "                    \"RMSE\": round(rmse_tune, 2),\n",
    "                    \"MAE\": round(mae_tune, 2),\n",
    "                    \"R^2\": round(r2_tune, 4)\n",
    "                }\n",
    "                result_row.update(current_params) # Add current hyperparameters to the row\n",
    "                model_specific_tuning_results.append(result_row)\n",
    "                # print(f\"    Combo {i+1}/{len(param_value_combinations)} | RMSE: {rmse_tune:.2f} | Params: {current_params}\")\n",
    "\n",
    "                if rmse_tune < best_rmse_for_submission:\n",
    "                    best_rmse_for_submission = rmse_tune\n",
    "                    best_tuned_model_for_submission = tuned_model\n",
    "\n",
    "            except Exception as e:\n",
    "                print(f\"    Error with {model_name} and params {current_params}: {e}\")\n",
    "                error_row = {\n",
    "                    \"Model\": model_name,\n",
    "                    \"Train Time (s)\": 0,\n",
    "                    \"RMSE\": float('inf'),\n",
    "                    \"MAE\": float('inf'),\n",
    "                    \"R^2\": float('-inf')\n",
    "                }\n",
    "                error_row.update(current_params)\n",
    "                model_specific_tuning_results.append(error_row)\n",
    "                continue\n",
    "        \n",
    "        if model_specific_tuning_results:\n",
    "            model_hyperparam_df = pd.DataFrame(model_specific_tuning_results)\n",
    "            model_hyperparam_df.sort_values(by=\"RMSE\", inplace=True)\n",
    "            \n",
    "            # Save hyperparameter tuning results for THIS model\n",
    "            tuning_csv_path = os.path.join(hyperparam_tuning_dir, f\"{model_name}_hyperparameter_tuning.csv\")\n",
    "            model_hyperparam_df.to_csv(tuning_csv_path, index=False)\n",
    "            print(f\"  Saved hyperparameter tuning results for {model_name} to {tuning_csv_path}\")\n",
    "\n",
    "            # Add the BEST of these to the overall_comparison_results\n",
    "            best_tuned_result = model_hyperparam_df.iloc[0].to_dict()\n",
    "            tuned_params_str = str({k: v for k, v in best_tuned_result.items() if k in param_keys})\n",
    "            \n",
    "            overall_comparison_results.append({\n",
    "                \"Model\": model_name,\n",
    "                \"Type\": \"Tuned\",\n",
    "                \"Hyperparameters\": tuned_params_str,\n",
    "                \"Train Time (s)\": best_tuned_result[\"Train Time (s)\"],\n",
    "                \"RMSE\": best_tuned_result[\"RMSE\"],\n",
    "                \"MAE\": best_tuned_result[\"MAE\"],\n",
    "                \"R^2\": best_tuned_result[\"R^2\"]\n",
    "            })\n",
    "            print(f\"  Best tuned {model_name} - RMSE: {best_tuned_result['RMSE']:.2f}, R^2: {best_tuned_result['R^2']:.4f} with params: {tuned_params_str}\")\n",
    "\n",
    "            # Generate submission file with the best tuned model for this type\n",
    "            if best_tuned_model_for_submission:\n",
    "                y_test_pred_tuned = best_tuned_model_for_submission.predict(X_test_submission)\n",
    "                y_test_pred_tuned_exp = np.exp(y_test_pred_tuned)\n",
    "                sub_tuned = pd.DataFrame({\n",
    "                    \"Id\": df_test[\"Id\"], # Use original Id from df_test\n",
    "                    \"SalePrice\": y_test_pred_tuned_exp\n",
    "                })\n",
    "                sub_tuned.to_csv(os.path.join(submission_dir, f\"submission_{model_name}_best_tuned.csv\"), index=False)\n",
    "                print(f\"  Saved best tuned submission for {model_name}.\")\n",
    "\n",
    "    else:\n",
    "        print(f\"  No hyperparameter grid specified for {model_name}. Using default model for potential submission.\")\n",
    "        # If no tuning grid, the 'default' model is the best for this type\n",
    "        best_tuned_model_for_submission = current_eval_model # The already trained default model\n",
    "        # Optionally save submission for default if no tuning\n",
    "        y_test_pred_default_sub = best_tuned_model_for_submission.predict(X_test_submission)\n",
    "        y_test_pred_default_sub_exp = np.exp(y_test_pred_default_sub)\n",
    "        sub_default = pd.DataFrame({\n",
    "            \"Id\": df_test[\"Id\"],\n",
    "            \"SalePrice\": y_test_pred_default_sub_exp\n",
    "        })\n",
    "        sub_default.to_csv(os.path.join(submission_dir, f\"submission_{model_name}_default_as_best.csv\"), index=False)\n",
    "        print(f\"  Saved default submission for {model_name} as no tuning performed.\")\n",
    "\n",
    "\n",
    "# 📃 Save overall comparison results\n",
    "overall_results_df = pd.DataFrame(overall_comparison_results).sort_values(by=[\"RMSE\", \"Model\", \"Type\"]).reset_index(drop=True)\n",
    "overall_csv_path = os.path.join(output_base_dir, \"model_comparison_overall_results.csv\")\n",
    "overall_results_df.to_csv(overall_csv_path, index=False)\n",
    "\n",
    "print(\"\\n--- Overall Model Comparison (Defaults and Best Tuned) ---\")\n",
    "print(overall_results_df)\n",
    "print(f\"\\nOverall results saved to {overall_csv_path}\")\n",
    "print(f\"Hyperparameter tuning details saved in: {hyperparam_tuning_dir}\")\n",
    "print(f\"Submission files from best models saved in: {submission_dir}\")\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "b7b8461d",
   "metadata": {},
   "source": [
    "***模型内不同超参数预测效果可视化***"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5d73e1e0",
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "import matplotlib.pyplot as plt\n",
    "import seaborn as sns\n",
    "import os\n",
    "import glob\n",
    "\n",
    "# Style for plots\n",
    "plt.style.use('ggplot')\n",
    "\n",
    "# Directories\n",
    "base_dir = \"./comparison_results\"\n",
    "tuning_results_dir = os.path.join(base_dir, \"hyperparameter_tuning\")\n",
    "visualizations_output_dir = os.path.join(base_dir, \"visualizations\")\n",
    "\n",
    "os.makedirs(visualizations_output_dir, exist_ok=True)\n",
    "\n",
    "# Find all hyperparameter tuning CSV files\n",
    "tuning_files = glob.glob(os.path.join(tuning_results_dir, \"*_hyperparameter_tuning.csv\"))\n",
    "\n",
    "# Avoid exit() here: in a notebook it can raise SystemExit / stop the kernel.\n",
    "# An empty file list simply makes the loop below a no-op.\n",
    "if not tuning_files:\n",
    "    print(f\"No tuning files found in {tuning_results_dir}. Nothing to visualize.\")\n",
    "else:\n",
    "    print(f\"Found {len(tuning_files)} tuning files. Generating visualizations...\")\n",
    "\n",
    "for csv_file_path in tuning_files:\n",
    "    model_name_from_file = os.path.basename(csv_file_path).replace(\"_hyperparameter_tuning.csv\", \"\")\n",
    "    print(f\"\\n--- Visualizing for: {model_name_from_file} ---\")\n",
    "    \n",
    "    try:\n",
    "        df_tune = pd.read_csv(csv_file_path)\n",
    "    except pd.errors.EmptyDataError:\n",
    "        print(f\"  Skipping {model_name_from_file}, CSV is empty.\")\n",
    "        continue\n",
    "    \n",
    "    if df_tune.empty:\n",
    "        print(f\"  Skipping {model_name_from_file}, DataFrame is empty after loading.\")\n",
    "        continue\n",
    "\n",
    "    # Identify hyperparameter columns (those not standard metric/info columns)\n",
    "    known_metric_info_cols = ['Model', 'Train Time (s)', 'RMSE', 'MAE', 'R^2']\n",
    "    hyperparam_cols = [col for col in df_tune.columns if col not in known_metric_info_cols]\n",
    "\n",
    "    if not hyperparam_cols:\n",
    "        print(f\"  No hyperparameter columns identified for {model_name_from_file}. Skipping.\")\n",
    "        continue\n",
    "    \n",
    "    # Ensure 'Model' column exists (should always be the case from script 1)\n",
    "    if 'Model' not in df_tune.columns:\n",
    "        print(f\"  'Model' column not found in {csv_file_path}. Skipping.\")\n",
    "        continue\n",
    "    \n",
    "    actual_model_name = df_tune['Model'].iloc[0] # Get model name from the DataFrame content\n",
    "\n",
    "    for metric_to_plot in ['RMSE', 'MAE', 'R^2']:\n",
    "        if metric_to_plot not in df_tune.columns:\n",
    "            print(f\"  Metric {metric_to_plot} not found for {actual_model_name}. Skipping this metric.\")\n",
    "            continue\n",
    "\n",
    "        plt.figure(figsize=(12, 7))\n",
    "        \n",
    "        title = f\"{actual_model_name}: {metric_to_plot} vs Hyperparameters\"\n",
    "        plot_save_name = f\"{actual_model_name}_{metric_to_plot}_tuning.png\"\n",
    "\n",
    "        if len(hyperparam_cols) == 1:\n",
    "            param1 = hyperparam_cols[0]\n",
    "            # Convert to numeric if possible for better sorting on plot axis, else treat as categorical\n",
    "            try:\n",
    "                df_plot = df_tune.copy()\n",
    "                df_plot[param1] = pd.to_numeric(df_plot[param1])\n",
    "                df_plot = df_plot.sort_values(by=param1) # Sort for line plot\n",
    "            except ValueError: # Handles non-numeric params like 'kernel' names\n",
    "                df_plot = df_tune.sort_values(by=metric_to_plot) # Sort by metric if param is categorical\n",
    "\n",
    "            sns.lineplot(x=param1, y=metric_to_plot, data=df_plot, marker='o', legend=False)\n",
    "            # If param1 has few unique values, bar plot might be better\n",
    "            if df_plot[param1].nunique() < 10 :\n",
    "                 # Check if we already plotted line, clear and plot bar\n",
    "                 plt.clf() # Clear figure for new plot type\n",
    "                 sns.barplot(x=param1, y=metric_to_plot, data=df_plot, color=sns.color_palette()[0])\n",
    "\n",
    "\n",
    "            plt.xlabel(str(param1))\n",
    "            title = f\"{actual_model_name}: {metric_to_plot} vs {param1}\"\n",
    "\n",
    "        elif len(hyperparam_cols) == 2:\n",
    "            param1, param2 = hyperparam_cols[0], hyperparam_cols[1]\n",
    "            # Try to make param1 numeric for x-axis if possible\n",
    "            try:\n",
    "                df_plot = df_tune.copy()\n",
    "                df_plot[param1] = pd.to_numeric(df_plot[param1])\n",
    "                df_plot = df_plot.sort_values(by=param1)\n",
    "            except ValueError:\n",
    "                df_plot = df_tune.copy() # Keep as is if not numeric\n",
    "\n",
    "            # Ensure param2 is string for discrete hue categories\n",
    "            df_plot[param2] = df_plot[param2].astype(str)\n",
    "            \n",
    "            sns.lineplot(x=param1, y=metric_to_plot, hue=param2, data=df_plot, marker='o', legend=\"full\")\n",
    "            plt.xlabel(str(param1))\n",
    "            plt.legend(title=str(param2), bbox_to_anchor=(1.05, 1), loc='upper left')\n",
    "            title = f\"{actual_model_name}: {metric_to_plot} vs {param1} (by {param2})\"\n",
    "        \n",
    "        else: # More than 2 hyperparameters\n",
    "            print(f\"  {actual_model_name} has {len(hyperparam_cols)} hyperparameters. Visualizing {metric_to_plot} vs first two: {hyperparam_cols[0]} & {hyperparam_cols[1]}.\")\n",
    "            param1, param2 = hyperparam_cols[0], hyperparam_cols[1]\n",
    "            # Similar to 2 params case, plot against first, hue by second\n",
    "            try:\n",
    "                df_plot = df_tune.copy()\n",
    "                df_plot[param1] = pd.to_numeric(df_plot[param1])\n",
    "                df_plot = df_plot.sort_values(by=param1)\n",
    "            except ValueError:\n",
    "                df_plot = df_tune.copy()\n",
    "\n",
    "            df_plot[param2] = df_plot[param2].astype(str)\n",
    "\n",
    "            sns.lineplot(x=param1, y=metric_to_plot, hue=param2, data=df_plot, marker='o', legend=\"brief\")\n",
    "            plt.xlabel(str(param1))\n",
    "            plt.legend(title=str(param2), bbox_to_anchor=(1.05, 1), loc='upper left')\n",
    "            title = f\"{actual_model_name}: {metric_to_plot} vs {param1} (by {param2} - others varied)\"\n",
    "\n",
    "        plt.title(title)\n",
    "        plt.ylabel(metric_to_plot)\n",
    "        plt.tight_layout(rect=[0, 0, 0.85, 1] if len(hyperparam_cols) >= 2 else None) # Adjust layout for legend\n",
    "        \n",
    "        final_plot_path = os.path.join(visualizations_output_dir, plot_save_name)\n",
    "        plt.savefig(final_plot_path)\n",
    "        plt.close() # Close the plot to free memory\n",
    "        print(f\"  Saved plot: {final_plot_path}\")\n",
    "\n",
    "print(\"\\nVisualization generation complete.\")\n",
    "print(f\"Plots saved in: {visualizations_output_dir}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "ae84642b",
   "metadata": {},
   "source": []
  },
  {
   "cell_type": "markdown",
   "id": "40ba9f31",
   "metadata": {},
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "sklearn-env",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.21"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
