{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "58dba433-f15c-4447-b148-853ed1898e35",
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "import seaborn as sns\n",
    "import missingno as msno\n",
    "import scipy.stats as st\n",
    "import warnings\n",
    "warnings.filterwarnings('ignore')\n",
    "# 解决中文显示问题\n",
    "plt.rcParams['font.sans-serif'] = ['SimHei']\n",
    "plt.rcParams['axes.unicode_minus'] = False"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a4200554-cdf9-4fa5-8246-6d92c444b8d9",
   "metadata": {},
   "outputs": [],
   "source": [
    "train_data = pd.read_csv(\"train2.csv\", sep = \" \")\n",
    "train_data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5a29593e-bc18-4a2e-a456-d0308afea6ae",
   "metadata": {},
   "outputs": [],
   "source": [
    "train_data.columns.values"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "38e2836a-d8ef-4324-b711-91d3a499bf65",
   "metadata": {},
   "outputs": [],
   "source": [
    "train_data.info()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f9f78151-2f74-44c0-8db5-e8cc87459e26",
   "metadata": {},
   "outputs": [],
   "source": [
    "train_data.isnull().sum()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "72b63838-eaba-4f77-8b97-9dfe39a7975a",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Bar chart of the columns that actually contain missing values\n",
    "missing = train_data.isnull().sum()\n",
    "missing = missing[missing > 0].sort_values()\n",
    "missing.plot.bar()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3682bfb1-1cd3-4d2f-bd28-fc9efb330c69",
   "metadata": {},
   "outputs": [],
   "source": [
    "msno.matrix(train_data.sample(10000))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1d74dc36-6a56-4571-b0a8-37bc0fa0f94f",
   "metadata": {},
   "outputs": [],
   "source": [
    "msno.bar(train_data.sample(10000))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "38132875-4621-474f-b641-e48676c96961",
   "metadata": {},
   "outputs": [],
   "source": [
    "train_data['notRepairedDamage'].value_counts()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3cb8238a-4369-483f-8f9f-498bf11ae0f4",
   "metadata": {},
   "outputs": [],
   "source": [
    "test_data = pd.read_csv(\"test2.csv\", sep = \" \")\n",
    "train_data[\"origin\"] = \"train\"\n",
    "test_data[\"origin\"] = \"test\"\n",
    "data = pd.concat([train_data, test_data], axis = 0, ignore_index = True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b543f15c-33c4-4680-8916-8f8b1f3c101b",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Use plain assignment instead of chained `.replace(..., inplace=True)`:\n",
    "# calling inplace on a column selection can act on a temporary copy and\n",
    "# silently leave `data` unchanged (pandas chained-assignment pitfall).\n",
    "data['notRepairedDamage'] = data['notRepairedDamage'].replace(\"-\", np.nan)\n",
    "data['notRepairedDamage'].value_counts()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0c1ee87e-d890-4845-9263-1bc95154f4d2",
   "metadata": {},
   "outputs": [],
   "source": [
    "data['seller'].value_counts()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3e1150fe-fb12-48c4-8801-045452ca18a1",
   "metadata": {},
   "outputs": [],
   "source": [
    "data[\"offerType\"].value_counts()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a2ce2a57-277e-44fd-b8af-e67625744f44",
   "metadata": {},
   "outputs": [],
   "source": [
    "del data[\"seller\"]\n",
    "del data[\"offerType\"]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c8f8293b-321b-4d72-986b-fd6222d27993",
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): `target` was used here before being defined anywhere above\n",
    "# (it only appeared near the end of the notebook), so Restart & Run All failed\n",
    "# with a NameError. Define it explicitly before plotting.\n",
    "target = train_data['price']\n",
    "plt.figure(1)\n",
    "plt.title('Johnson SU')\n",
    "sns.distplot(target, kde=False, fit=st.johnsonsu)\n",
    "plt.figure(2)\n",
    "plt.title('Normal')\n",
    "sns.distplot(target, kde=False, fit=st.norm)\n",
    "plt.figure(3)\n",
    "plt.title('Log Normal')\n",
    "sns.distplot(target, kde=False, fit=st.lognorm)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "288fe59a-ef93-45f6-8546-6bcc37bf3df8",
   "metadata": {},
   "outputs": [],
   "source": [
    "sns.distplot(target);\n",
    "print(\"偏度: %f\" % target.skew())\n",
    "print(\"峰度: %f\" % target.kurt())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1c4fb051-3595-4acc-83dd-5a459c4475ba",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Apply a log transform to bring the target closer to a normal distribution;\n",
    "# compute the transform once instead of three times.\n",
    "log_target = np.log(target)\n",
    "sns.distplot(log_target)\n",
    "print(\"偏度: %f\" % log_target.skew())\n",
    "print(\"峰度: %f\" % log_target.kurt())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5d50e90a-58d9-4d03-be78-0402caaed466",
   "metadata": {},
   "outputs": [],
   "source": [
    "numeric_features = ['power', 'kilometer', 'v_0', 'v_1', 'v_2', 'v_3', \n",
    "                    'v_4', 'v_5', 'v_6', 'v_7', 'v_8', 'v_9', 'v_10',\n",
    "                    'v_11', 'v_12', 'v_13','v_14' ]\n",
    "\n",
    "categorical_features = ['name', 'model', 'brand', 'bodyType', 'fuelType','gearbox', 'notRepairedDamage', 'regionCode',]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ca264714-ffa2-4543-9ec7-fd025d7cc0dd",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 对于类别型的特征需要查看其取值有多少个，能不能转换为onehot\n",
    "for feature in categorical_features:\n",
    "    print(feature,\"特征有{}个取值\".format(train_data[feature].nunique()))\n",
    "    print(train_data[feature].value_counts())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "fa9c1370-3c8a-4615-b048-e8dfaf8726d9",
   "metadata": {},
   "outputs": [],
   "source": [
    "numeric_features.append(\"price\")\n",
    "price_numeric = train_data[numeric_features]\n",
    "correlation_score = price_numeric.corr() # 得到是一个特征数*特征数的矩阵，元素都行和列对应特征之间的相关性\n",
    "correlation_score['price'].sort_values(ascending = False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2994e288-ac95-41cd-8fa5-db913d04ce25",
   "metadata": {},
   "outputs": [],
   "source": [
    "fig,ax = plt.subplots(figsize = (12,12))\n",
    "plt.title(\"相关性展示\")\n",
    "sns.heatmap(correlation_score, square = True, vmax = 0.8)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e79c9963-8453-4f68-abc9-77d890b57682",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 查看特征值的偏度和峰度\n",
    "for col in numeric_features:\n",
    "    print(\"{:15}\\t Skewness:{:05.2f}\\t Kurtosis:{:06.2f}\".format(col,\n",
    "                                                    train_data[col].skew(), \n",
    "                                                   train_data[col].kurt()))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ec46dc05-9f67-4c02-8a29-eb475905003b",
   "metadata": {},
   "outputs": [],
   "source": [
    "f = pd.melt(train_data, value_vars=numeric_features)\n",
    "# 这里相当于f是一个两列的矩阵，第一列是原来特征\n",
    "# 第二列是特征对应的取值，例如power有n个取值，那么它会占据n行，这样叠在一起\n",
    "g = sns.FacetGrid(f, col=\"variable\",  col_wrap=2, sharex=False, sharey=False)\n",
    "#g 是产生一个对象，可以用来应用各种图面画图，map应用\n",
    "# 第一个参数就是dataframe数据，但是要求是长数据，也就是melt处理完的数据\n",
    "# 第二个参数是用来画图依据的列，valiable是melt处理完，那些特征的列名称\n",
    "# 而那些值的列名称为value\n",
    "# 第三个参数col_wrap是代表分成多少列\n",
    "g = g.map(sns.distplot, \"value\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2283c7a8-5682-404f-bec8-aa6154162bc8",
   "metadata": {},
   "outputs": [],
   "source": [
    "sns.pairplot(train_data[numeric_features], size = 2,  kind = \"scatter\",diag_kind = \"kde\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "91d65779-0501-4866-844c-04c6404c8ba2",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 下面对类别特征做处理\n",
    "categorical_features_2 = ['model',\n",
    " 'brand',\n",
    " 'bodyType',\n",
    " 'fuelType',\n",
    " 'gearbox',\n",
    " 'notRepairedDamage']\n",
    "for c in categorical_features_2:\n",
    "    train_data[c] = train_data[c].astype(\"category\")\n",
    "    # 将这些的类型转换为分类类型，不保留原来的int或者float类型\n",
    "    if train_data[c].isnull().any():\n",
    "        # 如果该列存在nan的话\n",
    "        train_data[c] = train_data[c].cat.add_categories(['Missing'])\n",
    "        # 增加一个新的分类为missing，用它来填充那些nan，代表缺失值，\n",
    "        # 这样在后面画图方便展示\n",
    "        train_data[c] = train_data[c].fillna('Missing')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "49c27a05-e1fc-4f0a-80ba-4551d47e4933",
   "metadata": {},
   "outputs": [],
   "source": [
    "def bar_plot(x, y, **kwargs):\n",
    "    sns.barplot(x = x, y = y)\n",
    "    x = plt.xticks(rotation = 90)\n",
    "    \n",
    "f = pd.melt(train_data, id_vars = ['price'], value_vars = categorical_features_2)\n",
    "g = sns.FacetGrid(f, col = 'variable', col_wrap = 2, sharex = False, sharey = False)\n",
    "g = g.map(bar_plot, \"value\", \"price\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ada1c4a0-e826-461e-9293-43e6bcc1d877",
   "metadata": {},
   "outputs": [],
   "source": [
    "# power's value distribution is highly abnormal, so drop its extreme rows.\n",
    "# Helper below removes box-plot outliers from a column.\n",
    "def outliers_proc(data, col_name, scale = 3):\n",
    "    \"\"\"Return a copy of `data` with rows dropped where `col_name` lies outside\n",
    "    the box-plot whiskers (quartiles +/- scale * IQR).\n",
    "\n",
    "    data: input DataFrame (not modified in place)\n",
    "    col_name: name of the column to screen for outliers\n",
    "    scale: whisker width as a multiple of the IQR\n",
    "    \"\"\"\n",
    "    def box_plot_outliers(data_ser, box_scale):\n",
    "        # quantile() returns the value at the given fraction of the data\n",
    "        iqr = box_scale * (data_ser.quantile(0.75) - data_ser.quantile(0.25))\n",
    "        val_low = data_ser.quantile(0.25) - iqr  # lower bound\n",
    "        val_up = data_ser.quantile(0.75) + iqr  # upper bound\n",
    "        rule_low = (data_ser < val_low)  # mask: below the lower bound\n",
    "        rule_up = (data_ser > val_up)  # mask: above the upper bound\n",
    "        return (rule_low, rule_up), (val_low, val_up)\n",
    "    \n",
    "    data_n = data.copy()\n",
    "    data_series = data_n[col_name]\n",
    "    rule, values = box_plot_outliers(data_series, box_scale = scale)\n",
    "    # positions 0..n-1 filtered by the combined outlier mask\n",
    "    index = np.arange(data_series.shape[0])[rule[0] | rule[1]]\n",
    "    print(\"Delete number is {}\".format(len(index)))\n",
    "    data_n = data_n.drop(index)  # drop the whole outlier rows\n",
    "    data_n.reset_index(drop = True, inplace = True)  # renumber the index\n",
    "    print(\"Now column number is:{}\".format(data_n.shape[0]))\n",
    "    index_low = np.arange(data_series.shape[0])[rule[0]]\n",
    "    outliers = data_series.iloc[index_low]  # values below the lower bound\n",
    "    print(\"Description of data less than the lower bound is:\")\n",
    "    print(pd.Series(outliers).describe())\n",
    "    index_up = np.arange(data_series.shape[0])[rule[1]]\n",
    "    outliers = data_series.iloc[index_up]\n",
    "    # message fixed: this block describes values ABOVE the upper bound\n",
    "    print(\"Description of data larger than the upper bound is:\")\n",
    "    print(pd.Series(outliers).describe())\n",
    "    fig, axes = plt.subplots(1,2,figsize = (10,7))\n",
    "    ax1 = sns.boxplot(y = data[col_name], data = data, palette = \"Set1\", ax = axes[0])\n",
    "    ax1.set_title(\"处理异常值前\")\n",
    "    ax2 = sns.boxplot(y = data_n[col_name], data = data_n, palette = \"Set1\", ax = axes[1])\n",
    "    ax2.set_title(\"处理异常值后\")\n",
    "    return data_n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "75409923-a288-4d0b-8771-6c3df0e978ab",
   "metadata": {},
   "outputs": [],
   "source": [
    "train_data_delete_after = outliers_proc(train_data, \"power\", scale =3)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7b0d523a-8db2-4117-a2c7-020f33d58f77",
   "metadata": {},
   "outputs": [],
   "source": [
    "bin_power = [i*10 for i in range(31)]\n",
    "data[\"power_bin\"] = pd.cut(data[\"power\"],bin_power,right = False,labels = False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "107aa7d0-ba43-499d-9396-5746b597d696",
   "metadata": {},
   "outputs": [],
   "source": [
    "data['power_bin'] = data['power_bin'].fillna(31)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "727ce775-f9a5-43ca-aa6a-1774010a6dcc",
   "metadata": {},
   "outputs": [],
   "source": [
    "data['power'] = np.log(data['power'] + 1) "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "81e0d8d4-d302-4c96-b29a-2a0b277b8983",
   "metadata": {},
   "outputs": [],
   "source": [
    "data[\"use_time\"] = (pd.to_datetime(data['creatDate'],format = \"%Y%m%d\",errors = \"coerce\")\n",
    "                        - pd.to_datetime(data[\"regDate\"], format = \"%Y%m%d\", errors = \"coerce\")).dt.days\n",
    "# errors是当格式转换错误就赋予nan"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c130673b-69fd-4ab1-9a41-a980d66b4849",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 计算某个品牌的各种统计数目量\n",
    "train_gb = train_data.groupby(\"brand\")\n",
    "all_info = {}\n",
    "for kind, kind_data in train_gb:\n",
    "    info = {}\n",
    "    kind_data = kind_data[kind_data[\"price\"] > 0]\n",
    "    # 把价格小于0的可能存在的异常值去除\n",
    "    info[\"brand_amount\"] = len(kind_data) # 该品牌的数量\n",
    "    info[\"brand_price_max\"] = kind_data.price.max() # 该品牌价格最大值\n",
    "    info[\"brand_price_min\"] = kind_data.price.min() # 该品牌价格最小值\n",
    "    info[\"brand_price_median\"] = kind_data.price.median() # 该品牌价格中位数\n",
    "    info[\"brand_price_sum\"] = kind_data.price.sum() # 该品牌价格总和\n",
    "    info[\"brand_price_std\"] = kind_data.price.std() # 方差\n",
    "    info[\"brand_price_average\"] = round(kind_data.price.sum() / (len(kind_data) + 1), 2)\n",
    "    # 均值，保留两位小数\n",
    "    all_info[kind] = info\n",
    "brand_feature = pd.DataFrame(all_info).T.reset_index().rename(columns = {\"index\":\"brand\"})"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "78e574c5-7ad4-42fb-9e87-eadd27018643",
   "metadata": {},
   "outputs": [],
   "source": [
    "brand_feature = pd.DataFrame(all_info)\n",
    "brand_feature"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "bf0ca3a8-487c-4d82-92a3-c52364457cf3",
   "metadata": {},
   "outputs": [],
   "source": [
    "brand_feature = pd.DataFrame(all_info).T.reset_index()\n",
    "brand_feature"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "97302385-6042-433c-9c81-1ecb2383dc26",
   "metadata": {},
   "outputs": [],
   "source": [
    "brand_feature = pd.DataFrame(all_info).T.reset_index().rename(columns = {\"index\":\"brand\"})\n",
    "brand_feature"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d40f2d2c-563d-43bf-8a31-a1a4c2c59136",
   "metadata": {},
   "outputs": [],
   "source": [
    "data = data.merge(brand_feature, how='left', on='brand')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9d093b7e-48bd-4cb8-b3df-9c16cb1b4750",
   "metadata": {},
   "outputs": [],
   "source": [
    "def max_min(x):\n",
    "    return (x - np.min(x)) / (np.max(x) - np.min(x))\n",
    "for feature in [\"brand_amount\",\"brand_price_average\",\"brand_price_max\",\n",
    "                \"brand_price_median\",\"brand_price_min\",\"brand_price_std\",\n",
    "               \"brand_price_sum\",\"power\",\"kilometer\"]:\n",
    "    data[feature] = max_min(data[feature])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "fc42006e-fb3e-4c8d-a8aa-9dd5962c20eb",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 对类别特征转换为onehot\n",
    "data = pd.get_dummies(data, columns=['model', 'brand', 'bodyType','fuelType','gearbox', \n",
    "                                     'notRepairedDamage', 'power_bin'],dummy_na=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "cf755e3b-9827-4212-b96d-12eab799f4f7",
   "metadata": {},
   "outputs": [],
   "source": [
    "use_feature = [x for x in data.columns if x not in ['SaleID',\"name\",\"price\",\"origin\"]]\n",
    "target = data[data[\"origin\"] == \"train\"][\"price\"]\n",
    "target_lg = np.log(target + 1)  # log-transform to reduce target skew\n",
    "\n",
    "# .copy() so the fillna assignments below write to real frames instead of views\n",
    "# of `data` (avoids SettingWithCopyWarning / silently lost writes).\n",
    "train_x = data[data[\"origin\"] == \"train\"][use_feature].copy()\n",
    "test_x = data[data[\"origin\"] == \"test\"][use_feature].copy()\n",
    "\n",
    "train_x[\"use_time\"] = train_x[\"use_time\"].fillna(train_x[\"use_time\"].mean())\n",
    "# fill test with the TRAIN mean to avoid leaking test-set statistics\n",
    "test_x[\"use_time\"] = test_x[\"use_time\"].fillna(train_x[\"use_time\"].mean())\n",
    "\n",
    "train_x.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "55e50d28-ddea-4247-b410-0757d9bb7737",
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn import metrics\n",
    "import matplotlib.pyplot as plt\n",
    "from sklearn.metrics import roc_auc_score, roc_curve, mean_squared_error,mean_absolute_error, f1_score\n",
    "import lightgbm as lgb\n",
    "import xgboost as xgb\n",
    "from sklearn.ensemble import RandomForestRegressor as rfr\n",
    "from sklearn.model_selection import  KFold, StratifiedKFold,GroupKFold, RepeatedKFold\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.model_selection import GridSearchCV\n",
    "from sklearn import preprocessing\n",
    "from sklearn.metrics import mean_absolute_error\n",
    "from sklearn.ensemble import GradientBoostingRegressor as gbr\n",
    "from sklearn.linear_model import LinearRegression as lr"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3726af56-1ba5-4aea-9214-dc13556d78ae",
   "metadata": {},
   "outputs": [],
   "source": [
    "lgb_param = {  # LightGBM training parameters\n",
    "    \"num_leaves\":7,\n",
    "    \"min_data_in_leaf\": 20,  # minimum samples per leaf, guards against overfitting\n",
    "    \"objective\": \"regression\",  # regression task\n",
    "    \"max_depth\": -1,  # -1 means no depth limit\n",
    "    \"learning_rate\": 0.003,\n",
    "    \"boosting\": \"gbdt\",  # gradient-boosted decision trees\n",
    "    \"feature_fraction\": 0.50,  # use 50% of features per iteration (comment previously said 18%)\n",
    "    \"bagging_freq\": 1,  # perform bagging at every iteration\n",
    "    \"bagging_fraction\": 0.55,  # sample 55% of data without replacement per bag\n",
    "    \"bagging_seed\": 1,\n",
    "    \"metric\": 'mean_absolute_error',\n",
    "    \"lambda_l1\": 0.5,\n",
    "    \"lambda_l2\": 0.5,\n",
    "    \"verbosity\": -1  # message verbosity\n",
    "}\n",
    "# KFold, not StratifiedKFold: the target here is continuous, and stratified\n",
    "# splitting requires discrete class labels (scikit-learn raises ValueError).\n",
    "folds = KFold(n_splits=5, shuffle=True, random_state = 4)\n",
    "valid_lgb = np.zeros(len(train_x))\n",
    "predictions_lgb = np.zeros(len(test_x))\n",
    "\n",
    "\n",
    "for fold_, (train_idx, valid_idx) in enumerate(folds.split(train_x)):\n",
    "    # indices of the training / validation rows for this fold\n",
    "    print(\"fold n{}\".format(fold_+1))\n",
    "    # .iloc keeps train_x and target_lg aligned by position\n",
    "    train_data_now = lgb.Dataset(train_x.iloc[train_idx], target_lg.iloc[train_idx])\n",
    "    valid_data_now = lgb.Dataset(train_x.iloc[valid_idx], target_lg.iloc[valid_idx])\n",
    "    num_round = 10000\n",
    "    lgb_model = lgb.train(lgb_param, train_data_now, num_round, \n",
    "                        valid_sets=[train_data_now, valid_data_now], verbose_eval=500,\n",
    "                       early_stopping_rounds = 800)\n",
    "    valid_lgb[valid_idx] = lgb_model.predict(train_x.iloc[valid_idx],\n",
    "                                             num_iteration=lgb_model.best_iteration)\n",
    "    # average the test predictions over the folds\n",
    "    predictions_lgb += lgb_model.predict(test_x, num_iteration=\n",
    "                                           lgb_model.best_iteration) / folds.n_splits\n",
    "print(\"CV score: {:<8.8f}\".format(mean_absolute_error(target_lg, valid_lgb)))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "82e93b04-0fa7-4355-ac02-193335f2da4a",
   "metadata": {},
   "outputs": [],
   "source": [
    "pd.set_option(\"display.max_columns\", None)  # 设置可以显示的最大行和最大列\n",
    "pd.set_option('display.max_rows', None)  # 如果超过就显示省略号，none表示不省略\n",
    "#设置value的显示长度为100，默认为50\n",
    "pd.set_option('max_colwidth',100)\n",
    "# 创建，然后只有一列就是刚才所使用的的特征\n",
    "df = pd.DataFrame(train_x.columns.tolist(), columns=['feature'])\n",
    "df['importance'] = list(lgb_model.feature_importance())\n",
    "df = df.sort_values(by='importance', ascending=False)  # 降序排列\n",
    "plt.figure(figsize = (14,28))\n",
    "sns.barplot(x='importance', y='feature', data = df.head(50))# 取出前五十个画图\n",
    "plt.title('Features importance (averaged/folds)')\n",
    "plt.tight_layout()  # 自动调整适应范围"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b701ebe7-5f29-4423-918d-03c748c62ac1",
   "metadata": {},
   "outputs": [],
   "source": [
    "#RandomForestRegressor随机森林\n",
    "folds = KFold(n_splits=5, shuffle=True, random_state=2019)\n",
    "valid_rfr = np.zeros(len(train_x))\n",
    "predictions_rfr = np.zeros(len(test_x))\n",
    " \n",
    "for fold_, (trn_idx, val_idx) in enumerate(folds.split(train_x, target)):\n",
    "    print(\"fold n°{}\".format(fold_+1))\n",
    "    tr_x = train_x.iloc[trn_idx]\n",
    "    tr_y = target_lg[trn_idx]\n",
    "    rfr_model = rfr(n_estimators=1600,max_depth=9, min_samples_leaf=9, \n",
    "                  min_weight_fraction_leaf=0.0,max_features=0.25,\n",
    "                  verbose=1,n_jobs=-1) #并行化\n",
    "    #verbose = 0 为不在标准输出流输出日志信息\n",
    "#verbose = 1 为输出进度条记录\n",
    "#verbose = 2 为每个epoch输出一行记录\n",
    "    rfr_model.fit(tr_x,tr_y)\n",
    "    valid_rfr[val_idx] = rfr_model.predict(train_x.iloc[val_idx])\n",
    "    \n",
    "    predictions_rfr += rfr_model.predict(test_x) / folds.n_splits\n",
    "    \n",
    "print(\"CV score: {:<8.8f}\".format(mean_absolute_error(valid_rfr, target_lg)))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2aa46a79-48c0-44d1-8ba7-0470042d406f",
   "metadata": {},
   "outputs": [],
   "source": [
    "#GradientBoostingRegressor梯度提升决策树\n",
    "folds = StratifiedKFold(n_splits=5, shuffle=True, random_state=2018)\n",
    "valid_gbr = np.zeros(len(train_x))\n",
    "predictions_gbr = np.zeros(len(test_x))\n",
    " \n",
    "for fold_, (trn_idx, val_idx) in enumerate(folds.split(train_x, target)):\n",
    "    print(\"fold n°{}\".format(fold_+1))\n",
    "    tr_x = train_x.iloc[trn_idx]\n",
    "    tr_y = target_lg[trn_idx]\n",
    "    gbr_model = gbr(n_estimators=100, learning_rate=0.1,subsample=0.65 ,max_depth=7, \n",
    "                    min_samples_leaf=20, max_features=0.22,verbose=1)\n",
    "    gbr_model.fit(tr_x,tr_y)\n",
    "    valid_gbr[val_idx] = gbr_model.predict(train_x.iloc[val_idx])\n",
    "    \n",
    "    predictions_gbr += gbr_model.predict(test_x) / folds.n_splits\n",
    " \n",
    "print(\"CV score: {:<8.8f}\".format(mean_absolute_error(valid_gbr, target_lg)))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "69fcd81e-31f8-4e00-bb89-f3c4d63b4407",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Stack the out-of-fold predictions of the three base models and fit a\n",
    "# linear meta-model on top (simple level-2 stacking).\n",
    "train_stack2 = np.vstack([valid_lgb, valid_rfr, valid_gbr]).transpose()\n",
    "test_stack2 = np.vstack([predictions_lgb, predictions_rfr,predictions_gbr]).transpose()\n",
    "# cross validation: 5 folds, repeated 2 times\n",
    "folds_stack = RepeatedKFold(n_splits=5, n_repeats=2, random_state=7)\n",
    "valid_stack2 = np.zeros(train_stack2.shape[0])\n",
    "predictions_lr2 = np.zeros(test_stack2.shape[0])\n",
    " \n",
    "for fold_, (trn_idx, val_idx) in enumerate(folds_stack.split(train_stack2,target)):\n",
    "    print(\"fold {}\".format(fold_))\n",
    "    trn_data, trn_y = train_stack2[trn_idx], target_lg.iloc[trn_idx].values\n",
    "    val_data, val_y = train_stack2[val_idx], target_lg.iloc[val_idx].values\n",
    "    # plain linear regression as the level-2 model\n",
    "    lr2 = lr()\n",
    "    lr2.fit(trn_data, trn_y)\n",
    "    \n",
    "    # NOTE(review): each repeat overwrites valid_stack2 for the same rows;\n",
    "    # only the last repeat's OOF predictions feed the CV score below.\n",
    "    valid_stack2[val_idx] = lr2.predict(val_data)\n",
    "    # average over n_splits * n_repeats folds instead of hard-coding 10\n",
    "    predictions_lr2 += lr2.predict(test_stack2) / folds_stack.get_n_splits()\n",
    "    \n",
    "print(\"CV score: {:<8.8f}\".format(mean_absolute_error(target_lg.values, valid_stack2)))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "56b55b8c-cfcd-4916-8654-85472801c093",
   "metadata": {},
   "outputs": [],
   "source": [
    "# expm1 inverts the log(x + 1) target transform and is more accurate than\n",
    "# exp(x) - 1 for values near zero\n",
    "prediction_test = np.expm1(predictions_lr2)\n",
    "test_submission = pd.read_csv(\"test2.csv\", sep = \" \")\n",
    "test_submission[\"price\"] = prediction_test\n",
    "sub = test_submission[[\"SaleID\", \"price\"]]\n",
    "sub.to_csv(\"mysubmission.csv\",index = False)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
