{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "from sklearn import preprocessing\n",
    "import numpy as np\n",
    "import math\n",
    "\n",
    "# Input: full UCI bank-marketing dump; outputs: raw and preprocessed splits.\n",
    "full_data_path = \"./data/bank-additional-full.csv\"\n",
    "oraginal_train_path = \"./data/train.csv\"\n",
    "oraginal_test_path = \"./data/test.csv\"\n",
    "process_x_train_data_path = \"./data/x_train_process.csv\"\n",
    "process_y_train_data_path = \"./data/y_train_process.csv\"\n",
    "process_x_test_data_path = \"./data/x_test_process.csv\"\n",
    "process_y_test_data_path = \"./data/y_test_process.csv\"\n",
    "\n",
    "# Keep the sample sizes small to avoid overwhelming the server.\n",
    "train_num = 10000\n",
    "test_num = 2000\n",
    "\n",
    "# The bank-additional CSV is semicolon-delimited, not comma-delimited.\n",
    "bank_data  = pd.read_csv(full_data_path,sep=\";\") "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Bucket the column names by dtype so each family gets matching preprocessing.\n",
    "string_features = bank_data.select_dtypes(include=\"object\").columns.to_numpy()\n",
    "int_features = bank_data.select_dtypes(include=\"int64\").columns.to_numpy()\n",
    "float_features = bank_data.select_dtypes(include=\"float64\").columns.to_numpy()\n",
    "numeric_features = np.concatenate([int_features, float_features])\n",
    "\n",
    "# Hand-curated groupings of the categorical columns.\n",
    "bin_features = ['default', 'housing', 'loan','y']\n",
    "order_features = ['education']\n",
    "disorder_features = ['poutcome', 'job', 'marital', 'contact', 'month','day_of_week']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 缺失值处理\n",
    "def Missing_value_perprocessing_mean (bank_data_small_train,bank_data_small_test):\n",
    "    col  = bank_data_small_train.columns\n",
    "    #Train_copy = Train.copy()\n",
    "    #直接使用平均值填补缺失值\n",
    "    from sklearn.preprocessing import Imputer\n",
    "    imp = Imputer(missing_values=np.nan, strategy='mean', axis=0)\n",
    "    imp.fit(bank_data_small_train)\n",
    "    bank_data_small_train = imp.transform(bank_data_small_train) \n",
    "    bank_data_small_test = imp.transform(bank_data_small_test) \n",
    "    bank_data_small_train = pd.DataFrame(bank_data_small_train,columns = col)\n",
    "    bank_data_small_test = pd.DataFrame(bank_data_small_test,columns = col)\n",
    "    return bank_data_small_train,bank_data_small_test "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Min-max scaling into [0, 1], preserving the column labels.\n",
    "def Scale_perprocessing (Train):\n",
    "    \"\"\"Return a copy of Train with every column min-max scaled to [0, 1].\n",
    "\n",
    "    NOTE(review): the scaler is fit on whatever frame is passed in, so the\n",
    "    caller ends up scaling train and test independently -- confirm that is\n",
    "    intended (it differs from the usual fit-on-train convention).\n",
    "    \"\"\"\n",
    "    col_names = Train.columns\n",
    "    scaler = preprocessing.MinMaxScaler()\n",
    "    # fit_transform requires a 2-D input and returns a bare ndarray.\n",
    "    scaled = scaler.fit_transform(Train.copy())\n",
    "    return pd.DataFrame(scaled, columns=col_names)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "#处理二分类的特征\n",
    "def bin_features_perprocessing (bin_features, bank_data):\n",
    "    for feature in bin_features:      \n",
    "        new = np.zeros(bank_data[feature].shape[0])\n",
    "        for rol in range(bank_data[feature].shape[0]):\n",
    "            if bank_data[feature][rol] == 'yes' :\n",
    "                new[rol] = 1\n",
    "            elif bank_data[feature][rol]  == 'no':\n",
    "                new[rol] = 0\n",
    "            else:\n",
    "                new[rol] = None\n",
    "        bank_data[feature] =  new   \n",
    "    return bank_data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "#特征值有次序关系的特征，按照特征值强弱排序（如：受教育程度）\n",
    "def order_features_perprocessing (order_features,bank_data):\n",
    "    education_values = [\"illiterate\", \"basic.4y\", \"basic.6y\", \"basic.9y\", \n",
    "    \"high.school\",  \"professional.course\", \"university.degree\",\"unknown\"]\n",
    "    replace_values = list(range(1,  len(education_values)))\n",
    "    replace_values.append(None)\n",
    "    #除了replace也可以用map()\n",
    "    bank_data[order_features] = bank_data[order_features].replace(education_values,replace_values)\n",
    "    bank_data[order_features] = bank_data[order_features].astype(\"float\")\n",
    "    return bank_data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "#特征值没有次序的特征，一律使用onehot编码\n",
    "def disorder_features_perprocessing (disorder_features, bank_data):\n",
    "    for features in disorder_features:\n",
    "        #做onehot\n",
    "        features_onehot = pd.get_dummies(bank_data[features])\n",
    "        #把名字改成features_values\n",
    "        features_onehot = features_onehot.rename(columns=lambda x: features+'_'+str(x))\n",
    "        #拼接onehot得到的新features\n",
    "        bank_data = pd.concat([bank_data,features_onehot],axis=1)\n",
    "        #删掉原来的feature columns\n",
    "        bank_data = bank_data.drop(features, axis=1)\n",
    "    return bank_data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/ubuntu/lib/python3.5/site-packages/sklearn/utils/deprecation.py:66: DeprecationWarning: Class Imputer is deprecated; Imputer was deprecated in version 0.20 and will be removed in 0.22. Import impute.SimpleImputer from sklearn instead.\n",
      "  warnings.warn(msg, category=DeprecationWarning)\n"
     ]
    }
   ],
   "source": [
    "# Shuffle once (fixed seed for reproducibility), then split train/test.\n",
    "bank_data = bank_data.sample(frac=1,random_state=12)\n",
    "# Export the raw (un-encoded) train/test rows for exploratory analysis.\n",
    "(bank_data.iloc[0:train_num,:]).to_csv(oraginal_train_path,index = False)\n",
    "(bank_data.iloc[train_num:(train_num+test_num),:]).to_csv(oraginal_test_path,index = False)\n",
    "# Encode binary yes/no features as 1/0.\n",
    "bank_data = bin_features_perprocessing(bin_features, bank_data)\n",
    "# Encode the ordered feature (education) as numeric ranks.\n",
    "bank_data = order_features_perprocessing(order_features, bank_data)\n",
    "# One-hot encode the unordered categorical features.\n",
    "bank_data = disorder_features_perprocessing(disorder_features, bank_data)\n",
    "\n",
    "bank_data_train = bank_data.iloc[0:train_num,:]\n",
    "bank_data_test = bank_data.iloc[train_num:(train_num+test_num),:]\n",
    "\n",
    "# Mean-impute, with the imputation statistics fit on the train split only.\n",
    "bank_data_train,bank_data_test = Missing_value_perprocessing_mean(bank_data_train,bank_data_test)\n",
    "\n",
    "X_train = bank_data_train.drop(['y'], axis=1).copy()\n",
    "y_train = pd.DataFrame(bank_data_train['y'],columns = ['y'])\n",
    "\n",
    "X_test = bank_data_test.drop(['y'], axis=1).copy()\n",
    "y_test = pd.DataFrame(bank_data_test['y'],columns = ['y'])\n",
    "\n",
    "# NOTE(review): train and test are min-max scaled independently here, so the\n",
    "# test features use their own min/max rather than the train split's -- confirm\n",
    "# this is intended (it differs from the usual fit-on-train convention).\n",
    "X_train = Scale_perprocessing(X_train)\n",
    "X_test = Scale_perprocessing(X_test)\n",
    "\n",
    "X_test.to_csv(process_x_test_data_path,index = False)\n",
    "y_test.to_csv(process_y_test_data_path,index = False)\n",
    "X_train.to_csv(process_x_train_data_path,index = False)\n",
    "y_train.to_csv(process_y_train_data_path,index = False)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.5.2"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
