{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "98fadb98-6e6f-422a-a1c7-7aea4beb1e90",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Path to the demo dataset: columns id, y and features x0..x9\n",
    "filepath = \"./data/fujian-poc/function_pm.csv\""
   ]
  },
  {
   "cell_type": "markdown",
   "id": "bbba3bb6-ebcf-4704-84f1-c49bbff1c565",
   "metadata": {},
   "source": [
    "### 归一化\n",
    "min-max 归一化"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 98,
   "id": "ddc5c9b1-9057-4250-bfc0-f2689d84151d",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "      id  y        x0        x1        x2        x3        x4        x5  \\\n",
      "0    133  1  0.340448  0.192697  0.318193  0.181282  0.337648  0.162616   \n",
      "1    273  1  0.100320  0.236141  0.088052  0.041781  0.498778  0.064431   \n",
      "2    175  1  0.047385  0.133795  0.039594  0.018187  0.297365  0.041990   \n",
      "3    551  1  0.145500  0.432836  0.136411  0.061787  0.247837  0.146414   \n",
      "4    199  0  0.369975  0.482409  0.336122  0.211070  0.554910  0.367038   \n",
      "..   ... ..       ...       ...       ...       ...       ...       ...   \n",
      "495  492  0  0.483814  0.374200  0.463121  0.304955  0.394440  0.199290   \n",
      "496  181  0  0.667022  0.571962  0.627970  0.467902  0.514627  0.709327   \n",
      "497  427  1  0.171825  0.533582  0.165745  0.074789  0.390477  0.138070   \n",
      "498  471  1  0.201708  0.567964  0.183425  0.093984  0.217460  0.067885   \n",
      "499  515  1  0.161508  0.293443  0.143135  0.072110  0.509344  0.126233   \n",
      "\n",
      "           x6        x7        x8        x9  \n",
      "0    0.136502  0.472165  0.228267  0.102781  \n",
      "1    0.032292  0.177285  0.251528  0.175193  \n",
      "2    0.000000  0.000000  0.202444  0.153745  \n",
      "3    0.124920  0.220378  0.316184  0.165814  \n",
      "4    0.396725  0.631615  0.628425  0.303424  \n",
      "..        ...       ...       ...       ...  \n",
      "495  0.203195  0.511684  0.332348  0.139118  \n",
      "496  0.541534  0.997594  0.499310  0.481175  \n",
      "497  0.153914  0.257217  0.275971  0.141545  \n",
      "498  0.044122  0.190619  0.165385  0.074446  \n",
      "499  0.129712  0.293540  0.294697  0.083891  \n",
      "\n",
      "[500 rows x 12 columns]\n"
     ]
    }
   ],
   "source": [
    "import pandas\n",
    "\n",
    "def min_max(df):\n",
    "    \"\"\"Min-max normalize every feature column of ``df`` in place.\n",
    "\n",
    "    Columns named \"id\" and \"y\" are skipped. Each remaining column is\n",
    "    scaled to [0, 1] via (v - min) / (max - min). A constant column\n",
    "    (max == min) is filled with 0.0; the previous per-element lambda\n",
    "    raised ZeroDivisionError in that case.\n",
    "    \"\"\"\n",
    "    columns = df.columns.values\n",
    "    features = [col for col in columns if col not in [\"id\", \"y\"]]\n",
    "    for col_name in features:\n",
    "        min_v = df[col_name].min()\n",
    "        max_v = df[col_name].max()\n",
    "        if max_v == min_v:\n",
    "            # Degenerate column: no spread, define the scaled value as 0.0.\n",
    "            df[col_name] = 0.0\n",
    "        else:\n",
    "            # Vectorized arithmetic instead of Series.apply with a lambda.\n",
    "            df[col_name] = (df[col_name] - min_v) / (max_v - min_v)\n",
    "\n",
    "\n",
    "df = pandas.read_csv(filepath)\n",
    "min_max(df)\n",
    "print(df)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "dabe412f-d5a8-455f-a553-c6fd8daf81ec",
   "metadata": {},
   "source": [
    "### 标准化\n",
    "这里是z-score 标准化"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 99,
   "id": "5e6c46e7-6658-4cf4-848d-5d7f448d00ba",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "new data: \n",
      "     id  y        x0        x1        x2        x3        x4        x5  \\\n",
      "0  133  1  0.233070 -1.057552  0.186280  0.054005 -0.416952 -0.372160   \n",
      "1  273  1 -1.151743 -0.794875 -1.172874 -0.929110  0.640345 -1.007174   \n",
      "2  175  1 -1.457018 -1.413698 -1.459059 -1.095387 -0.681277 -1.152312   \n",
      "3  551  1 -0.891193  0.394425 -0.887279 -0.788121 -1.006265 -0.476950   \n",
      "4  199  0  0.403352  0.694168  0.292165  0.263930  1.008666  0.949951   \n",
      "5  274  0  0.934710  1.430632  0.798061  0.741977 -0.018299 -0.461890   \n",
      "6  420  1 -0.675777  0.188151 -0.633448 -0.642801 -0.304289 -0.379689   \n",
      "7   76  1 -0.468568 -2.146939 -0.488444 -0.495576  0.571015 -0.729826   \n",
      "8  315  1 -0.620385 -0.983422 -0.690803 -0.601751 -0.932601 -1.284335   \n",
      "9  399  1 -0.597818 -0.213117 -0.646096 -0.570920 -0.325955 -0.512089   \n",
      "\n",
      "         x6        x7        x8        x9  \n",
      "0 -0.488166  0.338710 -0.274449 -0.711415  \n",
      "1 -1.102868 -0.953301 -0.089360 -0.108860  \n",
      "2 -1.293352 -1.730073 -0.479928 -0.287334  \n",
      "3 -0.556482 -0.764490  0.425123 -0.186909  \n",
      "4  1.046823  1.037338  2.909702  0.958162  \n",
      "5 -0.313371 -0.019638 -0.617961 -0.232755  \n",
      "6 -0.087222 -0.726698  0.214938 -0.049369  \n",
      "7 -0.891042 -0.614827 -0.294840 -0.645920  \n",
      "8 -1.202045 -1.310895 -1.538698 -1.200990  \n",
      "9 -0.610663 -0.923639 -0.186610 -0.138334  \n"
     ]
    }
   ],
   "source": [
    "import pandas as pd\n",
    "\n",
    "pd_data = pd.read_csv(filepath)\n",
    "\n",
    "def z_score(series: pd.Series) -> pd.Series:\n",
    "    \"\"\"Return a z-score standardized copy of ``series``.\n",
    "\n",
    "    Uses the sample standard deviation (pandas default, ddof=1).\n",
    "    A constant series (std == 0) maps to all zeros; the previous\n",
    "    per-element lambda raised ZeroDivisionError in that case.\n",
    "    \"\"\"\n",
    "    mean = series.mean()\n",
    "    std = series.std()\n",
    "    if std == 0:\n",
    "        return series * 0.0\n",
    "    # Vectorized arithmetic instead of Series.apply with a lambda.\n",
    "    return (series - mean) / std\n",
    "\n",
    "columns = pd_data.columns.values\n",
    "features = [col for col in columns if col not in [\"id\", \"y\"]]\n",
    "for col_name in features:\n",
    "    pd_data[col_name] = z_score(pd_data[col_name])\n",
    "\n",
    "print(\"new data: \\n\", pd_data.head(10)[pd_data.columns.values])"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "1a9a16d8-39df-47d8-983c-d77972dbc3bb",
   "metadata": {},
   "source": [
    "### 缺失值处理\n",
    "\n",
    "可以定义参数，指定每个特征如何处理，参数示例如下：\n",
    "```json\n",
    "rules = {\n",
    "    'x1':{'method':'mean', 'value':0.0}, \n",
    "    'x2':{'method':'mean', 'value':0.0}\n",
    "}\n",
    "```\n",
    "如果不指定参数，则默认对所有特征的缺失值填充平均值\n",
    "\n",
    "支持的填充方式(method)有：\n",
    "- max：最大值\n",
    "- min：最小值\n",
    "- mean：平均值\n",
    "- median：中位数\n",
    "- mode：众数\n",
    "- const：常数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 100,
   "id": "b727670e-78b3-4f65-82fc-07b42d8b21ba",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "      id  y        x0        x1        x2        x3        x4        x5  \\\n",
      "0    133  1  0.254879 -1.046633  0.209656  0.074214 -0.441366 -0.377645   \n",
      "1    273  1 -1.142928 -0.781198 -1.166747 -0.923578  0.628230 -1.021418   \n",
      "2    175  1 -1.451067 -1.406518 -1.456564 -1.092337 -0.708765 -1.168557   \n",
      "3    551  1 -0.879933  0.420589 -0.877527 -0.780484 -1.037534 -0.483880   \n",
      "4    199  0  0.426758  0.723479  0.316885  0.287273  1.000835  0.962702   \n",
      "..   ... ..       ...       ...       ...       ...       ...       ...   \n",
      "495  492  0  1.089422  0.062333  1.076424  0.958795 -0.064377 -0.137184   \n",
      "496  181  0  2.155897  1.270634  2.062335  2.124291  0.733436  3.207003   \n",
      "497  427  1 -0.726692  1.036139 -0.702088 -0.687490 -0.090679 -0.538588   \n",
      "498  471  1 -0.552743  1.246207 -0.596349 -0.550197 -1.239179 -0.998771   \n",
      "499  515  1 -0.786746 -0.431084 -0.837316 -0.706651  0.698367 -0.616197   \n",
      "\n",
      "           x6        x7        x8        x9  \n",
      "0   -0.485934  0.347072 -0.287570 -0.733474  \n",
      "1   -1.111867 -0.959523 -0.096672 -0.121683  \n",
      "2   -1.305831 -1.745063 -0.499499 -0.302893  \n",
      "3   -0.555498 -0.768581  0.433960 -0.200928  \n",
      "4    1.077099  1.053586  2.996525  0.961696  \n",
      "..        ...       ...       ...       ...  \n",
      "495 -0.085341  0.522178  0.566618 -0.426470  \n",
      "496  1.946890  2.675218  1.936879  2.463465  \n",
      "497 -0.381348 -0.605352  0.103933 -0.405966  \n",
      "498 -1.040815 -0.900443 -0.803642 -0.972870  \n",
      "499 -0.526713 -0.444407  0.257622 -0.893071  \n",
      "\n",
      "[500 rows x 12 columns]\n"
     ]
    }
   ],
   "source": [
    "class FillMissValueMethod(object):\n",
    "    \"\"\"Names of the supported missing-value fill strategies.\"\"\"\n",
    "    MAX = \"max\"\n",
    "    MIN = \"min\"\n",
    "    MEAN = \"mean\"\n",
    "    MEDIAN = \"median\"\n",
    "    MODE = \"mode\"\n",
    "    CONST = \"const\"\n",
    "\n",
    "\n",
    "def calc_statistics_and_fill_result(df, rules):\n",
    "    \"\"\"Resolve, per column, the fill value dictated by its rule.\n",
    "\n",
    "    Returns a pair ``(statistics, fill_result)``: ``statistics`` maps\n",
    "    each column to its fill value (ready for ``DataFrame.fillna``);\n",
    "    ``fill_result`` echoes the rule plus the column's missing count and\n",
    "    the resolved value, both as floats.\n",
    "    \"\"\"\n",
    "    statistics, fill_result = {}, {}\n",
    "    for col_name, rule in rules.items():\n",
    "        observed = df[col_name].dropna()\n",
    "        method = rule[\"method\"]\n",
    "        if method == FillMissValueMethod.CONST:\n",
    "            v = rule[\"value\"]\n",
    "        else:\n",
    "            # Statistic-based strategies, dispatched through a table.\n",
    "            extractors = {\n",
    "                FillMissValueMethod.MAX: observed.max,\n",
    "                FillMissValueMethod.MIN: observed.min,\n",
    "                FillMissValueMethod.MEAN: observed.mean,\n",
    "                FillMissValueMethod.MEDIAN: observed.median,\n",
    "                FillMissValueMethod.MODE: lambda: observed.mode()[0],\n",
    "            }\n",
    "            assert method in extractors\n",
    "            v = extractors[method]()\n",
    "\n",
    "        statistics[col_name] = v\n",
    "\n",
    "        entry = fill_result.setdefault(col_name, {})\n",
    "        entry.update(rule)\n",
    "        entry[\"missing_count\"] = float(df[col_name].isna().sum())\n",
    "        entry[\"value\"] = float(v)\n",
    "\n",
    "    return statistics, fill_result\n",
    "\n",
    "\n",
    "rules = {}\n",
    "\n",
    "df = pandas.read_csv(filepath)\n",
    "columns = df.columns.values\n",
    "features = [col for col in columns if col not in [\"id\", \"y\"]]\n",
    "\n",
    "if not rules:\n",
    "    # Default: fill every feature's missing values with its mean.\n",
    "    rules = {feature: {'method': 'mean', 'value': 0.0} for feature in features}\n",
    "\n",
    "df_x = df[features]\n",
    "statistics, fill_result = calc_statistics_and_fill_result(df_x, rules)\n",
    "df.fillna(statistics, inplace=True)\n",
    "print(df)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "58cdd9cd-661d-4dc9-9622-c5680aefd96b",
   "metadata": {
    "tags": []
   },
   "source": [
    "### 特征分箱-等频"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 101,
   "id": "7c3ddfe9-8844-4685-ae35-fb88540a37d0",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "quantile binning: \n",
      "       id  y   x0   x1   x2   x3   x4   x5   x6   x7   x8   x9\n",
      "0    133  1  3.0  0.0  3.0  3.0  1.0  2.0  1.0  3.0  2.0  1.0\n",
      "1    273  1  0.0  1.0  0.0  0.0  3.0  0.0  0.0  0.0  2.0  2.0\n",
      "2    175  1  0.0  0.0  0.0  0.0  1.0  0.0  0.0  0.0  1.0  2.0\n",
      "3    551  1  0.0  3.0  0.0  0.0  0.0  1.0  1.0  1.0  3.0  2.0\n",
      "4    199  0  3.0  3.0  3.0  3.0  4.0  4.0  4.0  4.0  4.0  4.0\n",
      "..   ... ..  ...  ...  ...  ...  ...  ...  ...  ...  ...  ...\n",
      "495  492  0  4.0  2.0  4.0  4.0  2.0  2.0  2.0  3.0  4.0  1.0\n",
      "496  181  0  4.0  4.0  4.0  4.0  3.0  4.0  4.0  4.0  4.0  4.0\n",
      "497  427  1  1.0  4.0  1.0  1.0  2.0  1.0  2.0  1.0  3.0  1.0\n",
      "498  471  1  1.0  4.0  1.0  1.0  0.0  0.0  0.0  0.0  0.0  0.0\n",
      "499  515  1  1.0  1.0  0.0  1.0  3.0  1.0  1.0  2.0  3.0  0.0\n",
      "\n",
      "[500 rows x 12 columns]\n"
     ]
    }
   ],
   "source": [
    "import math\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "from sklearn.preprocessing import OrdinalEncoder\n",
    "\n",
    "# one_horz_filepath = \"./data/fujian-poc/horz.csv\"\n",
    "pd_data_raw = pd.read_csv(filepath)\n",
    "\n",
    "# Work on a copy so the raw frame stays available for fitting the bins.\n",
    "pd_data_quantile = pd_data_raw.copy()\n",
    "\n",
    "\n",
    "def fit_split_points_quantile(data, max_interval):\n",
    "    \"\"\"Return sorted left edges of an equal-frequency (quantile) binning.\n",
    "\n",
    "    With duplicates=\"drop\", fewer than ``max_interval`` edges may come\n",
    "    back when quantile boundaries coincide.\n",
    "    \"\"\"\n",
    "    pd_bins = pd.qcut(data, max_interval, duplicates=\"drop\")\n",
    "    points = []\n",
    "    for v in pd_bins.unique():\n",
    "        points.append(v.left)\n",
    "    return np.array(sorted(points))\n",
    "\n",
    "\n",
    "def get_interval(points):\n",
    "    \"\"\"Turn sorted split points into left-closed intervals.\n",
    "\n",
    "    The last interval extends to +inf so any value >= points[-1] still\n",
    "    falls into a bin.\n",
    "    \"\"\"\n",
    "    intervals = []\n",
    "    for n, i in enumerate(points):  # ``i`` is unused; only ``n`` matters\n",
    "        if n + 1 == len(points):\n",
    "            iv = pd.Interval(left=points[n], right=np.inf, closed='left')\n",
    "        else:\n",
    "            iv = pd.Interval(left=points[n], right=points[n + 1], closed='left')\n",
    "        intervals.append(iv)\n",
    "    return intervals\n",
    "\n",
    "\n",
    "def transform(data, points):\n",
    "    \"\"\"Map each value of ``data`` to the interval that contains it.\n",
    "\n",
    "    ``_fn`` returns a one-element Series, so ``Series.apply`` expands\n",
    "    the result into a single-column DataFrame, which ``ordinal`` below\n",
    "    then consumes via ``.values``.\n",
    "    \"\"\"\n",
    "    assert data.min() >= min(points)\n",
    "    intervals = get_interval(points)\n",
    "\n",
    "    def _fn(col: pd.Series):\n",
    "        # NOTE(review): despite the annotation, ``col`` is a scalar\n",
    "        # element of ``data`` here, not a Series.\n",
    "        row = None  # stays None if no interval matches\n",
    "        for iv in intervals:\n",
    "            if col in iv:\n",
    "                row = iv\n",
    "                break\n",
    "\n",
    "        return pd.Series([\n",
    "            row,\n",
    "        ])\n",
    "\n",
    "    data = data.apply(_fn)\n",
    "    return data\n",
    "\n",
    "\n",
    "def ordinal(series):\n",
    "    \"\"\"Ordinal-encode ``series``; return (codes, category -> code map).\"\"\"\n",
    "    oe = OrdinalEncoder()\n",
    "    trans = oe.fit_transform(series.values.reshape(-1, 1))\n",
    "    res = trans.reshape(1, -1)[0]\n",
    "    trans_dict = {}\n",
    "    for n, i in enumerate(oe.categories_[0]):\n",
    "        trans_dict.update({i: n})\n",
    "    return res, trans_dict\n",
    "\n",
    "\n",
    "columns = pd_data_raw.columns.values\n",
    "features = [col for col in columns if col not in [\"id\", \"y\"]]\n",
    "\n",
    "bin_num = 5  # target number of equal-frequency bins per feature\n",
    "for x_col in features:\n",
    "    # print(\"raw data: \\n\", pd_data_raw[x_col])\n",
    "    points = fit_split_points_quantile(pd_data_raw[x_col], bin_num)\n",
    "\n",
    "    pd_data_trans_1 = transform(pd_data_quantile[x_col], points)\n",
    "    pd_data_quantile[x_col], _ = ordinal(pd_data_trans_1)\n",
    "\n",
    "print(\"quantile binning: \\n\", pd_data_quantile)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "77970081-e8d5-45f3-a74b-892f1ce6d797",
   "metadata": {
    "tags": []
   },
   "source": [
    "### 特征分箱-等宽"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 102,
   "id": "2602420d-c944-421d-a562-fcafe79a4bbc",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "bucket binning: \n",
      "       id  y   x0   x1   x2   x3   x4   x5   x6   x7   x8   x9\n",
      "0    133  1  1.0  0.0  1.0  0.0  1.0  0.0  0.0  2.0  1.0  0.0\n",
      "1    273  1  0.0  1.0  0.0  0.0  2.0  0.0  0.0  0.0  1.0  0.0\n",
      "2    175  1  0.0  0.0  0.0  0.0  1.0  0.0  0.0  0.0  1.0  0.0\n",
      "3    551  1  0.0  2.0  0.0  0.0  1.0  0.0  0.0  1.0  1.0  0.0\n",
      "4    199  0  1.0  2.0  1.0  1.0  2.0  1.0  1.0  3.0  3.0  1.0\n",
      "..   ... ..  ...  ...  ...  ...  ...  ...  ...  ...  ...  ...\n",
      "495  492  0  2.0  1.0  2.0  1.0  1.0  0.0  1.0  2.0  1.0  0.0\n",
      "496  181  0  3.0  2.0  3.0  2.0  2.0  3.0  2.0  4.0  2.0  2.0\n",
      "497  427  1  0.0  2.0  0.0  0.0  1.0  0.0  0.0  1.0  1.0  0.0\n",
      "498  471  1  1.0  2.0  0.0  0.0  1.0  0.0  0.0  0.0  0.0  0.0\n",
      "499  515  1  0.0  1.0  0.0  0.0  2.0  0.0  0.0  1.0  1.0  0.0\n",
      "\n",
      "[500 rows x 12 columns]\n"
     ]
    }
   ],
   "source": [
    "import math\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "from sklearn.preprocessing import OrdinalEncoder\n",
    "\n",
    "# NOTE(review): get_interval/transform/ordinal below duplicate the\n",
    "# equal-frequency cell above; consider extracting them to a shared module.\n",
    "# one_horz_filepath = \"./data/fujian-poc/horz.csv\"\n",
    "pd_data_raw = pd.read_csv(filepath)\n",
    "\n",
    "pd_data_bucket = pd_data_raw.copy()\n",
    "\n",
    "def fit_split_points_bucket(data, max_interval):\n",
    "    \"\"\"Return sorted left edges of an equal-width binning via pd.cut.\"\"\"\n",
    "    pd_bins = pd.cut(data, max_interval)\n",
    "    points = []\n",
    "    for v in pd_bins.unique():\n",
    "        points.append(v.left)\n",
    "    return np.array(sorted(points))\n",
    "\n",
    "def get_interval(points):\n",
    "    \"\"\"Turn sorted split points into left-closed intervals; the last one\n",
    "    extends to +inf so any value >= points[-1] still falls into a bin.\"\"\"\n",
    "    intervals = []\n",
    "    for n, i in enumerate(points):  # ``i`` is unused; only ``n`` matters\n",
    "        if n + 1 == len(points):\n",
    "            iv = pd.Interval(left=points[n], right=np.inf, closed='left')\n",
    "        else:\n",
    "            iv = pd.Interval(left=points[n], right=points[n + 1], closed='left')\n",
    "        intervals.append(iv)\n",
    "    return intervals\n",
    "\n",
    "\n",
    "def transform(data, points):\n",
    "    \"\"\"Map each value of ``data`` to its containing interval.\n",
    "\n",
    "    ``_fn`` returns a one-element Series, so ``Series.apply`` expands\n",
    "    the result into a single-column DataFrame for ``ordinal`` below.\n",
    "    \"\"\"\n",
    "    assert data.min() >= min(points)\n",
    "    intervals = get_interval(points)\n",
    "\n",
    "    def _fn(col: pd.Series):\n",
    "        # NOTE(review): despite the annotation, ``col`` is a scalar\n",
    "        # element of ``data`` here, not a Series.\n",
    "        row = None  # stays None if no interval matches\n",
    "        for iv in intervals:\n",
    "            if col in iv:\n",
    "                row = iv\n",
    "                break\n",
    "\n",
    "        return pd.Series([\n",
    "            row,\n",
    "        ])\n",
    "\n",
    "    data = data.apply(_fn)\n",
    "    return data\n",
    "\n",
    "\n",
    "def ordinal(series):\n",
    "    \"\"\"Ordinal-encode ``series``; return (codes, category -> code map).\"\"\"\n",
    "    oe = OrdinalEncoder()\n",
    "    trans = oe.fit_transform(series.values.reshape(-1, 1))\n",
    "    res = trans.reshape(1, -1)[0]\n",
    "    trans_dict = {}\n",
    "    for n, i in enumerate(oe.categories_[0]):\n",
    "        trans_dict.update({i: n})\n",
    "    return res, trans_dict\n",
    "\n",
    "\n",
    "columns = pd_data_raw.columns.values\n",
    "features = [col for col in columns if col not in [\"id\", \"y\"]]\n",
    "\n",
    "bin_num = 5  # target number of equal-width bins per feature\n",
    "for x_col in features:\n",
    "    points = fit_split_points_bucket(pd_data_raw[x_col], bin_num)\n",
    "    pd_data_trans_1 = transform(pd_data_raw[x_col], points)\n",
    "    pd_data_bucket[x_col], _ = ordinal(pd_data_trans_1)\n",
    "\n",
    "print(\"bucket binning: \\n\", pd_data_bucket)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "995763df-12af-453b-8570-386171842359",
   "metadata": {},
   "source": [
    "### onehot编码"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 104,
   "id": "2fb843a3-4f0a-4226-bfc9-4995d8b14730",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "      id  y  x0_0.0  x0_1.0  x0_2.0  x0_3.0  x0_4.0  x1_0.0  x1_1.0  x1_2.0  \\\n",
      "0    133  1     0.0     1.0     0.0     0.0     0.0     1.0     0.0     0.0   \n",
      "1    273  1     1.0     0.0     0.0     0.0     0.0     0.0     1.0     0.0   \n",
      "2    175  1     1.0     0.0     0.0     0.0     0.0     1.0     0.0     0.0   \n",
      "3    551  1     1.0     0.0     0.0     0.0     0.0     0.0     0.0     1.0   \n",
      "4    199  0     0.0     1.0     0.0     0.0     0.0     0.0     0.0     1.0   \n",
      "..   ... ..     ...     ...     ...     ...     ...     ...     ...     ...   \n",
      "495  492  0     0.0     0.0     1.0     0.0     0.0     0.0     1.0     0.0   \n",
      "496  181  0     0.0     0.0     0.0     1.0     0.0     0.0     0.0     1.0   \n",
      "497  427  1     1.0     0.0     0.0     0.0     0.0     0.0     0.0     1.0   \n",
      "498  471  1     0.0     1.0     0.0     0.0     0.0     0.0     0.0     1.0   \n",
      "499  515  1     1.0     0.0     0.0     0.0     0.0     0.0     1.0     0.0   \n",
      "\n",
      "     ...  x8_0.0  x8_1.0  x8_2.0  x8_3.0  x8_4.0  x9_0.0  x9_1.0  x9_2.0  \\\n",
      "0    ...     0.0     1.0     0.0     0.0     0.0     1.0     0.0     0.0   \n",
      "1    ...     0.0     1.0     0.0     0.0     0.0     1.0     0.0     0.0   \n",
      "2    ...     0.0     1.0     0.0     0.0     0.0     1.0     0.0     0.0   \n",
      "3    ...     0.0     1.0     0.0     0.0     0.0     1.0     0.0     0.0   \n",
      "4    ...     0.0     0.0     0.0     1.0     0.0     0.0     1.0     0.0   \n",
      "..   ...     ...     ...     ...     ...     ...     ...     ...     ...   \n",
      "495  ...     0.0     1.0     0.0     0.0     0.0     1.0     0.0     0.0   \n",
      "496  ...     0.0     0.0     1.0     0.0     0.0     0.0     0.0     1.0   \n",
      "497  ...     0.0     1.0     0.0     0.0     0.0     1.0     0.0     0.0   \n",
      "498  ...     1.0     0.0     0.0     0.0     0.0     1.0     0.0     0.0   \n",
      "499  ...     0.0     1.0     0.0     0.0     0.0     1.0     0.0     0.0   \n",
      "\n",
      "     x9_3.0  x9_4.0  \n",
      "0       0.0     0.0  \n",
      "1       0.0     0.0  \n",
      "2       0.0     0.0  \n",
      "3       0.0     0.0  \n",
      "4       0.0     0.0  \n",
      "..      ...     ...  \n",
      "495     0.0     0.0  \n",
      "496     0.0     0.0  \n",
      "497     0.0     0.0  \n",
      "498     0.0     0.0  \n",
      "499     0.0     0.0  \n",
      "\n",
      "[500 rows x 52 columns]\n"
     ]
    }
   ],
   "source": [
    "import pandas as pd\n",
    "from sklearn.preprocessing import OneHotEncoder\n",
    "\n",
    "\n",
    "def one_hot(series):\n",
    "    \"\"\"One-hot encode ``series``.\n",
    "\n",
    "    Returns ``(matrix, mapping)``: ``matrix`` is a dense\n",
    "    (n_samples, n_categories) array and ``mapping`` maps each category\n",
    "    to its indicator bit-string, e.g. \"10000\" for the first category.\n",
    "    \"\"\"\n",
    "    encoder = OneHotEncoder()\n",
    "    matrix = encoder.fit_transform(series.values.reshape(-1, 1)).toarray()\n",
    "    categories = encoder.categories_[0]\n",
    "    width = len(categories)\n",
    "    mapping = {\n",
    "        category: ''.join(\"1\" if pos == idx else \"0\" for pos in range(width))\n",
    "        for idx, category in enumerate(categories)\n",
    "    }\n",
    "    return matrix, mapping\n",
    "\n",
    "\n",
    "def transform(col_li, pd_data):\n",
    "    \"\"\"Replace each column in ``col_li`` with its one-hot indicator columns.\n",
    "\n",
    "    New columns are named ``{col}_{category}`` in category order; the\n",
    "    original column is dropped. Returns the widened DataFrame.\n",
    "    \"\"\"\n",
    "    for column in col_li:\n",
    "        matrix, mapping = one_hot(pd_data[column])\n",
    "        for pos, category in enumerate(mapping):\n",
    "            pd_data[f\"{column}_{category}\"] = matrix[:, pos]\n",
    "        pd_data = pd_data.drop(column, axis=1)\n",
    "    return pd_data\n",
    "\n",
    "\n",
    "# Encode the equal-width binned frame produced by the cell above.\n",
    "pd_data_onehot = pd_data_bucket.copy()\n",
    "columns = pd_data_onehot.columns.values\n",
    "features = [col for col in columns if col not in [\"id\", \"y\"]]\n",
    "\n",
    "pd_data_trans = transform(features, pd_data_onehot)\n",
    "print(pd_data_trans)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a9648d0f-c983-407d-b8e2-2a8d8e468a12",
   "metadata": {},
   "source": [
    "### label编码"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 105,
   "id": "b1014672-3f2b-439d-a3f3-b784f4f2f8ed",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "      id  y   x0   x1   x2   x3   x4   x5   x6   x7   x8   x9\n",
      "0    133  1  271   68  315  327  140  204  176  273  190  116\n",
      "1    273  1   27  111   24   28  278   50   36   62  233  269\n",
      "2    175  1    4   30    4    4  108   22    0    0  146  222\n",
      "3    551  1   67  310   73   69   73  173  164   99  326  251\n",
      "4    199  0  288  342  324  353  315  393  401  353  434  404\n",
      "..   ... ..  ...  ...  ...  ...  ...  ...  ...  ...  ...  ...\n",
      "495  492  0  332  247  382  401  188  259  257  295  347  186\n",
      "496  181  0  392  408  431  460  286  462  444  435  420  459\n",
      "497  427  1   92  387  117  104  185  161  204  129  270  191\n",
      "498  471  1  132  404  154  166   56   55   50   68   83   60\n",
      "499  515  1   84  166   82  101  285  139  170  171  298   75\n",
      "\n",
      "[500 rows x 12 columns]\n"
     ]
    }
   ],
   "source": [
    "from sklearn import preprocessing\n",
    "\n",
    "\n",
    "def label_encode(df, encode_columns, left_columns):\n",
    "    \"\"\"Label-encode ``encode_columns`` of ``df``.\n",
    "\n",
    "    Object-dtype columns are cast to str first so LabelEncoder sees\n",
    "    uniform values. Returns a new DataFrame of ``left_columns`` (kept\n",
    "    as-is) followed by the integer-encoded columns.\n",
    "    \"\"\"\n",
    "    # Work on a copy: assigning into a slice of ``df`` would trigger\n",
    "    # SettingWithCopyWarning and could silently fail to stick.\n",
    "    df_transform = df[encode_columns].copy()\n",
    "    column_types = {\n",
    "        col: t.char\n",
    "        for col, t in dict(df_transform.dtypes).items()\n",
    "    }\n",
    "    for col, d_char in column_types.items():\n",
    "        # BUG FIX: numpy's dtype char for object is the letter \"O\",\n",
    "        # not the digit \"0\"; the old check never matched, so object\n",
    "        # columns were never cast to str.\n",
    "        if d_char == \"O\":\n",
    "            df_transform[col] = df_transform[col].astype(\"str\")\n",
    "\n",
    "    encoder_map = {\n",
    "        col: preprocessing.LabelEncoder()\n",
    "        for col in encode_columns\n",
    "    }\n",
    "    column_encoder = {}\n",
    "    for col in encode_columns:\n",
    "        column_encoder[col] = encoder_map[col].fit_transform(df_transform[col])\n",
    "\n",
    "    # category -> integer code, per column (kept for inspection).\n",
    "    info_class = {}\n",
    "    for col in encoder_map:\n",
    "        for idx, cls_ in enumerate(list(encoder_map[col].classes_)):\n",
    "            info_class.setdefault(col, {})[cls_] = idx\n",
    "\n",
    "    df_encoder = pandas.DataFrame(column_encoder)\n",
    "    concat_df = [\n",
    "        df[left_columns].reset_index(drop=True),\n",
    "        df_encoder.reset_index(drop=True)\n",
    "    ]\n",
    "    return pandas.concat(concat_df, axis=1)\n",
    "\n",
    "\n",
    "df = pandas.read_csv(filepath)\n",
    "columns = df.columns.values\n",
    "encode_columns = [column for column in columns if column not in [\"id\", \"y\"]]\n",
    "left_columns = [column for column in columns if column in [\"id\", \"y\"]]\n",
    "\n",
    "labeled_df = label_encode(df, encode_columns, left_columns)\n",
    "print(labeled_df)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "8650af02-760c-4dc4-8f1a-6fdc822868b8",
   "metadata": {},
   "source": [
    "### 斯皮尔曼"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 106,
   "id": "44108d2e-0f68-4d90-8458-e96316aa04d1",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "          x0        x1        x2        x3        x4        x5        x6  \\\n",
      "x0  1.000000  0.386367  0.993628  0.998921  0.225754  0.562480  0.658486   \n",
      "x1  0.386367  1.000000  0.396573  0.386952  0.227079  0.347959  0.388180   \n",
      "x2  0.993628  0.396573  1.000000  0.992663  0.246573  0.616715  0.703668   \n",
      "x3  0.998921  0.386952  0.992663  1.000000  0.217765  0.554573  0.653975   \n",
      "x4  0.225754  0.227079  0.246573  0.217765  1.000000  0.562667  0.524881   \n",
      "x5  0.562480  0.347959  0.616715  0.554573  0.562667  1.000000  0.917411   \n",
      "x6  0.658486  0.388180  0.703668  0.653975  0.524881  0.917411  1.000000   \n",
      "x7  0.780615  0.369382  0.813439  0.774194  0.553822  0.852918  0.907852   \n",
      "x8  0.259262  0.228734  0.284339  0.251083  0.521049  0.545430  0.487643   \n",
      "x9  0.139312  0.190610  0.189914  0.131329  0.619929  0.765592  0.630671   \n",
      "\n",
      "          x7        x8        x9  \n",
      "x0  0.780615  0.259262  0.139312  \n",
      "x1  0.369382  0.228734  0.190610  \n",
      "x2  0.813439  0.284339  0.189914  \n",
      "x3  0.774194  0.251083  0.131329  \n",
      "x4  0.553822  0.521049  0.619929  \n",
      "x5  0.852918  0.545430  0.765592  \n",
      "x6  0.907852  0.487643  0.630671  \n",
      "x7  1.000000  0.462797  0.534004  \n",
      "x8  0.462797  1.000000  0.520485  \n",
      "x9  0.534004  0.520485  1.000000  \n"
     ]
    }
   ],
   "source": [
    "import pandas as pd\n",
    "\n",
    "# Spearman rank correlation matrix over the feature columns only.\n",
    "df = pd.read_csv(filepath)\n",
    "columns = df.columns.values\n",
    "features = [name for name in columns if name not in [\"id\", \"y\"]]\n",
    "df_x = df[features]\n",
    "\n",
    "r = df_x.corr(method='spearman')\n",
    "print(r)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "1d5dea25-67f3-4223-b654-857a27357b1d",
   "metadata": {},
   "source": [
    "### WOE IV"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 107,
   "id": "f5fa8966-2922-4553-991d-a9c1f4247df4",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "woe_dict= {'x0': {0.0: 3.4828911146319177, 1.0: 0.3686068874467589, 2.0: -4.907635046960887, 3.0: -6.044590690826643, 4.0: -4.874519438176389}, 'x1': {0.0: 1.733481187119698, 1.0: 0.5643154035642666, 2.0: -1.0212535334133028, 3.0: -1.0030356618205514, 4.0: -4.411895916228276}, 'x2': {0.0: 4.020268347431084, 1.0: -0.08742851553937947, 2.0: -7.153368251271604, 3.0: -5.922487994025743, 4.0: -4.590944147677261}, 'x3': {0.0: 1.234691846288548, 1.0: -3.9244095473101286, 2.0: -6.187180548252826, 3.0: -4.874519438176389, 4.0: -2.8779655563023208}, 'x4': {0.0: 2.1285753456932848, 1.0: 0.6733161186628108, 2.0: -0.5548914941869373, 3.0: -1.659418040524497, 4.0: -1.5161622151907257}, 'x5': {0.0: 1.2248887124577075, 1.0: -0.8700156808622563, 2.0: -2.40903087424549, 3.0: -4.990929790020799, 4.0: -4.411895916228276}, 'x6': {0.0: 2.0960926476070045, 1.0: -1.2491060000582936, 2.0: -2.841663929626106, 3.0: -1.8299970004529658, 4.0: -1.1266974484290027}, 'x7': {0.0: 4.060318927714639, 1.0: 1.9416214720769875, 2.0: -1.157423883805754, 3.0: -4.729863384268909, 4.0: -6.1533935506754425}, 'x8': {0.0: 0.9038848952627683, 1.0: -0.016102087940778192, 2.0: -2.229982642796505, 3.0: -5.275860829100691, 4.0: -3.524592721227373}, 'x9': {0.0: 0.49018133756531024, 1.0: -0.7310324750781498, 2.0: -1.1172231947093205, 3.0: -0.48007028350395026, 4.0: -2.8779655563023208}}\n",
      "iv_dict= {'x0': 5.2016275224004715, 'x1': 0.9956426827103921, 'x2': 6.225012522327391, 'x3': 4.007145224011798, 'x4': 0.8233555568556905, 'x5': 1.6777629319618534, 'x6': 2.8237290120954848, 'x7': 5.350183186820864, 'x8': 0.9566936507045242, 'x9': 0.39645432091763844}\n"
     ]
    }
   ],
   "source": [
    "import pandas\n",
    "import numpy as np\n",
    "\n",
    "\n",
    "def calculate_woe_iv(feature,\n",
    "                     y,\n",
    "                     total_good,\n",
    "                     total_bad,\n",
    "                     smooth_const=0.1):\n",
    "    \"\"\"\n",
    "    Compute the woe and iv of single feature\n",
    "\n",
    "    Parameters\n",
    "    ----------\n",
    "    feature: feature array, pandas.Series\n",
    "    y: label, pandas.Series; rows with y == 1 are counted as \"good\"\n",
    "    total_good: int, good sample num\n",
    "    total_bad: int, bad sample num\n",
    "    smooth_const: float, must be positive or zero, Laplace smoothing\n",
    "        term guarding against log(0)/division by zero for empty groups;\n",
    "        defaults to 0.1 (the earlier text wrongly said the default was\n",
    "        zero).\n",
    "\n",
    "    Returns\n",
    "    -------\n",
    "    iv: float, iv value\n",
    "    woe: dict, woe of each group\n",
    "    \"\"\"\n",
    "    df = pandas.DataFrame({'feature': feature, 'y': y})\n",
    "\n",
    "    # Per group: 'good' = sum of y (count of y == 1), 'total' = size.\n",
    "    grouped = df.groupby('feature')['y'].agg(['sum', 'count'])\n",
    "    grouped.columns = ['good', 'total']\n",
    "\n",
    "    group_num = grouped.shape[0]\n",
    "\n",
    "    # Smoothed shares of good/bad samples falling into each group.\n",
    "    grouped['bad'] = grouped['total'] - grouped['good']\n",
    "    grouped['good_pct'] = (grouped['good'] + smooth_const) / (\n",
    "        total_good + group_num * smooth_const)\n",
    "    grouped['bad_pct'] = (grouped['bad'] + smooth_const) / (\n",
    "        total_bad + group_num * smooth_const)\n",
    "\n",
    "    # WOE = ln(good share / bad share), per group.\n",
    "    grouped['woe'] = np.log(grouped['good_pct'] / grouped['bad_pct'])\n",
    "\n",
    "    # IV = sum over groups of (good share - bad share) * WOE.\n",
    "    grouped['iv'] = (grouped['good_pct'] -\n",
    "                     grouped['bad_pct']) * grouped['woe']\n",
    "    iv = grouped['iv'].sum()\n",
    "\n",
    "    woe = grouped[\"woe\"].to_dict()\n",
    "\n",
    "    return iv, woe\n",
    "\n",
    "\n",
    "# filepath = \"./data/fujian-poc/vert_promoter.csv\"\n",
    "# df = pandas.read_csv(filepath)\n",
    "# NOTE: depends on pd_data_bucket from the equal-width binning cell above.\n",
    "df = pd_data_bucket\n",
    "columns = df.columns.values\n",
    "if \"y\" not in columns:\n",
    "    raise ValueError(\"column 'y' is necessary\")\n",
    "\n",
    "features = [col for col in columns if col not in [\"id\", \"y\"]]\n",
    "df_x = df[features]\n",
    "df_y = df[\"y\"]\n",
    "\n",
    "total_good = df_y.sum()\n",
    "total_bad = df_y.shape[0] - total_good\n",
    "\n",
    "woe_dict, iv_dict = {}, {}\n",
    "    \n",
    "for col in features:\n",
    "    iv, woe = calculate_woe_iv(df_x[col], df_y, total_good, total_bad)\n",
    "    woe_dict[col] = woe\n",
    "    iv_dict[col] = iv\n",
    "    \n",
    "print(\"woe_dict=\", woe_dict)\n",
    "print(\"iv_dict=\", iv_dict)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.8"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
