{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 41,
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "from sklearn.preprocessing import LabelEncoder\n",
    "from sklearn.model_selection import train_test_split"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 42,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import pandas as pd\n",
    "from sklearn.utils.multiclass import type_of_target\n",
    "from collections import namedtuple"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 载入数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 43,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>色泽</th>\n",
       "      <th>根蒂</th>\n",
       "      <th>敲声</th>\n",
       "      <th>纹理</th>\n",
       "      <th>脐部</th>\n",
       "      <th>触感</th>\n",
       "      <th>密度</th>\n",
       "      <th>含糖率</th>\n",
       "      <th>好瓜</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>青绿</td>\n",
       "      <td>蜷缩</td>\n",
       "      <td>浊响</td>\n",
       "      <td>清晰</td>\n",
       "      <td>凹陷</td>\n",
       "      <td>硬滑</td>\n",
       "      <td>0.697</td>\n",
       "      <td>0.460</td>\n",
       "      <td>是</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>乌黑</td>\n",
       "      <td>蜷缩</td>\n",
       "      <td>沉闷</td>\n",
       "      <td>清晰</td>\n",
       "      <td>凹陷</td>\n",
       "      <td>硬滑</td>\n",
       "      <td>0.774</td>\n",
       "      <td>0.376</td>\n",
       "      <td>是</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>乌黑</td>\n",
       "      <td>蜷缩</td>\n",
       "      <td>浊响</td>\n",
       "      <td>清晰</td>\n",
       "      <td>凹陷</td>\n",
       "      <td>硬滑</td>\n",
       "      <td>0.634</td>\n",
       "      <td>0.264</td>\n",
       "      <td>是</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>青绿</td>\n",
       "      <td>蜷缩</td>\n",
       "      <td>沉闷</td>\n",
       "      <td>清晰</td>\n",
       "      <td>凹陷</td>\n",
       "      <td>硬滑</td>\n",
       "      <td>0.608</td>\n",
       "      <td>0.318</td>\n",
       "      <td>是</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>浅白</td>\n",
       "      <td>蜷缩</td>\n",
       "      <td>浊响</td>\n",
       "      <td>清晰</td>\n",
       "      <td>凹陷</td>\n",
       "      <td>硬滑</td>\n",
       "      <td>0.556</td>\n",
       "      <td>0.215</td>\n",
       "      <td>是</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "   色泽  根蒂  敲声  纹理  脐部  触感     密度    含糖率 好瓜\n",
       "0  青绿  蜷缩  浊响  清晰  凹陷  硬滑  0.697  0.460  是\n",
       "1  乌黑  蜷缩  沉闷  清晰  凹陷  硬滑  0.774  0.376  是\n",
       "2  乌黑  蜷缩  浊响  清晰  凹陷  硬滑  0.634  0.264  是\n",
       "3  青绿  蜷缩  沉闷  清晰  凹陷  硬滑  0.608  0.318  是\n",
       "4  浅白  蜷缩  浊响  清晰  凹陷  硬滑  0.556  0.215  是"
      ]
     },
     "execution_count": 43,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# load the watermelon 3.0 data set and keep the columns in book order\n",
    "data = pd.read_csv(\"../data/watermelon3.0.csv\").loc[:, [\"色泽\", \"根蒂\", \"敲声\", \"纹理\", \"脐部\", \"触感\", \"密度\", \"含糖率\", \"好瓜\"]]\n",
    "data.head()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 数据预处理\n",
    "对离散数据的编码参考[stackoverflow](https://stackoverflow.com/questions/24458645/label-encoding-across-multiple-columns-in-scikit-learn)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 建立模型\n",
    "代码参考[Github](https://github.com/han1057578619/MachineLearning_Zhouzhihua_ProblemSets/blob/master/ch7--贝叶斯分类/7.3/7.3-NaiveBayes.py)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### NaiveBayes"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 44,
   "metadata": {},
   "outputs": [],
   "source": [
    "def train_nb(X, y):\n",
    "    \"\"\"Train a naive Bayes classifier with Laplace smoothing.\n",
    "\n",
    "    X: DataFrame of features; continuous columns get a Gaussian model,\n",
    "       discrete columns get smoothed frequency estimates.\n",
    "    y: Series of labels taking the values '是' (good) / '否' (bad).\n",
    "    Returns (p1, p1_list, p0_list): the positive-class prior plus, per\n",
    "    feature, a namedtuple(is_continuous, conditional_pro) for each class.\n",
    "    \"\"\"\n",
    "    m, n = X.shape\n",
    "    p1 = (len(y[y == '是']) + 1) / (m + 2)  # Laplace-smoothed class prior\n",
    "\n",
    "    p1_list = []  # conditional parameters per feature, positive class\n",
    "    p0_list = []  # conditional parameters per feature, negative class\n",
    "\n",
    "    X1 = X[y == '是']\n",
    "    X0 = X[y == '否']\n",
    "\n",
    "    m1, _ = X1.shape\n",
    "    m0, _ = X0.shape\n",
    "\n",
    "    for i in range(n):\n",
    "        xi = X.iloc[:, i]\n",
    "        # one namedtuple type per feature, named after its column\n",
    "        p_xi = namedtuple(X.columns[i], ['is_continuous', 'conditional_pro'])\n",
    "\n",
    "        is_continuous = type_of_target(xi) == 'continuous'\n",
    "        xi1 = X1.iloc[:, i]\n",
    "        xi0 = X0.iloc[:, i]\n",
    "        if is_continuous:  # continuous: conditional_pro stores [mean, variance]\n",
    "            xi1_mean = np.mean(xi1)\n",
    "            xi1_var = np.var(xi1)\n",
    "            xi0_mean = np.mean(xi0)\n",
    "            xi0_var = np.var(xi0)\n",
    "\n",
    "            p1_list.append(p_xi(is_continuous, [xi1_mean, xi1_var]))\n",
    "            p0_list.append(p_xi(is_continuous, [xi0_mean, xi0_var]))\n",
    "        else:  # discrete: conditional_pro stores log of smoothed P(value|class)\n",
    "            unique_value = xi.unique()  # every value this feature takes\n",
    "            nvalue = len(unique_value)\n",
    "\n",
    "            # per-class value counts, +1 for Laplace smoothing so unseen\n",
    "            # values do not zero out the whole product\n",
    "            xi1_value_count = pd.value_counts(xi1)[unique_value].fillna(0) + 1\n",
    "            xi0_value_count = pd.value_counts(xi0)[unique_value].fillna(0) + 1\n",
    "\n",
    "            p1_list.append(p_xi(is_continuous, np.log(xi1_value_count / (m1 + nvalue))))\n",
    "            p0_list.append(p_xi(is_continuous, np.log(xi0_value_count / (m0 + nvalue))))\n",
    "\n",
    "    return p1, p1_list, p0_list\n",
    "\n",
    "\n",
    "def predict_nb(x, p1, p1_list, p0_list):\n",
    "    \"\"\"Classify one sample using the parameters produced by train_nb.\n",
    "\n",
    "    Scores accumulate in log space.  conditional_pro holds the VARIANCE\n",
    "    for continuous features, so the Gaussian log-density is\n",
    "    -0.5*log(2*pi*var) - (x - mean)**2 / (2*var).  (The previous version\n",
    "    used var in place of the standard deviation and divided the exponent\n",
    "    by var**2, skewing the continuous likelihoods.)\n",
    "    \"\"\"\n",
    "    n = len(x)\n",
    "\n",
    "    x_p1 = np.log(p1)\n",
    "    x_p0 = np.log(1 - p1)\n",
    "    for i in range(n):\n",
    "        p1_xi = p1_list[i]\n",
    "        p0_xi = p0_list[i]\n",
    "\n",
    "        if p1_xi.is_continuous:\n",
    "            mean1, var1 = p1_xi.conditional_pro\n",
    "            mean0, var0 = p0_xi.conditional_pro\n",
    "            x_p1 += -0.5 * np.log(2 * np.pi * var1) - (x[i] - mean1) ** 2 / (2 * var1)\n",
    "            x_p0 += -0.5 * np.log(2 * np.pi * var0) - (x[i] - mean0) ** 2 / (2 * var0)\n",
    "        else:\n",
    "            x_p1 += p1_xi.conditional_pro[x[i]]\n",
    "            x_p0 += p0_xi.conditional_pro[x[i]]\n",
    "\n",
    "    if x_p1 > x_p0:\n",
    "        return '是'\n",
    "    else:\n",
    "        return '否'\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 48,
   "metadata": {},
   "outputs": [],
   "source": [
    "# split the frame into features (all but the last column) and labels,\n",
    "# then estimate the naive Bayes parameters\n",
    "X, y = data.iloc[:, :-1], data.iloc[:, -1]\n",
    "p1, p1_list, p0_list = train_nb(X, y)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 49,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "是\n"
     ]
    }
   ],
   "source": [
    "# sanity check: classify the first training sample (a known positive)\n",
    "x_test = X.iloc[0]\n",
    "print(predict_nb(x_test, p1, p1_list, p0_list))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### AODE"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 50,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import pandas as pd\n",
    "from sklearn.utils.multiclass import type_of_target\n",
    "\n",
    "class AODE(object):\n",
    "    \"\"\"Averaged One-Dependent Estimator: averages one SPODE (super-parent\n",
    "    one-dependent estimator) per qualifying discrete attribute.\"\"\"\n",
    "\n",
    "    def __init__(self, m):\n",
    "        self.m_hat = m  # min samples per attribute value for a parent (30 in the paper; 0 for this tiny data set)\n",
    "        self.m = None  # number of training samples\n",
    "        self.n = None  # number of features\n",
    "        self.unique_y = None  # array of class labels\n",
    "        self.n_class = None  # number of classes\n",
    "        self.is_continuous = None  # per-column bool Series: continuous feature?\n",
    "        self.unique_values = None  # {column index: unique values} of discrete columns\n",
    "        self.total_p = None  # {parent attribute index: fitted SPODE table}\n",
    "\n",
    "    def predict(self, X):\n",
    "        \"\"\"Return the predicted class label for each row of X.\"\"\"\n",
    "        X = np.array(X)\n",
    "        if self.total_p is None:  # identity check ('== None' was unidiomatic)\n",
    "            raise Exception('you have to fit first before predict.')\n",
    "\n",
    "        result = pd.DataFrame(np.zeros((X.shape[0], self.unique_y.shape[0])), columns=self.unique_y)\n",
    "\n",
    "        # sum the log scores of every SPODE, then take the best class per row\n",
    "        for i in self.total_p.keys():\n",
    "            result += self._spode_predict(X, self.total_p[i], i)\n",
    "\n",
    "        return self.unique_y[np.argmax(result.values, axis=1)]\n",
    "\n",
    "    def fit(self, X, y):\n",
    "        \"\"\"Fit one SPODE for every attribute eligible as a parent.\"\"\"\n",
    "        X = np.array(X)\n",
    "        self.m, self.n = X.shape\n",
    "        self.unique_y = np.unique(y)\n",
    "        self.n_class = self.unique_y.size\n",
    "\n",
    "        # pass a list: type_of_target only detects 'continuous' reliably on\n",
    "        # 1-d list/Series input; an object-dtype array column parses as 'unknown'\n",
    "        is_continuous = pd.DataFrame(X).apply(lambda x: (type_of_target(x.tolist()) == 'continuous'))\n",
    "        self.is_continuous = is_continuous\n",
    "\n",
    "        unique_values = {}  # possible values of each discrete feature\n",
    "        for i in is_continuous[~is_continuous].index:\n",
    "            unique_values[i] = np.unique(X[:, i])\n",
    "\n",
    "        self.unique_values = unique_values\n",
    "\n",
    "        # attribute indexes allowed as parents (threshold is 30 in the paper,\n",
    "        # but 0 is used here because the watermelon sample is so small)\n",
    "        parent_attribute_index = self._get_parent_attribute(X)\n",
    "\n",
    "        total_p = {}\n",
    "        for i in parent_attribute_index:\n",
    "            p = self._spode_fit(X, y, i)\n",
    "            total_p[i] = p\n",
    "\n",
    "        self.total_p = total_p\n",
    "\n",
    "        return self\n",
    "\n",
    "    def _spode_fit(self, X, y, xi_index):\n",
    "        # parameter table indexed by (parent value, class)\n",
    "        p = pd.DataFrame(columns=self.unique_y, index=self.unique_values[xi_index])\n",
    "        nunique_xi = self.unique_values[xi_index].size  # number of parent values\n",
    "\n",
    "        pc_xi_denominator = self.m + self.n_class * nunique_xi  # denominator of p(c, xi): |D| + N * N_i\n",
    "\n",
    "        for c in self.unique_y:\n",
    "            for xi in self.unique_values[xi_index]:\n",
    "                p_list = []  # log p(xj|c, xi) per feature, log p(c, xi) appended last\n",
    "\n",
    "                c_xi = (X[:, xi_index] == xi) & (y == c)\n",
    "                X_c_xi = X[c_xi, :]  # samples with y == c and Xi == xi\n",
    "\n",
    "                pc_xi = (X_c_xi.shape[0] + 1) / pc_xi_denominator  # smoothed p(c, xi)\n",
    "\n",
    "                # when j == xi_index, p(xi|c, xi) == 1 and could be skipped;\n",
    "                # it is kept because it adds the same constant for every class\n",
    "\n",
    "                for j in range(self.n):\n",
    "                    if self.is_continuous[j]:  # continuous: assume Gaussian, keep [mean, variance]\n",
    "                        # with so few samples X_c_xi may hold 0 or 1 rows;\n",
    "                        # np.mean would warn and the variance would be 0,\n",
    "                        # so fall back to the statistics of the whole class\n",
    "                        if X_c_xi.shape[0] <= 1:\n",
    "                            p_list.append([np.mean(X[y == c, j]), np.var(X[y == c, j])])\n",
    "                        else:\n",
    "                            p_list.append([np.mean(X_c_xi[:, j]), np.var(X_c_xi[:, j])])\n",
    "\n",
    "                    else:\n",
    "                        # Laplace-smoothed p(xj|c, xi)\n",
    "                        condi_proba_of_xj = (pd.value_counts(X_c_xi[:, j])[self.unique_values[j]].fillna(0) + 1) / (\n",
    "                                X_c_xi.shape[0] + self.unique_values[j].size)\n",
    "                        p_list.append(np.log(condi_proba_of_xj))\n",
    "                p_list.append(np.log(pc_xi))  # log p(c, xi) goes in the last slot\n",
    "\n",
    "                p.loc[xi, c] = p_list\n",
    "\n",
    "        return p\n",
    "\n",
    "    def _spode_predict(self, X, p, xi_index):\n",
    "        # score every row of X under the SPODE whose parent is xi_index\n",
    "\n",
    "        assert X.shape[1] == self.n\n",
    "        xi = X[:, xi_index]\n",
    "        result = pd.DataFrame(np.zeros((X.shape[0], p.shape[1])), columns=self.unique_y)  # per-sample log score of each class\n",
    "        for value in p.index:  # group rows by parent value so pandas indexing can be reused\n",
    "            xi_value = xi == value\n",
    "            X_split = X[xi_value, :]\n",
    "            for c in p.columns:\n",
    "                p_list = p.loc[value, c]  # [log p(xj|c, xi)..., log p(c, xi)]\n",
    "                for j in range(self.n):  # accumulate the log conditional probabilities\n",
    "                    if self.is_continuous[j]:\n",
    "                        mean_, var_ = p_list[j]\n",
    "                        # Gaussian log-density with variance var_:\n",
    "                        # -0.5*log(2*pi*var) - (x - mean)**2 / (2*var).\n",
    "                        # Fixed: var_ was previously used as if it were the\n",
    "                        # std, with the exponent divided by var_**2.\n",
    "                        result.loc[xi_value, c] += (\n",
    "                                -0.5 * np.log(2 * np.pi * var_) - (X_split[:, j] - mean_) ** 2 / (2 * var_))\n",
    "                    else:\n",
    "                        result.loc[xi_value, c] += p_list[j][X_split[:, j]].values\n",
    "\n",
    "                result.loc[xi_value, c] += p_list[-1]  # finally add log p(c, xi)\n",
    "\n",
    "        return result\n",
    "\n",
    "    def _get_parent_attribute(self, X):\n",
    "        '''\n",
    "        Decide which attributes may act as parents, based on the number of\n",
    "        samples behind each of their values.\n",
    "\n",
    "        Neither the book nor the original AODE paper explains how to treat\n",
    "        continuous values here; a continuous parent x_j makes p(x_i|c, x_j)\n",
    "        awkward to estimate, and AODE already discards attributes whose\n",
    "        values have fewer than m samples, so continuous attributes are\n",
    "        simply excluded from the parent set.\n",
    "        :param X:\n",
    "        :return:\n",
    "        '''\n",
    "\n",
    "        enough_quantity = pd.DataFrame(X).apply(\n",
    "            lambda x: (type_of_target(x.tolist()) != 'continuous') & (pd.value_counts(x) > self.m_hat).all())\n",
    "        return enough_quantity[enough_quantity].index.tolist()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 51,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "['是']\n"
     ]
    }
   ],
   "source": [
    "# fit AODE on the full data set and classify the first sample\n",
    "X, y = data.iloc[:, :-1], data.iloc[:, -1]\n",
    "\n",
    "aode = AODE(0)\n",
    "print(aode.fit(X, y).predict(X.iloc[[0], :]))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "ML",
   "language": "python",
   "name": "ml"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.10"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
