{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 第1关：OvO多分类策略"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "\n",
    "# 逻辑回归\n",
    "class tiny_logistic_regression(object):\n",
    "    '''Minimal binary logistic-regression classifier trained with batch gradient descent.'''\n",
    "\n",
    "    def __init__(self):\n",
    "        # weight vector W (set by fit)\n",
    "        self.coef_ = None\n",
    "        # bias b (set by fit)\n",
    "        self.intercept_ = None\n",
    "        # all parameters as one vector: [b, W]\n",
    "        self._theta = None\n",
    "        # mapping from internal codes 0/1 to the two original label values\n",
    "        self.label_map = {}\n",
    "\n",
    "\n",
    "    def _sigmoid(self, x):\n",
    "        # logistic function; maps any real value into (0, 1)\n",
    "        return 1. / (1. + np.exp(-x))\n",
    "\n",
    "\n",
    "    # training; train_labels may contain any two distinct values\n",
    "    def fit(self, train_datas, train_labels, learning_rate=1e-3, n_iters=1e4):\n",
    "        '''Fit the model on a binary-labelled dataset.\n",
    "\n",
    "        :param train_datas: training data, shape (n_samples, n_features)\n",
    "        :param train_labels: binary training labels (any two distinct values)\n",
    "        :param learning_rate: gradient-descent step size\n",
    "        :param n_iters: maximum number of gradient-descent iterations\n",
    "        :return: self\n",
    "        '''\n",
    "        # cross-entropy loss\n",
    "        def J(theta, X_b, y):\n",
    "            y_hat = self._sigmoid(X_b.dot(theta))\n",
    "            try:\n",
    "                return -np.sum(y*np.log(y_hat)+(1-y)*np.log(1-y_hat)) / len(y)\n",
    "            except:\n",
    "                # NOTE(review): bare except; np.log(0) produces -inf with a\n",
    "                # runtime warning rather than raising, so this guard rarely fires\n",
    "                return float('inf')\n",
    "\n",
    "        # partial derivative of the loss with respect to theta\n",
    "        def dJ(theta, X_b, y):\n",
    "            return X_b.T.dot(self._sigmoid(X_b.dot(theta)) - y) / len(y)\n",
    "\n",
    "        # batch gradient descent ('leraning_rate' typo kept: local name only)\n",
    "        def gradient_descent(X_b, y, initial_theta, leraning_rate, n_iters=1e2, epsilon=1e-6):\n",
    "            theta = initial_theta\n",
    "            cur_iter = 0\n",
    "            while cur_iter < n_iters:\n",
    "                gradient = dJ(theta, X_b, y)\n",
    "                last_theta = theta\n",
    "                theta = theta - leraning_rate * gradient\n",
    "                # early stop once the loss change falls below epsilon\n",
    "                if (abs(J(theta, X_b, y) - J(last_theta, X_b, y)) < epsilon):\n",
    "                    break\n",
    "                cur_iter += 1\n",
    "            return theta\n",
    "\n",
    "        unique_labels = list(set(train_labels))\n",
    "        labels = np.array(train_labels)\n",
    "\n",
    "        # map the two original labels to 0 and 1\n",
    "        # NOTE(review): set() iteration order is not guaranteed, so which label\n",
    "        # becomes 0 and which becomes 1 may vary between runs — confirm callers\n",
    "        # only rely on label_map for the inverse mapping (predict does)\n",
    "        self.label_map[0] = unique_labels[0]\n",
    "        self.label_map[1] = unique_labels[1]\n",
    "\n",
    "        for i in range(len(train_labels)):\n",
    "            if train_labels[i] == self.label_map[0]:\n",
    "                labels[i] = 0\n",
    "            else:\n",
    "                labels[i] = 1\n",
    "\n",
    "        # prepend a column of ones so the bias is learned as theta[0]\n",
    "        X_b = np.hstack([np.ones((len(train_datas), 1)), train_datas])\n",
    "        initial_theta = np.zeros(X_b.shape[1])\n",
    "        self._theta = gradient_descent(X_b, labels, initial_theta, learning_rate, n_iters)\n",
    "\n",
    "        self.intercept_ = self._theta[0]\n",
    "        self.coef_ = self._theta[1:]\n",
    "\n",
    "        return self\n",
    "\n",
    "\n",
    "    # predicted probability that each sample in X has internal label 1\n",
    "    def predict_proba(self, X):\n",
    "        X_b = np.hstack([np.ones((len(X), 1)), X])\n",
    "        return self._sigmoid(X_b.dot(self._theta))\n",
    "\n",
    "    # predict, returning the original label values\n",
    "    def predict(self, X):\n",
    "        proba = self.predict_proba(X)\n",
    "        result = np.array(proba >= 0.5, dtype='int')\n",
    "        # map internal 0/1 codes back to the original labels\n",
    "        for i in range(len(result)):\n",
    "            if result[i] == 0:\n",
    "                result[i] = self.label_map[0]\n",
    "            else:\n",
    "                result[i] = self.label_map[1]\n",
    "        return result\n",
    "\n",
    "\n",
    "\n",
    "class OvO(object):\n",
    "    '''One-vs-One multiclass strategy over three classes (0, 1, 2).'''\n",
    "\n",
    "    def __init__(self):\n",
    "        # list of trained pairwise binary classifiers, one per class pair\n",
    "        self.models = []\n",
    "\n",
    "\n",
    "    def fit(self, train_datas, train_labels):\n",
    "        '''\n",
    "        OvO training stage: fit one binary classifier per unordered class pair\n",
    "        (0 vs 1, 1 vs 2, 0 vs 2) and store them in self.models.\n",
    "        :param train_datas: training data, ndarray\n",
    "        :param train_labels: training labels, integers such as 0,1,2; ndarray of shape (-1,)\n",
    "        :return: None\n",
    "        '''\n",
    "\n",
    "        #********* Begin *********#\n",
    "        # Train one classifier per class pair, using only the samples that\n",
    "        # belong to the two classes of that pair (mask keeps sample order,\n",
    "        # equivalent to the previous np.delete-based selection).\n",
    "        for a, b in ((0, 1), (1, 2), (0, 2)):\n",
    "            mask = (train_labels == a) | (train_labels == b)\n",
    "            clf = tiny_logistic_regression()\n",
    "            clf.fit(train_datas[mask], train_labels[mask])\n",
    "            self.models.append(clf)\n",
    "        #********* End *********#\n",
    "\n",
    "\n",
    "    def predict(self, test_datas):\n",
    "        '''\n",
    "        OvO prediction stage: every pairwise classifier casts one vote for the\n",
    "        label it predicts; the label with the most votes wins (argmax breaks\n",
    "        ties toward the smaller label).\n",
    "        :param test_datas: test data, ndarray\n",
    "        :return: predicted labels, ndarray\n",
    "        '''\n",
    "        res = np.zeros(shape=(len(test_datas), 3))\n",
    "        #********* Begin *********#\n",
    "        for model in self.models:\n",
    "            pred = model.predict(test_datas)\n",
    "            # BUG FIX: each classifier must vote for the label it predicted.\n",
    "            # The old code, res[np.where(pred == i), i] += 1, only counted a\n",
    "            # vote when the predicted label equalled the model's index, so\n",
    "            # e.g. the 0-vs-2 model could never vote for class 0 and ties\n",
    "            # were resolved incorrectly.\n",
    "            res[np.arange(len(pred)), pred] += 1\n",
    "        return np.argmax(res, axis=1)\n",
    "        #********* End *********#\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 第2关：OvR多分类策略"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "\n",
    "# 逻辑回归\n",
    "class tiny_logistic_regression(object):\n",
    "    '''Minimal binary logistic-regression classifier trained with batch gradient descent.'''\n",
    "\n",
    "    def __init__(self):\n",
    "        # weight vector W (set by fit)\n",
    "        self.coef_ = None\n",
    "        # bias b (set by fit)\n",
    "        self.intercept_ = None\n",
    "        # all parameters as one vector: [b, W]\n",
    "        self._theta = None\n",
    "        # mapping from internal codes 0/1 to the two original label values\n",
    "        self.label_map = {}\n",
    "\n",
    "\n",
    "    def _sigmoid(self, x):\n",
    "        # logistic function; maps any real value into (0, 1)\n",
    "        return 1. / (1. + np.exp(-x))\n",
    "\n",
    "\n",
    "    # training; train_labels may contain any two distinct values\n",
    "    def fit(self, train_datas, train_labels, learning_rate=1e-3, n_iters=1e4):\n",
    "        '''Fit the model on a binary-labelled dataset.\n",
    "\n",
    "        :param train_datas: training data, shape (n_samples, n_features)\n",
    "        :param train_labels: binary training labels (any two distinct values)\n",
    "        :param learning_rate: gradient-descent step size\n",
    "        :param n_iters: maximum number of gradient-descent iterations\n",
    "        :return: self\n",
    "        '''\n",
    "        # cross-entropy loss\n",
    "        def J(theta, X_b, y):\n",
    "            y_hat = self._sigmoid(X_b.dot(theta))\n",
    "            try:\n",
    "                return -np.sum(y*np.log(y_hat)+(1-y)*np.log(1-y_hat)) / len(y)\n",
    "            except:\n",
    "                # NOTE(review): bare except; np.log(0) produces -inf with a\n",
    "                # runtime warning rather than raising, so this guard rarely fires\n",
    "                return float('inf')\n",
    "\n",
    "        # partial derivative of the loss with respect to theta\n",
    "        def dJ(theta, X_b, y):\n",
    "            return X_b.T.dot(self._sigmoid(X_b.dot(theta)) - y) / len(y)\n",
    "\n",
    "        # batch gradient descent ('leraning_rate' typo kept: local name only)\n",
    "        def gradient_descent(X_b, y, initial_theta, leraning_rate, n_iters=1e2, epsilon=1e-6):\n",
    "            theta = initial_theta\n",
    "            cur_iter = 0\n",
    "            while cur_iter < n_iters:\n",
    "                gradient = dJ(theta, X_b, y)\n",
    "                last_theta = theta\n",
    "                theta = theta - leraning_rate * gradient\n",
    "                # early stop once the loss change falls below epsilon\n",
    "                if (abs(J(theta, X_b, y) - J(last_theta, X_b, y)) < epsilon):\n",
    "                    break\n",
    "                cur_iter += 1\n",
    "            return theta\n",
    "\n",
    "        unique_labels = list(set(train_labels))\n",
    "        labels = np.array(train_labels)\n",
    "\n",
    "        # map the two original labels to 0 and 1\n",
    "        # NOTE(review): set() iteration order is not guaranteed, so which label\n",
    "        # becomes 0 and which becomes 1 may vary between runs — confirm callers\n",
    "        # only rely on label_map for the inverse mapping (predict does)\n",
    "        self.label_map[0] = unique_labels[0]\n",
    "        self.label_map[1] = unique_labels[1]\n",
    "\n",
    "        for i in range(len(train_labels)):\n",
    "            if train_labels[i] == self.label_map[0]:\n",
    "                labels[i] = 0\n",
    "            else:\n",
    "                labels[i] = 1\n",
    "\n",
    "        # prepend a column of ones so the bias is learned as theta[0]\n",
    "        X_b = np.hstack([np.ones((len(train_datas), 1)), train_datas])\n",
    "        initial_theta = np.zeros(X_b.shape[1])\n",
    "        self._theta = gradient_descent(X_b, labels, initial_theta, learning_rate, n_iters)\n",
    "\n",
    "        self.intercept_ = self._theta[0]\n",
    "        self.coef_ = self._theta[1:]\n",
    "\n",
    "        return self\n",
    "\n",
    "\n",
    "    # predicted probability that each sample in X has internal label 1\n",
    "    def predict_proba(self, X):\n",
    "        X_b = np.hstack([np.ones((len(X), 1)), X])\n",
    "        return self._sigmoid(X_b.dot(self._theta))\n",
    "\n",
    "    # predict, returning the original label values\n",
    "    def predict(self, X):\n",
    "        proba = self.predict_proba(X)\n",
    "        result = np.array(proba >= 0.5, dtype='int')\n",
    "        # map internal 0/1 codes back to the original labels\n",
    "        for i in range(len(result)):\n",
    "            if result[i] == 0:\n",
    "                result[i] = self.label_map[0]\n",
    "            else:\n",
    "                result[i] = self.label_map[1]\n",
    "        return result\n",
    "\n",
    "\n",
    "import numpy as np\n",
    "# 逻辑回归\n",
    "class tiny_logistic_regression(object):\n",
    "    '''Binary logistic regression that expects labels already coded as 0/1.\n",
    "\n",
    "    NOTE(review): this re-definition SHADOWS the tiny_logistic_regression\n",
    "    class defined earlier in this cell; OvR below therefore uses THIS\n",
    "    version, whose fit() skips label mapping and whose predict() returns\n",
    "    raw 0/1 codes. Consider removing the duplicate.\n",
    "    '''\n",
    "    def __init__(self):\n",
    "        # weight vector W (set by fit)\n",
    "        self.coef_ = None\n",
    "        # bias b (set by fit)\n",
    "        self.intercept_ = None\n",
    "        # all parameters as one vector: [b, W]\n",
    "        self._theta = None\n",
    "        # unused in this version; kept for interface parity with the earlier class\n",
    "        self.label_map = {}\n",
    "    def _sigmoid(self, x):\n",
    "        # logistic function; maps any real value into (0, 1)\n",
    "        return 1. / (1. + np.exp(-x))\n",
    "    # training; labels must already be 0/1\n",
    "    def fit(self, train_datas, train_labels, learning_rate=1e-4, n_iters=1e3):\n",
    "        # cross-entropy loss\n",
    "        def J(theta, X_b, y):\n",
    "            y_hat = self._sigmoid(X_b.dot(theta))\n",
    "            try:\n",
    "                return -np.sum(y*np.log(y_hat)+(1-y)*np.log(1-y_hat)) / len(y)\n",
    "            except:\n",
    "                # NOTE(review): bare except; np.log(0) warns rather than raises\n",
    "                return float('inf')\n",
    "        # partial derivative of the loss with respect to theta\n",
    "        def dJ(theta, X_b, y):\n",
    "            return X_b.T.dot(self._sigmoid(X_b.dot(theta)) - y) / len(y)\n",
    "        # batch gradient descent ('leraning_rate' typo kept: local name only)\n",
    "        def gradient_descent(X_b, y, initial_theta, leraning_rate, n_iters=1e2, epsilon=1e-6):\n",
    "            theta = initial_theta\n",
    "            cur_iter = 0\n",
    "            while cur_iter < n_iters:\n",
    "                gradient = dJ(theta, X_b, y)\n",
    "                last_theta = theta\n",
    "                theta = theta - leraning_rate * gradient\n",
    "                # early stop once the loss change falls below epsilon\n",
    "                if (abs(J(theta, X_b, y) - J(last_theta, X_b, y)) < epsilon):\n",
    "                    break\n",
    "                cur_iter += 1\n",
    "            return theta\n",
    "        # prepend a column of ones so the bias is learned as theta[0]\n",
    "        X_b = np.hstack([np.ones((len(train_datas), 1)), train_datas])\n",
    "        initial_theta = np.zeros(X_b.shape[1])\n",
    "        self._theta = gradient_descent(X_b, train_labels, initial_theta, learning_rate, n_iters)\n",
    "        self.intercept_ = self._theta[0]\n",
    "        self.coef_ = self._theta[1:]\n",
    "        return self\n",
    "    # predicted probability that each sample in X has label 1\n",
    "    def predict_proba(self, X):\n",
    "        X_b = np.hstack([np.ones((len(X), 1)), X])\n",
    "        return self._sigmoid(X_b.dot(self._theta))\n",
    "    # predict 0/1 labels\n",
    "    def predict(self, X):\n",
    "        proba = self.predict_proba(X)\n",
    "        result = np.array(proba >= 0.5, dtype='int')\n",
    "        return result\n",
    "class OvR(object):\n",
    "    '''One-vs-Rest multiclass strategy over three classes (0, 1, 2).'''\n",
    "\n",
    "    def __init__(self):\n",
    "        # list that stores one trained binary model per class\n",
    "        self.models = []\n",
    "        # real labels of the positive class for each model\n",
    "        # e.g. if model 1's positive class is 2, then real_label[0] = 2\n",
    "        self.real_label = []\n",
    "    def fit(self, train_datas, train_labels):\n",
    "        '''\n",
    "        OvR training stage: store the fitted models in self.models\n",
    "        :param train_datas: training data, ndarray\n",
    "        :param train_labels: training labels, ndarray of shape (-1,)\n",
    "        :return: None\n",
    "        '''\n",
    "        #********* Begin *********#\n",
    "        # One binary classifier per class: that class becomes the positive\n",
    "        # label (1) and every other class becomes negative (0).\n",
    "        for positive in range(3):\n",
    "            binary_labels = (train_labels == positive).astype(train_labels.dtype)\n",
    "            model = tiny_logistic_regression()\n",
    "            model.fit(train_datas, binary_labels)\n",
    "            self.models.append(model)\n",
    "        #********* End *********#\n",
    "\n",
    "    def predict(self, test_datas):\n",
    "        '''\n",
    "        OvR prediction stage: pick, for each sample, the class whose\n",
    "        one-vs-rest model reports the highest positive probability.\n",
    "        :param test_datas: test data, ndarray\n",
    "        :return: predicted labels, ndarray\n",
    "        '''\n",
    "        #********* Begin *********#\n",
    "        # Rows = models/classes, columns = samples; argmax over rows gives\n",
    "        # the winning class index for every sample.\n",
    "        scores = np.vstack([m.predict_proba(test_datas) for m in self.models])\n",
    "        return np.argmax(scores, axis=0)\n",
    "        #********* End *********#\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "测试代码"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([2, 0, 0, 0, 0, 0])"
      ]
     },
     "execution_count": 20,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Small smoke test: 6 training samples across 3 classes, plus 6 test\n",
    "# samples that are slight perturbations of the training points.\n",
    "train_datas = np.array([[1,1,1],\n",
    "                       [2,2,2],\n",
    "                       [2,3,4],\n",
    "                       [3,4,5],\n",
    "                       [5,4,3],\n",
    "                       [7,6,5]],dtype=np.float32)\n",
    "train_labels = np.array([0,0,1,1,2,2])\n",
    "test_datas = np.array([[1.1,1.01,0.98],\n",
    "                       [2.02,2.12,1.99],\n",
    "                       [2.29,3.3,4.4],\n",
    "                       [3,3.9,5.1],\n",
    "                       [5.03,4.1,3.01],\n",
    "                       [7.05,5.98,5.1]],dtype=np.float32)\n",
    "\n",
    "# Expected: predictions close to train_labels ([0,0,1,1,2,2]) for the\n",
    "# perturbed points; the last expression displays the prediction array.\n",
    "clf = OvR()\n",
    "clf.fit(train_datas, train_labels)\n",
    "clf.predict(test_datas)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "tf38",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.19"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
