{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 1.\t请编写程序实现决策树算法中的选择属性进行分裂的计算过程，即实现选择使信息增益率最大的属性的过程，输入是数据集，输出是信息增益率最大的属性。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 66,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import pandas as pd\n",
    "import math"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 67,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([['A11', 6.0, 'A34', ..., 'A192', 'A201', 1],\n",
       "       ['A12', 48.0, 'A32', ..., 'A191', 'A201', 2],\n",
       "       ['A14', 12.0, 'A34', ..., 'A191', 'A201', 1],\n",
       "       ...,\n",
       "       ['A14', 12.0, 'A32', ..., 'A191', 'A201', 1],\n",
       "       ['A11', 45.0, 'A32', ..., 'A192', 'A201', 2],\n",
       "       ['A12', 45.0, 'A34', ..., 'A191', 'A201', 1]], dtype=object)"
      ]
     },
     "execution_count": 67,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# 数据集导入\n",
    "german_clean=np.array(pd.read_csv('german_clean.csv',header=0).fillna(0))\n",
    "german_clean"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 68,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([['A11', False, 'A34', ..., 'A192', 'A201', 1],\n",
       "       ['A12', True, 'A32', ..., 'A191', 'A201', 2],\n",
       "       ['A14', False, 'A34', ..., 'A191', 'A201', 1],\n",
       "       ...,\n",
       "       ['A14', False, 'A32', ..., 'A191', 'A201', 1],\n",
       "       ['A11', True, 'A32', ..., 'A192', 'A201', 2],\n",
       "       ['A12', True, 'A34', ..., 'A191', 'A201', 1]], dtype=object)"
      ]
     },
     "execution_count": 68,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "#均值二分法将连续值变成离散值,将数值变为True或False\n",
    "for i in range(german_clean.shape[1]-1):\n",
    "    if type(german_clean[0][i])==float:\n",
    "        german_clean[:,i]=german_clean[:,i]>german_clean[:,i].mean()\n",
    "german_clean"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 69,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.8812908992306927\n"
     ]
    }
   ],
   "source": [
    "# 定义计算信息熵函数\n",
    "def calcShannonEnt(dataSet):\n",
    "    numEntires = len(dataSet)                        #返回数据集的行数\n",
    "    labelCounts = {}                                #保存每个标签(Label)出现次数的字典\n",
    "    for featVec in dataSet:                            #对每组特征向量进行统计\n",
    "        if featVec.size==0:\n",
    "            continue\n",
    "        currentLabel = featVec[-1]                    #提取标签(Label)信息\n",
    "        if currentLabel not in labelCounts.keys():    #如果标签(Label)没有放入统计次数的字典,添加进去\n",
    "            labelCounts[currentLabel] = 0\n",
    "        labelCounts[currentLabel] += 1                #Label计数\n",
    "    shannonEnt = 0.0                                #经验熵(香农熵)\n",
    "    for key in labelCounts:                            #计算香农熵\n",
    "        prob = float(labelCounts[key]) / numEntires    #选择该标签(Label)的概率\n",
    "        shannonEnt -= prob * math.log(prob, 2)            #利用公式计算\n",
    "    return shannonEnt\n",
    "print(calcShannonEnt(german_clean))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 70,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
    "# 按照给定特征划分数据集\n",
    "def splitDataSet(dataSet, axis, value):\n",
    "    retDataSet = []                                        #创建返回的数据集列表\n",
    "    for featVec in dataSet:   #遍历数据集\n",
    "        if featVec[axis] == value:\n",
    "            reducedFeatVec = featVec[:axis]                #去掉axis特征\n",
    "            reducedFeatVec.tolist().extend(featVec[axis+1:])     #将符合条件的添加到返回的数据集\n",
    "            retDataSet.append(np.array(reducedFeatVec))\n",
    "    return retDataSet"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 71,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "第0个特征的增益为0.881\n",
      "第1个特征的增益为0.918\n",
      "第2个特征的增益为0.092\n",
      "第3个特征的增益为0.762\n",
      "第4个特征的增益为0.748\n",
      "第5个特征的增益为7.752\n",
      "第6个特征的增益为0.271\n",
      "第7个特征的增益为0.784\n",
      "第8个特征的增益为1.249\n",
      "第9个特征的增益为0.906\n",
      "第10个特征的增益为0.648\n",
      "第11个特征的增益为0.346\n",
      "第12个特征的增益为0.921\n",
      "第13个特征的增益为0.909\n",
      "第14个特征的增益为4.328\n",
      "第15个特征的增益为0.051\n",
      "第16个特征的增益为0.246\n",
      "第17个特征的增益为0.240\n",
      "第18个特征的增益为0.393\n",
      "第19个特征的增益为0.082\n",
      "最优特征索引值:5\n"
     ]
    }
   ],
   "source": [
    "# 选择最优特征\n",
    "def chooseBestFeatureToSplit(dataSet):\n",
    "    numFeatures = len(dataSet[0]) - 1                    #特征数量\n",
    "    baseEntropy = calcShannonEnt(dataSet)                 #计算数据集的香农熵\n",
    "    bestInfoGain = 0.0                                  #信息增益\n",
    "    bestFeature = -1                                    #最优特征的索引值\n",
    "    for i in range(numFeatures):                         #遍历所有特征\n",
    "        #获取dataSet的第i个所有特征\n",
    "        featList = [example[i] for example in dataSet]\n",
    "        uniqueVals = set(featList)                         #创建set集合{},元素不可重复\n",
    "        newEntropy = 0.0                                  #经验条件熵\n",
    "        for value in uniqueVals:                         #计算信息增益\n",
    "            subDataSet = splitDataSet(dataSet, i, value)         #subDataSet划分后的子集\n",
    "            prob = len(subDataSet) / float(len(dataSet))           #计算子集的概率\n",
    "            newEntropy += prob * calcShannonEnt(subDataSet)     #根据公式计算经验条件熵\n",
    "        infoGain = abs(baseEntropy - newEntropy )                    #信息增益\n",
    "        print(\"第%d个特征的增益为%.3f\" % (i, infoGain))            #打印每个特征的信息增益\n",
    "        if (infoGain > bestInfoGain):                             #计算信息增益\n",
    "            bestInfoGain = infoGain                             #更新信息增益，找到最大的信息增益\n",
    "            bestFeature = i                                     #记录信息增益最大的特征的索引值\n",
    "    return bestFeature\n",
    "print(\"最优特征索引值:\" + str(chooseBestFeatureToSplit(german_clean)))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "pycharm": {
     "name": "#%% md\n"
    }
   },
   "source": [
    "## 2.\t调用Python自带的sklearn包里的决策树方法进行分类。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import pandas as pd\n",
    "from tqdm import tqdm\n",
    "from sklearn.tree import DecisionTreeClassifier\n",
    "from sklearn.ensemble import AdaBoostRegressor\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.preprocessing import LabelBinarizer,MultiLabelBinarizer\n",
    "from sklearn.metrics import accuracy_score"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "((900, 1065), (100, 1065), (900,), (100,))"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# 读取文件，分割特征和标签，划分训练集和测试集\n",
    "german_clean=np.array(pd.read_csv('german_clean.csv',header=0))\n",
    "x,y=german_clean[:,:-1].astype(str),german_clean[:,-1]#划分特征和标签\n",
    "\n",
    "one_hot = MultiLabelBinarizer()#调用独热编码方法对特征进行独热编码\n",
    "x=one_hot.fit_transform(x)\n",
    "\n",
    "x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.1)#划分训练集和测试集\n",
    "x_train.shape, x_test.shape, y_train.shape, y_test.shape "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 构建模型\n",
    "model = DecisionTreeClassifier(criterion=\"entropy\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "DecisionTreeClassifier(criterion='entropy')"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "#训练模型\n",
    "model.fit(x_train, y_train.astype('int'))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.71\n"
     ]
    }
   ],
   "source": [
    "#测试模型\n",
    "predict=model.predict(x_test)#调用模型预测\n",
    "acc=(predict==y_test).sum()/y_test.shape[0]#求精度\n",
    "print(acc)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "tensorflow",
   "language": "python",
   "name": "tensorflow"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.8"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
