{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "cc2ce484",
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "import numpy as np\n",
    "import random\n",
    "from sklearn.preprocessing import OrdinalEncoder\n",
    "from sklearn import tree"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "e8e2a0ba",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[39. 50. 38. 53. 28. 37. 49. 52. 31. 42. 30. 23. 32. 40. 34. 25. 43. 54.\n",
      " 35. 59. 56. 19. 20. 45. 22. 48. 21. 24. 57. 44. 41. 29. 18. 47. 46. 36.\n",
      " 79. 27. 67. 33. 76. 17. 55. 61. 70. 64. 71. 68. 66. 51. 58. 26. 60. 90.\n",
      " 75. 65. 77. 62. 63. 80. 72. 74. 69. 73. 81. 78. 88. 82. 83. 84. 85. 86.\n",
      " 87.]\n",
      "[' State-gov' ' Self-emp-not-inc' ' Private' ' Federal-gov' ' Local-gov'\n",
      " ' ?' ' Self-emp-inc' ' Without-pay' ' Never-worked']\n",
      "[ 77516.  83311. 215646. ...  34066.  84661. 257302.]\n",
      "[' Bachelors' ' HS-grad' ' 11th' ' Masters' ' 9th' ' Some-college'\n",
      " ' Assoc-acdm' ' Assoc-voc' ' 7th-8th' ' Doctorate' ' Prof-school'\n",
      " ' 5th-6th' ' 10th' ' 1st-4th' ' Preschool' ' 12th']\n",
      "[13.  9.  7. 14.  5. 10. 12. 11.  4. 16. 15.  3.  6.  2.  1.  8.]\n",
      "[' Never-married' ' Married-civ-spouse' ' Divorced'\n",
      " ' Married-spouse-absent' ' Separated' ' Married-AF-spouse' ' Widowed']\n",
      "[' Adm-clerical' ' Exec-managerial' ' Handlers-cleaners' ' Prof-specialty'\n",
      " ' Other-service' ' Sales' ' Craft-repair' ' Transport-moving'\n",
      " ' Farming-fishing' ' Machine-op-inspct' ' Tech-support' ' ?'\n",
      " ' Protective-serv' ' Armed-Forces' ' Priv-house-serv']\n",
      "[' Not-in-family' ' Husband' ' Wife' ' Own-child' ' Unmarried'\n",
      " ' Other-relative']\n",
      "[' White' ' Black' ' Asian-Pac-Islander' ' Amer-Indian-Eskimo' ' Other']\n",
      "[' Male' ' Female']\n",
      "[ 2174.     0. 14084.  5178.  5013.  2407. 14344. 15024.  7688. 34095.\n",
      "  4064.  4386.  7298.  1409.  3674.  1055.  3464.  2050.  2176.   594.\n",
      " 20051.  6849.  4101.  1111.  8614.  3411.  2597. 25236.  4650.  9386.\n",
      "  2463.  3103. 10605.  2964.  3325.  2580.  3471.  4865. 99999.  6514.\n",
      "  1471.  2329.  2105.  2885. 25124. 10520.  2202.  2961. 27828.  6767.\n",
      "  2228.  1506. 13550.  2635.  5556.  4787.  3781.  3137.  3818.  3942.\n",
      "   914.   401.  2829.  2977.  4934.  2062.  2354.  5455. 15020.  1424.\n",
      "  3273. 22040.  4416.  3908. 10566.   991.  4931.  1086.  7430.  6497.\n",
      "   114.  7896.  2346.  3418.  3432.  2907.  1151.  2414.  2290. 15831.\n",
      " 41310.  4508.  2538.  3456.  6418.  1848.  3887.  5721.  9562.  1455.\n",
      "  2036.  1831. 11678.  2936.  2993.  7443.  6360.  1797.  1173.  4687.\n",
      "  6723.  2009.  6097.  2653.  1639. 18481.  7978.  2387.  5060.]\n",
      "[   0. 2042. 1408. 1902. 1573. 1887. 1719. 1762. 1564. 2179. 1816. 1980.\n",
      " 1977. 1876. 1340. 2206. 1741. 1485. 2339. 2415. 1380. 1721. 2051. 2377.\n",
      " 1669. 2352. 1672.  653. 2392. 1504. 2001. 1590. 1651. 1628. 1848. 1740.\n",
      " 2002. 1579. 2258. 1602.  419. 2547. 2174. 2205. 1726. 2444. 1138. 2238.\n",
      "  625.  213. 1539.  880. 1668. 1092. 1594. 3004. 2231. 1844.  810. 2824.\n",
      " 2559. 2057. 1974.  974. 2149. 1825. 1735. 1258. 2129. 2603. 2282.  323.\n",
      " 4356. 2246. 1617. 1648. 2489. 3770. 1755. 3683. 2267. 2080. 2457.  155.\n",
      " 3900. 2201. 1944. 2467. 2163. 2754. 2472. 1411.]\n",
      "[40. 13. 16. 45. 50. 80. 30. 35. 60. 20. 52. 44. 15. 25. 38. 43. 55. 48.\n",
      " 58. 32. 70.  2. 22. 56. 41. 28. 36. 24. 46. 42. 12. 65.  1. 10. 34. 75.\n",
      " 98. 33. 54.  8.  6. 64. 19. 18. 72.  5.  9. 47. 37. 21. 26. 14.  4. 59.\n",
      "  7. 99. 53. 39. 62. 57. 78. 90. 66. 11. 49. 84.  3. 17. 68. 27. 85. 31.\n",
      " 51. 77. 63. 23. 87. 88. 73. 89. 97. 94. 29. 96. 67. 82. 86. 91. 81. 76.\n",
      " 92. 61. 74. 95.]\n",
      "[' United-States' ' Cuba' ' Jamaica' ' India' ' ?' ' Mexico' ' South'\n",
      " ' Puerto-Rico' ' Honduras' ' England' ' Canada' ' Germany' ' Iran'\n",
      " ' Philippines' ' Italy' ' Poland' ' Columbia' ' Cambodia' ' Thailand'\n",
      " ' Ecuador' ' Laos' ' Taiwan' ' Haiti' ' Portugal' ' Dominican-Republic'\n",
      " ' El-Salvador' ' France' ' Guatemala' ' China' ' Japan' ' Yugoslavia'\n",
      " ' Peru' ' Outlying-US(Guam-USVI-etc)' ' Scotland' ' Trinadad&Tobago'\n",
      " ' Greece' ' Nicaragua' ' Vietnam' ' Hong' ' Ireland' ' Hungary'\n",
      " ' Holand-Netherlands']\n",
      "[' <=50K' ' >50K']\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "<ipython-input-3-c3fb82131d7b>:51: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  df['workclass'][i] = k\n"
     ]
    }
   ],
   "source": [
    "# Load the adult census dataset.\n",
    "# NOTE(review): hard-coded absolute Windows path -- consider a configurable DATA_DIR.\n",
    "df = pd.read_csv('C:\\\\Users\\\\96408\\\\Desktop\\\\adult.csv')\n",
    "# Drop rows containing NaN (keyword args: positional axis/how were removed in pandas 2.x).\n",
    "df.dropna(axis=0, how='any', inplace=True)\n",
    "\n",
    "# Inspect unique values per column: workclass, occupation and native-country\n",
    "# use the string ' ?' (note the leading space) as their missing-value marker.\n",
    "for col in df.columns.tolist():\n",
    "    print(df.loc[:, col].unique())\n",
    "\n",
    "# --- Impute 'workclass' with a decision tree ---\n",
    "# Drop columns not used as predictors.\n",
    "df_1 = df.drop(['fnlwgt', 'education-num', 'native-country', 'Listing of attributes'], axis=1)\n",
    "\n",
    "# Integer-encode the non-missing workclass values; label_1 maps code -> label.\n",
    "wk = pd.DataFrame(df_1.loc[df.workclass != ' ?', 'workclass'])\n",
    "label_1 = wk['workclass'].unique().tolist()\n",
    "wk['workclass'] = wk['workclass'].apply(lambda x: label_1.index(x))\n",
    "\n",
    "# Write the encoded values back into df_1 (index-aligned, vectorized --\n",
    "# replaces the original per-row .loc assignment loop).\n",
    "df_1.loc[wk.index, 'workclass'] = wk['workclass']\n",
    "\n",
    "# Ordinal-encode the remaining categorical feature columns.\n",
    "df_1.iloc[:, 2:8] = OrdinalEncoder().fit_transform(df_1.iloc[:, 2:8])\n",
    "\n",
    "# Rows with a known workclass form the training set; rows with ' ?' are predicted.\n",
    "train = df_1.loc[df.workclass != ' ?', :].reset_index(drop=True)\n",
    "test = df_1.loc[df.workclass == ' ?', :].reset_index(drop=True)\n",
    "\n",
    "Xtrain = train.loc[:, train.columns != 'workclass']\n",
    "Xtest = test.loc[:, test.columns != 'workclass']\n",
    "Ytrain = train.iloc[:, 1].astype('int')  # column 1 is the encoded workclass\n",
    "Ytest = test.iloc[:, 1]\n",
    "\n",
    "# Fit a decision tree and predict the missing workclass codes.\n",
    "clf = tree.DecisionTreeClassifier()\n",
    "clf = clf.fit(Xtrain, Ytrain)\n",
    "li_1 = clf.predict(Xtest).tolist()\n",
    "\n",
    "# Map predicted codes back to their original string labels.\n",
    "li_2 = [label_1[i] for i in li_1]\n",
    "index_1 = df.loc[df.workclass == ' ?'].index.tolist()  # rows missing workclass in df\n",
    "\n",
    "# BUG FIX: write back with .loc instead of chained indexing df['workclass'][i],\n",
    "# which raised SettingWithCopyWarning and may silently fail to update df.\n",
    "for i, k in zip(index_1, li_2):\n",
    "    df.loc[i, 'workclass'] = k\n",
    "\n",
    "# --- Impute 'occupation' ---\n",
    "# BUG FIX: the original test `x == ' Without-pay' or ' Never-worked'` was always\n",
    "# True (a non-empty string literal is truthy); use a membership test instead.\n",
    "index_2 = df.loc[df.occupation == ' ?'].index.tolist()\n",
    "for j in index_2:\n",
    "    if df.loc[j, 'workclass'] in (' Without-pay', ' Never-worked'):\n",
    "        df.loc[j, 'occupation'] = 'unemployed'\n",
    "\n",
    "# --- Impute 'native-country' based on race ---\n",
    "# BUG FIX: the race categories carry a leading space (' Black', ' White', ...),\n",
    "# so the original comparisons against 'Black' / 'Asian-Pac-Islander' never matched.\n",
    "# The filled country values also get a leading space for consistency with the\n",
    "# dataset's existing categories (cf. ' Haiti' in the original).\n",
    "index_3 = df.loc[df['native-country'] == ' ?'].index.tolist()\n",
    "for k in index_3:\n",
    "    if df.loc[k, 'race'] == ' Black':\n",
    "        df.loc[k, 'native-country'] = ' India'\n",
    "    elif df.loc[k, 'race'] == ' White':\n",
    "        df.loc[k, 'native-country'] = ' United-States'\n",
    "    elif df.loc[k, 'race'] == ' Asian-Pac-Islander':\n",
    "        df.loc[k, 'native-country'] = ' China'\n",
    "    else:\n",
    "        df.loc[k, 'native-country'] = ' Haiti'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "8eb549ad",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "age                      0\n",
       "workclass                0\n",
       "fnlwgt                   0\n",
       "education                0\n",
       "education-num            0\n",
       "marital-status           0\n",
       "occupation               0\n",
       "relationship             0\n",
       "race                     0\n",
       "sex                      0\n",
       "capital-gain             0\n",
       "capital-loss             0\n",
       "hours-per-week           0\n",
       "native-country           0\n",
       "Listing of attributes    0\n",
       "dtype: int64"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Confirm no NaN values remain after imputation (isna is the canonical alias of isnull).\n",
    "df.isna().sum()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c7e3a086",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.8"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
