{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 74,
   "id": "1c102336-9eea-4665-a3db-290fdcaa0700",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "已生成X，结构: (1060, 1024)\n",
      "已生成Y, 结构: (1060,)\n",
      "0.660377358490566\n"
     ]
    }
   ],
   "source": [
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.decomposition import PCA\n",
    "from sklearn.svm import SVC\n",
    "import cv2 as cv\n",
    "import os\n",
    "import pickle\n",
    "############################## Extract face crops and save ##############################\n",
    "# 1. Create the cascade classifier ONCE (it was previously re-created and its\n",
    "#    XML model re-loaded from disk for every single image).\n",
    "face_cascade = cv.CascadeClassifier()\n",
    "# 2. Load a pre-trained frontal-face Haar cascade model.\n",
    "face_cascade.load(\"haarcascade_frontalface_alt.xml\")\n",
    "\n",
    "X = []\n",
    "Y = []\n",
    "for fname in os.listdir('picture/'):\n",
    "    if \"ipynb_checkpoints\" in fname:\n",
    "        continue\n",
    "    img = cv.imread('picture/' + fname)  # read as BGR color (converted to gray below)\n",
    "    if img is None:  # skip unreadable / non-image files\n",
    "        continue\n",
    "    img_gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n",
    "    img_gray = cv.equalizeHist(img_gray)  # stretch the intensity range\n",
    "    # 3. Detect all faces; each entry is (x, y, w, h): top-left corner + width/height.\n",
    "    faces = face_cascade.detectMultiScale(img_gray)\n",
    "    if len(faces) == 0:\n",
    "        # BUG FIX: the original fell through here and either raised NameError\n",
    "        # (img_new undefined) or silently reused the previous image's crop.\n",
    "        continue\n",
    "    x, y, w, h = faces[-1]  # keep the last detection (same as the original loop)\n",
    "    face_img = cv.resize(img_gray[y:y+h, x:x+w], (32, 32), interpolation=cv.INTER_NEAREST)\n",
    "    X.append(face_img.reshape(-1))\n",
    "    # File names look like \"Chris Pratt_722 (41).jpg\"; the label is the part before '_'.\n",
    "    Y.append(os.path.basename(fname).split('_')[0])\n",
    "\n",
    "# Persist X and Y with pickle.\n",
    "with open(\"X\", 'wb') as f:\n",
    "    pickle.dump(X, f)\n",
    "    print(\"已生成X，结构:\", np.shape(X))\n",
    "with open(\"Y\", 'wb') as f:\n",
    "    pickle.dump(Y, f)\n",
    "    print(\"已生成Y, 结构:\", np.shape(Y))\n",
    "############################## Train the model ##############################\n",
    "with open(\"X\", 'rb') as f:\n",
    "    X = pickle.load(f)\n",
    "with open(\"Y\", 'rb') as f:\n",
    "    Y = pickle.load(f)\n",
    "# Keep the fitted PCA object (rather than a one-shot fit_transform) so the same\n",
    "# projection can be applied to unseen data at prediction time.\n",
    "pca = PCA(0.9).fit(X)  # retain 90% of the variance\n",
    "X = pca.transform(X)\n",
    "X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.3)\n",
    "\n",
    "# SVM model\n",
    "svc = SVC()\n",
    "svc.fit(X_train, Y_train)\n",
    "\n",
    "# Accuracy on the held-out split\n",
    "acc = svc.score(X_test, Y_test)\n",
    "print(acc)\n",
    "\n",
    "# Persist the trained model\n",
    "with open(\"svc\", 'wb') as f:\n",
    "    pickle.dump(svc, f)\n",
    "############################## Prediction ##############################"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "0db54730-058f-49a6-a132-8ad1fbeece75",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Looking in indexes: https://pypi.tuna.tsinghua.edu.cn/simple/\n",
      "Collecting opencv-contrib\n",
      "  Downloading https://pypi.tuna.tsinghua.edu.cn/packages/7b/b9/bc8b30fd9a4d6e96e3b91b168e657892b238b80d35b688d287cd6c2eb5b6/opencv_contrib-1.0.0.14-py3-none-any.whl (7.3 kB)\n",
      "Collecting opencv-contrib-python\n",
      "  Downloading https://pypi.tuna.tsinghua.edu.cn/packages/34/45/c8bc145b1541d1fbbf25d5494cd76453d9855971cfe571b9ad7e13cdb4c8/opencv_contrib_python-4.6.0.66-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (67.1 MB)\n",
      "     |████████████████████████████████| 67.1 MB 1.3 MB/s            \n",
      "\u001b[?25hRequirement already satisfied: numpy>=1.17.3 in /opt/conda/lib/python3.9/site-packages (from opencv-contrib-python->opencv-contrib) (1.19.5)\n",
      "Installing collected packages: opencv-contrib-python, opencv-contrib\n",
      "Successfully installed opencv-contrib-1.0.0.14 opencv-contrib-python-4.6.0.66\n"
     ]
    }
   ],
   "source": [
    "%pip install opencv-contrib-python"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 68,
   "id": "0bb163ec-ba74-4f12-82aa-e5380605157a",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0.7327044025157232"
      ]
     },
     "execution_count": 68,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "############################## Train the model ##############################\n",
    "# Reload the pickled feature matrix and label list produced earlier.\n",
    "with open(\"X\", 'rb') as f:\n",
    "    X = pickle.load(f)\n",
    "with open(\"Y\", 'rb') as f:\n",
    "    Y = pickle.load(f)\n",
    "# Fit PCA keeping 89% of the variance. The fitted object is kept (instead of a\n",
    "# one-shot fit_transform) so unseen samples can be projected with it later.\n",
    "pca = PCA(0.89).fit(X)\n",
    "X = pca.transform(X)\n",
    "X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.3)\n",
    "\n",
    "# Support-vector classifier; C=5.2 relaxes regularization vs. the default C=1.\n",
    "svc = SVC(C=5.2)\n",
    "svc.fit(X_train, Y_train)\n",
    "\n",
    "# Held-out accuracy; the bare final expression displays the value.\n",
    "acc = svc.score(X_test, Y_test)\n",
    "acc"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 63,
   "id": "1e01b108-5932-4c67-b8e4-7dbc1fb0fb66",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "acc 0.5628930817610063\n"
     ]
    }
   ],
   "source": [
    "from sklearn.ensemble import RandomForestClassifier\n",
    "# 3. Random-forest baseline on the same PCA-reduced train/test split.\n",
    "#    Defaults are used, i.e. equivalent to n_estimators=100.\n",
    "forest = RandomForestClassifier()\n",
    "forest.fit(X_train, Y_train)\n",
    "\n",
    "# 4. Accuracy of the forest on the held-out data\n",
    "acc = forest.score(X_test, Y_test)\n",
    "print(\"acc\", acc)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 69,
   "id": "ea6e37fc-0181-4896-afe7-5574157e756d",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "开始搜索最佳K:\n",
      "742\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "0.5691823899371069"
      ]
     },
     "execution_count": 69,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from sklearn.neighbors import KNeighborsClassifier\n",
    "# 3. Search for the best K.\n",
    "# NOTE(review): the original scanned k = 1..len(X_train) (700+ KNN fits);\n",
    "# values of k anywhere near the training-set size are never useful, so the\n",
    "# search is capped to keep a full Restart-&-Run-All fast.\n",
    "print(\"开始搜索最佳K:\")\n",
    "print(X_train.shape[0])\n",
    "acc_list = []\n",
    "max_test = min(len(X_train), 100)  # was len(X_train)\n",
    "for k in range(1, max_test + 1):\n",
    "    clf = KNeighborsClassifier(n_neighbors=k)  # 1. build the classifier\n",
    "    clf.fit(X_train, Y_train)                  # 2. fit\n",
    "    acc = clf.score(X_test, Y_test)            # 3. score\n",
    "    acc_list.append(acc)  # record every accuracy\n",
    "\n",
    "# NOTE(review): selecting k by test-set accuracy and then reporting accuracy on\n",
    "# the same test set is optimistically biased; prefer a validation split or CV.\n",
    "max_acc_k = np.argmax(acc_list) + 1  # k that produced the highest accuracy\n",
    "# Refit the KNN model with the chosen k and report its held-out accuracy.\n",
    "clf = KNeighborsClassifier(n_neighbors=max_acc_k).fit(X_train, Y_train)\n",
    "acc = clf.score(X_test, Y_test)\n",
    "acc"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 75,
   "id": "1f29eb7a-622e-4290-ae8a-12735a9ef19f",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(35, 135) (1060, 135)\n",
      "['Chris Pratt' 'Chris Pratt' 'Chris Pratt' 'Chris Pratt' 'Tom Holland'\n",
      " 'Chris Pratt' 'Tom Holland' 'Chris Pratt' 'Chris Pratt' 'Chris Pratt'\n",
      " 'Chris Pratt' 'Jason Momoa' 'Jason Momoa' 'Jason Momoa' 'Jason Momoa'\n",
      " 'Jason Momoa' 'Jason Momoa' 'Jason Momoa' 'margot robbie' 'Jason Momoa'\n",
      " 'margot robbie' 'margot robbie' 'Robert Downey Jr' 'Robert Downey Jr'\n",
      " 'margot robbie' 'Robert Downey Jr' 'Robert Downey Jr' 'Tom Holland'\n",
      " 'Robert Downey Jr' 'Jason Momoa' 'Stephen Amell' 'Stephen Amell'\n",
      " 'Stephen Amell' 'Stephen Amell' 'Stephen Amell']\n"
     ]
    }
   ],
   "source": [
    "############################## Prediction ##############################\n",
    "# One cascade classifier reused for every test image (was re-created, with its\n",
    "# XML model re-loaded from disk, once per image).\n",
    "face_cascade = cv.CascadeClassifier()\n",
    "face_cascade.load(\"haarcascade_frontalface_alt.xml\")\n",
    "\n",
    "test = []\n",
    "for fname in os.listdir('test/'):\n",
    "    if \"ipynb_checkpoints\" in fname:\n",
    "        continue\n",
    "    img = cv.imread('test/' + fname)  # BGR color image (converted to gray below)\n",
    "    if img is None:  # skip unreadable / non-image files\n",
    "        continue\n",
    "    img_gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n",
    "    img_gray = cv.equalizeHist(img_gray)  # stretch the intensity range\n",
    "    # Detect faces: a list of (x, y, w, h) bounding boxes.\n",
    "    faces = face_cascade.detectMultiScale(img_gray)\n",
    "    if len(faces) == 0:\n",
    "        # BUG FIX: the original reused the previous image's crop here (or\n",
    "        # raised NameError when the very first image had no detected face).\n",
    "        continue\n",
    "    x, y, w, h = faces[-1]  # keep the last detection, as the original loop did\n",
    "    face_img = cv.resize(img_gray[y:y+h, x:x+w], (32, 32), interpolation=cv.INTER_NEAREST)\n",
    "    test.append(face_img.reshape(-1))\n",
    "\n",
    "# Project with the PCA fitted at training time, then classify with the saved SVC.\n",
    "test = pca.transform(np.array(test))\n",
    "with open(\"svc\", 'rb') as f:\n",
    "    svc = pickle.load(f)\n",
    "pdt = svc.predict(test)\n",
    "print(pdt)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "03be7252-84e7-4a08-a371-3ebe0e8f51f8",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
