{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {
    "colab_type": "text",
    "id": "view-in-github"
   },
   "source": [
    "<a href=\"https://colab.research.google.com/github/CoreTheGreat/HBPU-Machine-Learning-Course/blob/main/ML_Chapter3_Classification.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "lPboLx_o0UxI"
   },
   "source": [
    "# 第五章：深度学习\n",
    "湖北理工学院《机器学习》课程资料\n",
    "\n",
    "作者：李辉楚吴\n",
    "\n",
    "笔记内容概述: 前馈神经网络、全连接网络、Wi-Fi动作感知"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 处理原始Mat文件（数据预处理，仅需运行一次，生成后续实验所需的CSV文件）"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Convert the labeled .mat recordings to per-file CSVs (one-off preprocessing step)\n",
    "import scipy.io as sio\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import os\n",
    "\n",
    "def mat2csi(matfile):\n",
    "    ''' \n",
    "    Convert a labeled .mat file to CSI arrays and save them as CSV.\n",
    "    Extracts the CSI of the first T-R link only.\n",
    "    \n",
    "    Input:\n",
    "    matfile: path to a .mat file containing keys 'csi' and 'csiLabel'\n",
    "    \n",
    "    return:\n",
    "    csi: CSI amplitude of first T-R link (N x 30)\n",
    "    csilabel: integer label per sample\n",
    "    timestamp: timestamps in seconds, normalized to start at 0\n",
    "    '''\n",
    "    \n",
    "    # Load the .mat file\n",
    "    mat_data = sio.loadmat(matfile)\n",
    "    \n",
    "    # Column 0 holds the timestamp; columns 2..31 hold the 30 subcarrier channels\n",
    "    raw_timestamp = mat_data['csi'][:,0]\n",
    "    raw_csi = mat_data['csi'][:,2:32]\n",
    "    raw_csilabel = mat_data['csiLabel'].reshape(-1)\n",
    "\n",
    "    # Keep samples whose label is >= 0 (the static class 0 is kept as well;\n",
    "    # the previous comment said '> 0', which did not match the code)\n",
    "    valid_indices = raw_csilabel >= 0\n",
    "    csi = np.abs(raw_csi[valid_indices]) # Amplitude of the complex CSI data\n",
    "    csilabel = raw_csilabel[valid_indices].astype(int) # Extract the labels\n",
    "    timestamp = raw_timestamp[valid_indices].real / 10 ** 6 # Convert the timestamp to seconds, using only the real part\n",
    "    timestamp = timestamp - timestamp[0] # Normalize the timestamp to start at 0\n",
    "\n",
    "    # Change to DataFrame\n",
    "    df_combined = pd.DataFrame({\n",
    "        'timestamp': timestamp,\n",
    "        'label': csilabel,\n",
    "        **{f'Channel {i}': csi[:, i-1] for i in range(1, 31)}\n",
    "    })\n",
    "\n",
    "    # Build the CSV path next to the source .mat file (splitext is robust to dots\n",
    "    # elsewhere in the path) so the loading cell below finds it under ./Data\n",
    "    filename = os.path.splitext(matfile)[0] + '.csv'\n",
    "    \n",
    "    # Save combined DataFrame to a single CSV file\n",
    "    df_combined.to_csv(filename, index=False)\n",
    "\n",
    "    print(f'{filename} saved successfully.')\n",
    "\n",
    "    return csi, csilabel, timestamp\n",
    "\n",
    "_, _, _ = mat2csi('./Data/U1_G1_N10_L_L1_D0_20200408_1_Labeled.mat')\n",
    "_, _, _ = mat2csi('./Data/U1_G1_N30_L_L1_D0_20200408_2_Labeled.mat')\n",
    "_, _, _ = mat2csi('./Data/U1_G2_N10_L_L1_D0_20200408_1_Labeled.mat')\n",
    "_, _, _ = mat2csi('./Data/U1_G2_N30_L_L1_D0_20200408_2_Labeled.mat')\n",
    "_, _, _ = mat2csi('./Data/U1_G3_N10_L_L1_D0_20200408_1_Labeled.mat')\n",
    "_, _, _ = mat2csi('./Data/U1_G3_N30_L_L1_D0_20200408_2_Labeled.mat')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 数据准备\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "载入csv数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "./Data/U1_G1_N30_L_L1_D0_20200408_2_Labeled.csv\n",
      "[0 1]\n",
      "./Data/U1_G2_N30_L_L1_D0_20200408_2_Labeled.csv\n",
      "[0 2]\n",
      "./Data/U1_G3_N30_L_L1_D0_20200408_2_Labeled.csv\n",
      "[0 3]\n",
      "./Data/U1_G1_N10_L_L1_D0_20200408_1_Labeled.csv\n",
      "[0 1]\n",
      "./Data/U1_G2_N10_L_L1_D0_20200408_1_Labeled.csv\n",
      "[0 2]\n",
      "./Data/U1_G3_N10_L_L1_D0_20200408_1_Labeled.csv\n",
      "[0 3]\n",
      "Training segments: 183\n",
      "Training Segment 1: 6414\n",
      "Training Segment 2: 3504\n",
      "Training Segment 3: 1214\n",
      "Training Segment 4: 2960\n",
      "Training Segment 5: 974\n",
      "Training Segment 6: 3024\n",
      "Training Segment 7: 814\n",
      "Training Segment 8: 3248\n",
      "Training Segment 9: 734\n",
      "Training Segment 10: 2928\n",
      "Training Segment 11: 782\n",
      "Training Segment 12: 2480\n",
      "Training Segment 13: 1102\n",
      "Training Segment 14: 2656\n",
      "Training Segment 15: 750\n",
      "Training Segment 16: 2672\n",
      "Training Segment 17: 606\n",
      "Training Segment 18: 3200\n",
      "Training Segment 19: 350\n",
      "Training Segment 20: 3392\n",
      "Training Segment 21: 878\n",
      "Training Segment 22: 3136\n",
      "Training Segment 23: 686\n",
      "Training Segment 24: 3408\n",
      "Training Segment 25: 558\n",
      "Training Segment 26: 3056\n",
      "Training Segment 27: 798\n",
      "Training Segment 28: 3472\n",
      "Training Segment 29: 670\n",
      "Training Segment 30: 3328\n",
      "Training Segment 31: 446\n",
      "Training Segment 32: 3472\n",
      "Training Segment 33: 798\n",
      "Training Segment 34: 2960\n",
      "Training Segment 35: 1438\n",
      "Training Segment 36: 3184\n",
      "Training Segment 37: 1134\n",
      "Training Segment 38: 2736\n",
      "Training Segment 39: 718\n",
      "Training Segment 40: 3200\n",
      "Training Segment 41: 942\n",
      "Training Segment 42: 3168\n",
      "Training Segment 43: 1454\n",
      "Training Segment 44: 2848\n",
      "Training Segment 45: 1342\n",
      "Training Segment 46: 3168\n",
      "Training Segment 47: 910\n",
      "Training Segment 48: 2624\n",
      "Training Segment 49: 1198\n",
      "Training Segment 50: 3024\n",
      "Training Segment 51: 974\n",
      "Training Segment 52: 3088\n",
      "Training Segment 53: 974\n",
      "Training Segment 54: 3200\n",
      "Training Segment 55: 558\n",
      "Training Segment 56: 3408\n",
      "Training Segment 57: 798\n",
      "Training Segment 58: 2928\n",
      "Training Segment 59: 1038\n",
      "Training Segment 60: 3424\n",
      "Training Segment 61: 7663\n",
      "Training Segment 62: 7870\n",
      "Training Segment 63: 3632\n",
      "Training Segment 64: 1502\n",
      "Training Segment 65: 4240\n",
      "Training Segment 66: 1102\n",
      "Training Segment 67: 3392\n",
      "Training Segment 68: 1582\n",
      "Training Segment 69: 4096\n",
      "Training Segment 70: 1822\n",
      "Training Segment 71: 3520\n",
      "Training Segment 72: 1470\n",
      "Training Segment 73: 3936\n",
      "Training Segment 74: 1118\n",
      "Training Segment 75: 4480\n",
      "Training Segment 76: 1246\n",
      "Training Segment 77: 3552\n",
      "Training Segment 78: 1646\n",
      "Training Segment 79: 3824\n",
      "Training Segment 80: 2094\n",
      "Training Segment 81: 3440\n",
      "Training Segment 82: 2270\n",
      "Training Segment 83: 4160\n",
      "Training Segment 84: 1630\n",
      "Training Segment 85: 4096\n",
      "Training Segment 86: 1630\n",
      "Training Segment 87: 4256\n",
      "Training Segment 88: 1774\n",
      "Training Segment 89: 3472\n",
      "Training Segment 90: 2270\n",
      "Training Segment 91: 3600\n",
      "Training Segment 92: 2110\n",
      "Training Segment 93: 3696\n",
      "Training Segment 94: 2190\n",
      "Training Segment 95: 3888\n",
      "Training Segment 96: 1902\n",
      "Training Segment 97: 4208\n",
      "Training Segment 98: 1886\n",
      "Training Segment 99: 4256\n",
      "Training Segment 100: 2030\n",
      "Training Segment 101: 4048\n",
      "Training Segment 102: 2478\n",
      "Training Segment 103: 4000\n",
      "Training Segment 104: 2238\n",
      "Training Segment 105: 4224\n",
      "Training Segment 106: 3022\n",
      "Training Segment 107: 4480\n",
      "Training Segment 108: 1310\n",
      "Training Segment 109: 4208\n",
      "Training Segment 110: 2174\n",
      "Training Segment 111: 4096\n",
      "Training Segment 112: 2238\n",
      "Training Segment 113: 4144\n",
      "Training Segment 114: 2766\n",
      "Training Segment 115: 4832\n",
      "Training Segment 116: 1454\n",
      "Training Segment 117: 4224\n",
      "Training Segment 118: 4526\n",
      "Training Segment 119: 4336\n",
      "Training Segment 120: 3438\n",
      "Training Segment 121: 4464\n",
      "Training Segment 122: 9807\n",
      "Training Segment 123: 17694\n",
      "Training Segment 124: 2512\n",
      "Training Segment 125: 1758\n",
      "Training Segment 126: 2192\n",
      "Training Segment 127: 1150\n",
      "Training Segment 128: 2592\n",
      "Training Segment 129: 1694\n",
      "Training Segment 130: 2176\n",
      "Training Segment 131: 1198\n",
      "Training Segment 132: 2144\n",
      "Training Segment 133: 1710\n",
      "Training Segment 134: 2032\n",
      "Training Segment 135: 2222\n",
      "Training Segment 136: 2256\n",
      "Training Segment 137: 2110\n",
      "Training Segment 138: 1760\n",
      "Training Segment 139: 2718\n",
      "Training Segment 140: 2176\n",
      "Training Segment 141: 2110\n",
      "Training Segment 142: 2112\n",
      "Training Segment 143: 2798\n",
      "Training Segment 144: 2400\n",
      "Training Segment 145: 2558\n",
      "Training Segment 146: 2208\n",
      "Training Segment 147: 2926\n",
      "Training Segment 148: 2496\n",
      "Training Segment 149: 2590\n",
      "Training Segment 150: 2448\n",
      "Training Segment 151: 2526\n",
      "Training Segment 152: 2416\n",
      "Training Segment 153: 3006\n",
      "Training Segment 154: 2320\n",
      "Training Segment 155: 3518\n",
      "Training Segment 156: 2032\n",
      "Training Segment 157: 5406\n",
      "Training Segment 158: 2240\n",
      "Training Segment 159: 5278\n",
      "Training Segment 160: 2304\n",
      "Training Segment 161: 8094\n",
      "Training Segment 162: 2192\n",
      "Training Segment 163: 22366\n",
      "Training Segment 164: 2352\n",
      "Training Segment 165: 1726\n",
      "Training Segment 166: 2768\n",
      "Training Segment 167: 2430\n",
      "Training Segment 168: 2176\n",
      "Training Segment 169: 3166\n",
      "Training Segment 170: 2208\n",
      "Training Segment 171: 3950\n",
      "Training Segment 172: 3360\n",
      "Training Segment 173: 3278\n",
      "Training Segment 174: 2672\n",
      "Training Segment 175: 2558\n",
      "Training Segment 176: 2400\n",
      "Training Segment 177: 3630\n",
      "Training Segment 178: 1712\n",
      "Training Segment 179: 3838\n",
      "Training Segment 180: 2240\n",
      "Training Segment 181: 2606\n",
      "Training Segment 182: 2416\n",
      "Training Segment 183: 8559\n",
      "Testing segments: 63\n"
     ]
    }
   ],
   "source": [
    "import pandas as pd\n",
    "\n",
    "# Define training and testing files\n",
    "training_files = [\n",
    "    './Data/U1_G1_N30_L_L1_D0_20200408_2_Labeled.csv',\n",
    "    './Data/U1_G2_N30_L_L1_D0_20200408_2_Labeled.csv',\n",
    "    './Data/U1_G3_N30_L_L1_D0_20200408_2_Labeled.csv']\n",
    "\n",
    "testing_files = [\n",
    "    './Data/U1_G1_N10_L_L1_D0_20200408_1_Labeled.csv',\n",
    "    './Data/U1_G2_N10_L_L1_D0_20200408_1_Labeled.csv',\n",
    "    './Data/U1_G3_N10_L_L1_D0_20200408_1_Labeled.csv'\n",
    "]\n",
    "\n",
    "# Function to read and process CSV files\n",
    "def read_csv_file(file_path):\n",
    "    ''' Read one CSV and return (csi, label, timestamp) arrays. '''\n",
    "    print(file_path)\n",
    "    df = pd.read_csv(file_path)\n",
    "    csi = df.iloc[:, 2:].values  # All columns except 'timestamp' and 'label'\n",
    "    label = df['label'].values # 0: static, 1-3: gesture ids (outputs show only 0-3; TODO confirm each gesture's meaning)\n",
    "    timestamp = df['timestamp'].values\n",
    "    print(np.unique(label))\n",
    "    return csi, label, timestamp\n",
    "\n",
    "def segment_signals(csi, label, timestamp):\n",
    "    ''' Split a labeled stream into maximal runs of constant label. '''\n",
    "    segments = [] # Store segments\n",
    "    segment_label = label[0] # Initialize segment label\n",
    "    segment_start = 0 # Initialize segment start index\n",
    "\n",
    "    for i in range(len(label)): # Iterate through all labels\n",
    "        if label[i] != segment_label: # Label changed at i: samples up to i-1 belong to the previous segment\n",
    "            # Slice ends are exclusive, so [segment_start:i] covers segment_start..i-1.\n",
    "            # (The previous code used i-1 and silently dropped the last sample of every segment.)\n",
    "            segments.append((csi[segment_start:i], segment_label, timestamp[segment_start:i]))\n",
    "            segment_start = i # Update the segment start index\n",
    "            segment_label = label[i] # Update the segment label\n",
    "\n",
    "    segments.append((csi[segment_start:], segment_label, timestamp[segment_start:])) # Append the last segment to the segments list\n",
    "    return segments\n",
    "\n",
    "# Define training and testing segments\n",
    "training_segments = []\n",
    "testing_segments = []\n",
    "\n",
    "# Read and process training files\n",
    "for file in training_files:\n",
    "    s, y, t = read_csv_file(file)\n",
    "    training_segments.extend(segment_signals(s, y, t))\n",
    "\n",
    "# Read and process testing files\n",
    "for file in testing_files:\n",
    "    s, y, t = read_csv_file(file)\n",
    "    testing_segments.extend(segment_signals(s, y, t))\n",
    "\n",
    "# Print sizes of the training segments and testing segments\n",
    "print(f\"Training segments: {len(training_segments)}\")\n",
    "\n",
    "# Print length of all training segments\n",
    "for i, (s, y, t) in enumerate(training_segments):\n",
    "    print(f\"Training Segment {i + 1}: {len(s)}\")\n",
    "\n",
    "# Print size of the testing segments\n",
    "print(f\"Testing segments: {len(testing_segments)}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "数据对齐：通过特征提取使得每一个训练集和测试集的样本长度相同"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "from scipy.stats import kurtosis\n",
    "from scipy.stats import skew\n",
    "\n",
    "# Extract features of training segments\n",
    "def extract_features(s):\n",
    "    ''' \n",
    "    Extract a fixed-length feature vector from one variable-length segment,\n",
    "    so every sample has the same dimensionality regardless of segment length.\n",
    "    features include (7 statistics, each computed per channel):\n",
    "    - mean\n",
    "    - std\n",
    "    - max\n",
    "    - min\n",
    "    - median\n",
    "    - kurtosis\n",
    "    - skew\n",
    "    \n",
    "    Input:\n",
    "    s: segment (N*30) in training_segments or testing_segments\n",
    "    \n",
    "    Output:\n",
    "    x: 1-D vector of length 7*30 = 210 (7 statistics x 30 channels;\n",
    "       matches the FNN input size of 210 below)\n",
    "    '''\n",
    "    x = []\n",
    "    x.extend(np.mean(s, axis=0))    # per-channel mean\n",
    "    x.extend(np.std(s, axis=0))     # per-channel standard deviation\n",
    "    x.extend(np.max(s, axis=0))     # per-channel maximum\n",
    "    x.extend(np.min(s, axis=0))     # per-channel minimum\n",
    "    x.extend(np.median(s, axis=0))  # per-channel median\n",
    "    x.extend(kurtosis(s, axis=0))   # per-channel kurtosis\n",
    "    x.extend(skew(s, axis=0))       # per-channel skewness\n",
    "\n",
    "    return np.array(x)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "使用extract_features创建训练集和测试集\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "from torch.utils.data import DataLoader, TensorDataset\n",
    "\n",
    "def one_hot_collate(batch):\n",
    "    ''' Collate (feature, label) pairs into a batch and one-hot encode the labels. '''\n",
    "    data = torch.stack([item[0] for item in batch])\n",
    "    labels = torch.tensor([item[1] for item in batch])\n",
    "    \n",
    "    one_hot_labels = torch.zeros(labels.size(0), 4)  # 4 classes: static + 3 gestures\n",
    "    one_hot_labels.scatter_(1, labels.unsqueeze(1), 1)\n",
    "    return data, one_hot_labels\n",
    "\n",
    "batch_size = 4\n",
    "\n",
    "# Build training dataset.\n",
    "# Stack into one ndarray first: torch.tensor on a Python list of ndarrays is\n",
    "# very slow and raises a UserWarning; np.stack gives one contiguous array.\n",
    "trX = np.stack([extract_features(s) for s, _, _ in training_segments])\n",
    "trX = torch.tensor(trX, dtype=torch.float32)\n",
    "trY = torch.tensor([y for _, y, _ in training_segments]) # Labels of training segments\n",
    "\n",
    "# Build testing dataset\n",
    "teX = np.stack([extract_features(s) for s, _, _ in testing_segments])\n",
    "teX = torch.tensor(teX, dtype=torch.float32)\n",
    "teY = torch.tensor([y for _, y, _ in testing_segments]) # Labels of testing segments\n",
    "\n",
    "# Normalize trX and teX\n",
    "# Calculate mean and standard deviation from the training data only\n",
    "mean = trX.mean(dim=0)\n",
    "std = trX.std(dim=0)\n",
    "std = torch.where(std == 0, torch.ones_like(std), std) # Guard zero-variance features (would divide by zero -> NaN)\n",
    "\n",
    "# Normalize training data\n",
    "trX = (trX - mean) / std\n",
    "\n",
    "# Normalize testing data using training mean and std\n",
    "teX = (teX - mean) / std\n",
    "\n",
    "# Build Dataset\n",
    "trDataset = TensorDataset(trX, trY) # Create training dataset\n",
    "teDataset = TensorDataset(teX, teY) # Create testing dataset\n",
    "\n",
    "# Build loader\n",
    "trLoader = DataLoader(trDataset, batch_size=batch_size, shuffle=True, num_workers=0, collate_fn=one_hot_collate) # Create training dataloader\n",
    "teLoader = DataLoader(teDataset, batch_size=batch_size, shuffle=False, num_workers=0, collate_fn=one_hot_collate) # Create testing dataloader"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "定义模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch.nn as nn\n",
    "\n",
    "class FNN(nn.Module):\n",
    "    ''' \n",
    "    Feed-forward fully connected classifier:\n",
    "    input -> Linear -> ReLU -> Linear -> ReLU -> Linear -> Softmax.\n",
    "    \n",
    "    input_size: dimension of the feature vector (210 = 7 stats x 30 channels here)\n",
    "    hidden_size: width of both hidden layers\n",
    "    num_classes: number of output classes (4 here)\n",
    "    \n",
    "    NOTE(review): forward() returns probabilities (Softmax is applied). Make sure\n",
    "    the training loss expects probabilities (e.g. MSE/BCE against one-hot targets);\n",
    "    nn.CrossEntropyLoss expects raw logits and would apply softmax a second time.\n",
    "    '''\n",
    "    def __init__(self, input_size, hidden_size, num_classes):\n",
    "        super(FNN, self).__init__()\n",
    "        self.fc1 = nn.Linear(input_size, hidden_size)   # input layer -> hidden layer 1\n",
    "        self.relu1 = nn.ReLU()\n",
    "        self.fc2 = nn.Linear(hidden_size, hidden_size)  # hidden layer 1 -> hidden layer 2\n",
    "        self.relu2 = nn.ReLU()\n",
    "        self.fc3 = nn.Linear(hidden_size, num_classes)  # hidden layer 2 -> output layer\n",
    "        self.softmax = nn.Softmax(dim=1)                # normalize class scores per sample\n",
    "    \n",
    "    def forward(self, x):\n",
    "        ''' Map a batch of feature vectors (B, input_size) to class probabilities (B, num_classes). '''\n",
    "        x = self.fc1(x)\n",
    "        x = self.relu1(x)\n",
    "        x = self.fc2(x)\n",
    "        x = self.relu2(x)\n",
    "        x = self.fc3(x)\n",
    "        out = self.softmax(x)\n",
    "        return out"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "使用Adam作为Optimizer训练模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "FNN(\n",
      "  (fc1): Linear(in_features=210, out_features=10, bias=True)\n",
      "  (relu1): ReLU()\n",
      "  (fc2): Linear(in_features=10, out_features=10, bias=True)\n",
      "  (relu2): ReLU()\n",
      "  (fc3): Linear(in_features=10, out_features=4, bias=True)\n",
      "  (softmax): Softmax(dim=1)\n",
      ")\n",
      "Epoch [1/200], Train Loss: 1.3195, CV Loss: 1.1956\n",
      "Epoch [2/200], Train Loss: 1.1462, CV Loss: 1.1058\n",
      "Epoch [3/200], Train Loss: 1.0648, CV Loss: 1.0875\n",
      "Epoch [4/200], Train Loss: 1.0258, CV Loss: 1.0725\n",
      "Epoch [5/200], Train Loss: 1.0092, CV Loss: 1.0678\n",
      "Epoch [6/200], Train Loss: 0.9962, CV Loss: 1.0637\n",
      "Epoch [7/200], Train Loss: 0.9891, CV Loss: 1.0621\n",
      "Epoch [8/200], Train Loss: 0.9819, CV Loss: 1.0585\n",
      "Epoch [9/200], Train Loss: 0.9751, CV Loss: 1.0527\n",
      "Epoch [10/200], Train Loss: 0.9679, CV Loss: 1.0491\n",
      "Epoch [11/200], Train Loss: 0.9598, CV Loss: 1.0450\n",
      "Epoch [12/200], Train Loss: 0.9513, CV Loss: 1.0377\n",
      "Epoch [13/200], Train Loss: 0.9398, CV Loss: 1.0351\n",
      "Epoch [14/200], Train Loss: 0.9297, CV Loss: 1.0302\n",
      "Epoch [15/200], Train Loss: 0.9197, CV Loss: 1.0332\n",
      "Epoch [16/200], Train Loss: 0.9114, CV Loss: 1.0332\n",
      "Epoch [17/200], Train Loss: 0.9011, CV Loss: 1.0309\n",
      "Epoch [18/200], Train Loss: 0.8904, CV Loss: 1.0416\n",
      "Epoch [19/200], Train Loss: 0.8771, CV Loss: 1.0509\n",
      "Epoch [20/200], Train Loss: 0.8603, CV Loss: 1.0961\n",
      "Epoch [21/200], Train Loss: 0.8452, CV Loss: 1.0814\n",
      "Epoch [22/200], Train Loss: 0.8262, CV Loss: 1.0745\n",
      "Epoch [23/200], Train Loss: 0.8135, CV Loss: 1.0672\n",
      "Epoch [24/200], Train Loss: 0.8018, CV Loss: 1.0457\n",
      "Epoch [25/200], Train Loss: 0.7945, CV Loss: 1.0533\n",
      "Epoch [26/200], Train Loss: 0.7879, CV Loss: 1.0412\n",
      "Epoch [27/200], Train Loss: 0.7830, CV Loss: 1.0365\n",
      "Epoch [28/200], Train Loss: 0.7790, CV Loss: 1.0366\n",
      "Epoch [29/200], Train Loss: 0.7753, CV Loss: 1.0315\n",
      "Epoch [30/200], Train Loss: 0.7721, CV Loss: 1.0356\n",
      "Epoch [31/200], Train Loss: 0.7697, CV Loss: 1.0519\n",
      "Epoch [32/200], Train Loss: 0.7661, CV Loss: 1.0382\n",
      "Epoch [33/200], Train Loss: 0.7634, CV Loss: 1.0482\n",
      "Epoch [34/200], Train Loss: 0.7616, CV Loss: 1.0476\n",
      "Epoch [35/200], Train Loss: 0.7601, CV Loss: 1.0478\n",
      "Epoch [36/200], Train Loss: 0.7581, CV Loss: 1.0550\n",
      "Epoch [37/200], Train Loss: 0.7564, CV Loss: 1.0514\n",
      "Epoch [38/200], Train Loss: 0.7551, CV Loss: 1.0584\n",
      "Epoch [39/200], Train Loss: 0.7546, CV Loss: 1.0609\n",
      "Epoch [40/200], Train Loss: 0.7555, CV Loss: 1.0612\n",
      "Epoch [41/200], Train Loss: 0.7533, CV Loss: 1.0654\n",
      "Epoch [42/200], Train Loss: 0.7527, CV Loss: 1.0644\n",
      "Epoch [43/200], Train Loss: 0.7524, CV Loss: 1.0677\n",
      "Epoch [44/200], Train Loss: 0.7518, CV Loss: 1.0663\n",
      "Epoch [45/200], Train Loss: 0.7516, CV Loss: 1.0696\n",
      "Epoch [46/200], Train Loss: 0.7513, CV Loss: 1.0713\n",
      "Epoch [47/200], Train Loss: 0.7511, CV Loss: 1.0710\n",
      "Epoch [48/200], Train Loss: 0.7510, CV Loss: 1.0696\n",
      "Epoch [49/200], Train Loss: 0.7508, CV Loss: 1.0722\n",
      "Epoch [50/200], Train Loss: 0.7507, CV Loss: 1.0728\n",
      "Epoch [51/200], Train Loss: 0.7504, CV Loss: 1.0742\n",
      "Epoch [52/200], Train Loss: 0.7503, CV Loss: 1.0743\n",
      "Epoch [53/200], Train Loss: 0.7502, CV Loss: 1.0743\n",
      "Epoch [54/200], Train Loss: 0.7501, CV Loss: 1.0741\n",
      "Epoch [55/200], Train Loss: 0.7500, CV Loss: 1.0761\n",
      "Epoch [56/200], Train Loss: 0.7500, CV Loss: 1.0766\n",
      "Epoch [57/200], Train Loss: 0.7498, CV Loss: 1.0761\n",
      "Epoch [58/200], Train Loss: 0.7498, CV Loss: 1.0756\n",
      "Epoch [59/200], Train Loss: 0.7497, CV Loss: 1.0780\n",
      "Epoch [60/200], Train Loss: 0.7496, CV Loss: 1.0772\n",
      "Epoch [61/200], Train Loss: 0.7496, CV Loss: 1.0789\n",
      "Epoch [62/200], Train Loss: 0.7495, CV Loss: 1.0789\n",
      "Epoch [63/200], Train Loss: 0.7495, CV Loss: 1.0797\n",
      "Epoch [64/200], Train Loss: 0.7494, CV Loss: 1.0795\n",
      "Epoch [65/200], Train Loss: 0.7494, CV Loss: 1.0812\n",
      "Epoch [66/200], Train Loss: 0.7493, CV Loss: 1.0801\n",
      "Epoch [67/200], Train Loss: 0.7493, CV Loss: 1.0795\n",
      "Epoch [68/200], Train Loss: 0.7493, CV Loss: 1.0791\n",
      "Epoch [69/200], Train Loss: 0.7492, CV Loss: 1.0779\n",
      "Epoch [70/200], Train Loss: 0.7492, CV Loss: 1.0779\n",
      "Epoch [71/200], Train Loss: 0.7492, CV Loss: 1.0776\n",
      "Epoch [72/200], Train Loss: 0.7492, CV Loss: 1.0779\n",
      "Epoch [73/200], Train Loss: 0.7491, CV Loss: 1.0773\n",
      "Epoch [74/200], Train Loss: 0.7491, CV Loss: 1.0775\n",
      "Epoch [75/200], Train Loss: 0.7491, CV Loss: 1.0772\n",
      "Epoch [76/200], Train Loss: 0.7491, CV Loss: 1.0771\n",
      "Epoch [77/200], Train Loss: 0.7507, CV Loss: 1.0773\n",
      "Epoch [78/200], Train Loss: 0.7507, CV Loss: 1.0775\n",
      "Epoch [79/200], Train Loss: 0.7490, CV Loss: 1.0780\n",
      "Epoch [80/200], Train Loss: 0.7490, CV Loss: 1.0764\n",
      "Epoch [81/200], Train Loss: 0.7490, CV Loss: 1.0772\n",
      "Epoch [82/200], Train Loss: 0.7490, CV Loss: 1.0770\n",
      "Epoch [83/200], Train Loss: 0.7490, CV Loss: 1.0770\n",
      "Epoch [84/200], Train Loss: 0.7490, CV Loss: 1.0773\n",
      "Epoch [85/200], Train Loss: 0.7489, CV Loss: 1.0775\n",
      "Epoch [86/200], Train Loss: 0.7489, CV Loss: 1.0769\n",
      "Epoch [87/200], Train Loss: 0.7489, CV Loss: 1.0774\n",
      "Epoch [88/200], Train Loss: 0.7489, CV Loss: 1.0770\n",
      "Epoch [89/200], Train Loss: 0.7489, CV Loss: 1.0767\n",
      "Epoch [90/200], Train Loss: 0.7489, CV Loss: 1.0770\n",
      "Epoch [91/200], Train Loss: 0.7489, CV Loss: 1.0770\n",
      "Epoch [92/200], Train Loss: 0.7489, CV Loss: 1.0762\n",
      "Epoch [93/200], Train Loss: 0.7489, CV Loss: 1.0764\n",
      "Epoch [94/200], Train Loss: 0.7489, CV Loss: 1.0769\n",
      "Epoch [95/200], Train Loss: 0.7489, CV Loss: 1.0765\n",
      "Epoch [96/200], Train Loss: 0.7489, CV Loss: 1.0770\n",
      "Epoch [97/200], Train Loss: 0.7488, CV Loss: 1.0770\n",
      "Epoch [98/200], Train Loss: 0.7505, CV Loss: 1.0769\n",
      "Epoch [99/200], Train Loss: 0.7488, CV Loss: 1.0766\n",
      "Epoch [100/200], Train Loss: 0.7488, CV Loss: 1.0768\n",
      "Epoch [101/200], Train Loss: 0.7488, CV Loss: 1.0761\n",
      "Epoch [102/200], Train Loss: 0.7488, CV Loss: 1.0767\n",
      "Epoch [103/200], Train Loss: 0.7505, CV Loss: 1.0765\n",
      "Epoch [104/200], Train Loss: 0.7488, CV Loss: 1.0760\n",
      "Epoch [105/200], Train Loss: 0.7488, CV Loss: 1.0763\n",
      "Epoch [106/200], Train Loss: 0.7488, CV Loss: 1.0762\n",
      "Epoch [107/200], Train Loss: 0.7488, CV Loss: 1.0759\n",
      "Epoch [108/200], Train Loss: 0.7488, CV Loss: 1.0763\n",
      "Epoch [109/200], Train Loss: 0.7488, CV Loss: 1.0763\n",
      "Epoch [110/200], Train Loss: 0.7488, CV Loss: 1.0756\n",
      "Epoch [111/200], Train Loss: 0.7488, CV Loss: 1.0759\n",
      "Epoch [112/200], Train Loss: 0.7488, CV Loss: 1.0755\n",
      "Epoch [113/200], Train Loss: 0.7488, CV Loss: 1.0760\n",
      "Epoch [114/200], Train Loss: 0.7488, CV Loss: 1.0755\n",
      "Epoch [115/200], Train Loss: 0.7488, CV Loss: 1.0747\n",
      "Epoch [116/200], Train Loss: 0.7488, CV Loss: 1.0750\n",
      "Epoch [117/200], Train Loss: 0.7488, CV Loss: 1.0752\n",
      "Epoch [118/200], Train Loss: 0.7488, CV Loss: 1.0752\n",
      "Epoch [119/200], Train Loss: 0.7488, CV Loss: 1.0748\n",
      "Epoch [120/200], Train Loss: 0.7488, CV Loss: 1.0751\n",
      "Epoch [121/200], Train Loss: 0.7488, CV Loss: 1.0746\n",
      "Epoch [122/200], Train Loss: 0.7488, CV Loss: 1.0749\n",
      "Epoch [123/200], Train Loss: 0.7488, CV Loss: 1.0749\n",
      "Epoch [124/200], Train Loss: 0.7488, CV Loss: 1.0746\n",
      "Epoch [125/200], Train Loss: 0.7487, CV Loss: 1.0745\n",
      "Epoch [126/200], Train Loss: 0.7487, CV Loss: 1.0742\n",
      "Epoch [127/200], Train Loss: 0.7487, CV Loss: 1.0742\n",
      "Epoch [128/200], Train Loss: 0.7504, CV Loss: 1.0740\n",
      "Epoch [129/200], Train Loss: 0.7488, CV Loss: 1.0735\n",
      "Epoch [130/200], Train Loss: 0.7488, CV Loss: 1.0734\n",
      "Epoch [131/200], Train Loss: 0.7488, CV Loss: 1.0715\n",
      "Epoch [132/200], Train Loss: 0.7505, CV Loss: 1.0702\n",
      "Epoch [133/200], Train Loss: 0.7488, CV Loss: 1.0687\n",
      "Epoch [134/200], Train Loss: 0.7487, CV Loss: 1.0669\n",
      "Epoch [135/200], Train Loss: 0.7487, CV Loss: 1.0681\n",
      "Epoch [136/200], Train Loss: 0.7488, CV Loss: 1.0643\n",
      "Epoch [137/200], Train Loss: 0.7488, CV Loss: 1.0693\n",
      "Epoch [138/200], Train Loss: 0.7505, CV Loss: 1.0691\n",
      "Epoch [139/200], Train Loss: 0.7489, CV Loss: 1.0680\n",
      "Epoch [140/200], Train Loss: 0.7488, CV Loss: 1.0677\n",
      "Epoch [141/200], Train Loss: 0.7487, CV Loss: 1.0669\n",
      "Epoch [142/200], Train Loss: 0.7487, CV Loss: 1.0687\n",
      "Epoch [143/200], Train Loss: 0.7487, CV Loss: 1.0667\n",
      "Epoch [144/200], Train Loss: 0.7488, CV Loss: 1.0703\n",
      "Epoch [145/200], Train Loss: 0.7488, CV Loss: 1.0647\n",
      "Epoch [146/200], Train Loss: 0.7488, CV Loss: 1.0703\n",
      "Epoch [147/200], Train Loss: 0.7505, CV Loss: 1.0708\n",
      "Epoch [148/200], Train Loss: 0.7488, CV Loss: 1.0703\n",
      "Epoch [149/200], Train Loss: 0.7487, CV Loss: 1.0689\n",
      "Epoch [150/200], Train Loss: 0.7487, CV Loss: 1.0705\n",
      "Epoch [151/200], Train Loss: 0.7487, CV Loss: 1.0687\n",
      "Epoch [152/200], Train Loss: 0.7488, CV Loss: 1.0689\n",
      "Epoch [153/200], Train Loss: 0.7488, CV Loss: 1.0634\n",
      "Epoch [154/200], Train Loss: 0.7488, CV Loss: 1.0637\n",
      "Epoch [155/200], Train Loss: 0.7488, CV Loss: 1.0661\n",
      "Epoch [156/200], Train Loss: 0.7488, CV Loss: 1.0664\n",
      "Epoch [157/200], Train Loss: 0.7488, CV Loss: 1.0629\n",
      "Epoch [158/200], Train Loss: 0.7488, CV Loss: 1.0678\n",
      "Epoch [159/200], Train Loss: 0.7488, CV Loss: 1.0637\n",
      "Epoch [160/200], Train Loss: 0.7488, CV Loss: 1.0636\n",
      "Epoch [161/200], Train Loss: 0.7488, CV Loss: 1.0597\n",
      "Epoch [162/200], Train Loss: 0.7488, CV Loss: 1.0644\n",
      "Epoch [163/200], Train Loss: 0.7488, CV Loss: 1.0605\n",
      "Epoch [164/200], Train Loss: 0.7488, CV Loss: 1.0649\n",
      "Epoch [165/200], Train Loss: 0.7488, CV Loss: 1.0592\n",
      "Epoch [166/200], Train Loss: 0.7488, CV Loss: 1.0601\n",
      "Epoch [167/200], Train Loss: 0.7488, CV Loss: 1.0623\n",
      "Epoch [168/200], Train Loss: 0.7488, CV Loss: 1.0618\n",
      "Epoch [169/200], Train Loss: 0.7487, CV Loss: 1.0664\n",
      "Epoch [170/200], Train Loss: 0.7488, CV Loss: 1.0596\n",
      "Epoch [171/200], Train Loss: 0.7488, CV Loss: 1.0655\n",
      "Epoch [172/200], Train Loss: 0.7488, CV Loss: 1.0603\n",
      "Epoch [173/200], Train Loss: 0.7488, CV Loss: 1.0659\n",
      "Epoch [174/200], Train Loss: 0.7488, CV Loss: 1.0603\n",
      "Epoch [175/200], Train Loss: 0.7488, CV Loss: 1.0611\n",
      "Epoch [176/200], Train Loss: 0.7488, CV Loss: 1.0597\n",
      "Epoch [177/200], Train Loss: 0.7488, CV Loss: 1.0626\n",
      "Epoch [178/200], Train Loss: 0.7488, CV Loss: 1.0595\n",
      "Epoch [179/200], Train Loss: 0.7488, CV Loss: 1.0647\n",
      "Epoch [180/200], Train Loss: 0.7488, CV Loss: 1.0614\n",
      "Epoch [181/200], Train Loss: 0.7488, CV Loss: 1.0637\n",
      "Epoch [182/200], Train Loss: 0.7488, CV Loss: 1.0654\n",
      "Epoch [183/200], Train Loss: 0.7488, CV Loss: 1.0639\n",
      "Epoch [184/200], Train Loss: 0.7488, CV Loss: 1.0586\n",
      "Epoch [185/200], Train Loss: 0.7488, CV Loss: 1.0649\n",
      "Epoch [186/200], Train Loss: 0.7488, CV Loss: 1.0586\n",
      "Epoch [187/200], Train Loss: 0.7488, CV Loss: 1.0643\n",
      "Epoch [188/200], Train Loss: 0.7488, CV Loss: 1.0601\n",
      "Epoch [189/200], Train Loss: 0.7488, CV Loss: 1.0641\n",
      "Epoch [190/200], Train Loss: 0.7488, CV Loss: 1.0605\n",
      "Epoch [191/200], Train Loss: 0.7488, CV Loss: 1.0619\n",
      "Epoch [192/200], Train Loss: 0.7488, CV Loss: 1.0583\n",
      "Epoch [193/200], Train Loss: 0.7488, CV Loss: 1.0631\n",
      "Epoch [194/200], Train Loss: 0.7488, CV Loss: 1.0573\n",
      "Epoch [195/200], Train Loss: 0.7488, CV Loss: 1.0606\n",
      "Epoch [196/200], Train Loss: 0.7505, CV Loss: 1.0620\n",
      "Epoch [197/200], Train Loss: 0.7506, CV Loss: 1.0538\n",
      "Epoch [198/200], Train Loss: 0.7487, CV Loss: 1.0558\n",
      "Epoch [199/200], Train Loss: 0.7489, CV Loss: 1.0525\n",
      "Epoch [200/200], Train Loss: 0.7487, CV Loss: 1.0505\n"
     ]
    }
   ],
   "source": [
    "# Define the model parameters\n",
    "# NOTE(review): this cell relies on FNN, trX, trLoader and teLoader being\n",
    "# defined by earlier cells; run the notebook top-to-bottom before this cell.\n",
    "hidden_size = 10  # number of units in the single hidden layer\n",
    "\n",
    "# Instantiate the model\n",
    "input_size = trX.shape[1]  # one input unit per feature column of the training matrix\n",
    "num_classes = 4 # 3 movements and static\n",
    "model = FNN(input_size, hidden_size, num_classes)\n",
    "print(model)\n",
    "\n",
    "# Define loss function and optimizer\n",
    "# CrossEntropyLoss applies LogSoftmax internally, so the model must output raw logits.\n",
    "criterion = nn.CrossEntropyLoss()\n",
    "optimizer = torch.optim.Adam(model.parameters())  # Adam with default learning rate (1e-3)\n",
    "\n",
    "# Lists to store losses\n",
    "# NOTE(review): the 'te' (test) split doubles as the cross-validation set in the\n",
    "# progress messages below -- both names refer to the same teLoader data.\n",
    "train_losses = []\n",
    "te_losses = []\n",
    "\n",
    "# Number of epochs\n",
    "num_epochs = 200\n",
    "\n",
    "for epoch in range(num_epochs):\n",
    "    model.train()  # re-enable training-mode behavior (eval() is called at the end of each epoch)\n",
    "    batch_losses = []\n",
    "    \n",
    "    for batch_x, batch_y in trLoader:\n",
    "        # Forward pass\n",
    "        outputs = model(batch_x)\n",
    "        loss = criterion(outputs, batch_y)\n",
    "        \n",
    "        # Backward pass and optimize\n",
    "        optimizer.zero_grad()  # clear gradients left over from the previous step\n",
    "        loss.backward()\n",
    "        optimizer.step()\n",
    "        \n",
    "        batch_losses.append(loss.item())  # .item() detaches the scalar from the graph\n",
    "    \n",
    "    # Calculate average training loss for this epoch\n",
    "    # (unweighted mean over batches; assumes trLoader yields at least one batch)\n",
    "    avg_train_loss = sum(batch_losses) / len(batch_losses)\n",
    "    train_losses.append(avg_train_loss)\n",
    "    \n",
    "    # Evaluate on cross-validation set\n",
    "    model.eval()  # switch to inference-mode behavior for evaluation\n",
    "    te_batch_losses = []\n",
    "    with torch.no_grad():  # no gradient tracking needed during evaluation\n",
    "        for te_x, te_y in teLoader:\n",
    "            te_outputs = model(te_x)\n",
    "            te_loss = criterion(te_outputs, te_y)\n",
    "            te_batch_losses.append(te_loss.item())\n",
    "    \n",
    "    avg_te_loss = sum(te_batch_losses) / len(te_batch_losses)\n",
    "    te_losses.append(avg_te_loss)\n",
    "    \n",
    "    print(f'Epoch [{epoch+1}/{num_epochs}], Train Loss: {avg_train_loss:.4f}, CV Loss: {avg_te_loss:.4f}')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "计算精度与学习曲线"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import matplotlib.pyplot as plt\n",
    "\n",
    "def loader_accuracy(loader):\n",
    "    \"\"\"Return the classification accuracy (in %) of `model` over one DataLoader.\n",
    "\n",
    "    Assumes labels are one-hot encoded (2-D), so argmax along dim=1 recovers\n",
    "    the class index -- TODO confirm against the label-encoding cell above.\n",
    "    \"\"\"\n",
    "    correct = 0\n",
    "    total = 0\n",
    "    # `features` are Wi-Fi CSI feature vectors, not images.\n",
    "    for features, labels in loader:\n",
    "        outputs = model(features)\n",
    "        _, predicted = torch.max(outputs, 1)\n",
    "        _, true_labels = torch.max(labels, 1)\n",
    "        total += labels.size(0)\n",
    "        correct += (predicted == true_labels).sum().item()\n",
    "    return 100 * correct / total\n",
    "\n",
    "# Calculate and print accuracies for training and cross-validation sets\n",
    "model.eval()  # inference-mode behavior while measuring accuracy\n",
    "with torch.no_grad():\n",
    "    tr_accuracy = loader_accuracy(trLoader)\n",
    "    te_accuracy = loader_accuracy(teLoader)\n",
    "\n",
    "print(f'Accuracy on training set: {tr_accuracy:.2f}%')\n",
    "print(f'Accuracy on cross-validation set: {te_accuracy:.2f}%')\n",
    "\n",
    "# Plot training and cross-validation losses (the learning curves)\n",
    "plt.figure(figsize=(10, 5))\n",
    "plt.plot(range(1, num_epochs+1), train_losses, label='Training Loss')\n",
    "plt.plot(range(1, num_epochs+1), te_losses, label='Testing Loss')\n",
    "plt.xlabel('Epoch')\n",
    "plt.ylabel('Loss')\n",
    "plt.legend()\n",
    "plt.show()"
   ]
  }
 ],
 "metadata": {
  "colab": {
   "authorship_tag": "ABX9TyO5gS9/MePw+FDiXJA07L6y",
   "include_colab_link": true,
   "provenance": []
  },
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
