{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "b65dfa04-e3e9-4568-9821-21a8a84a8776",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Data preparation: load the CSV data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "f558a7ec-f277-4ba9-a763-cb2737dd5098",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "D:/U1_G1_N30_L_L1_D0_20200408_2_Labeled.csv\n",
      "[0 1]\n",
      "D:/U1_G2_N30_L_L1_D0_20200408_2_Labeled.csv\n",
      "[0 2]\n",
      "D:/U1_G3_N30_L_L1_D0_20200408_2_Labeled.csv\n",
      "[0 3]\n",
      "D:/U1_G1_N10_L_L1_D0_20200408_1_Labeled.csv\n",
      "[0 1]\n",
      "D:/U1_G2_N10_L_L1_D0_20200408_1_Labeled.csv\n",
      "[0 2]\n",
      "D:/U1_G3_N10_L_L1_D0_20200408_1_Labeled.csv\n",
      "[0 3]\n",
      "Training segments: 183\n",
      "Training Segment 1: 6414\n",
      "Training Segment 2: 3504\n",
      "Training Segment 3: 1214\n",
      "Training Segment 4: 2960\n",
      "Training Segment 5: 974\n",
      "Training Segment 6: 3024\n",
      "Training Segment 7: 814\n",
      "Training Segment 8: 3248\n",
      "Training Segment 9: 734\n",
      "Training Segment 10: 2928\n",
      "Training Segment 11: 782\n",
      "Training Segment 12: 2480\n",
      "Training Segment 13: 1102\n",
      "Training Segment 14: 2656\n",
      "Training Segment 15: 750\n",
      "Training Segment 16: 2672\n",
      "Training Segment 17: 606\n",
      "Training Segment 18: 3200\n",
      "Training Segment 19: 350\n",
      "Training Segment 20: 3392\n",
      "Training Segment 21: 878\n",
      "Training Segment 22: 3136\n",
      "Training Segment 23: 686\n",
      "Training Segment 24: 3408\n",
      "Training Segment 25: 558\n",
      "Training Segment 26: 3056\n",
      "Training Segment 27: 798\n",
      "Training Segment 28: 3472\n",
      "Training Segment 29: 670\n",
      "Training Segment 30: 3328\n",
      "Training Segment 31: 446\n",
      "Training Segment 32: 3472\n",
      "Training Segment 33: 798\n",
      "Training Segment 34: 2960\n",
      "Training Segment 35: 1438\n",
      "Training Segment 36: 3184\n",
      "Training Segment 37: 1134\n",
      "Training Segment 38: 2736\n",
      "Training Segment 39: 718\n",
      "Training Segment 40: 3200\n",
      "Training Segment 41: 942\n",
      "Training Segment 42: 3168\n",
      "Training Segment 43: 1454\n",
      "Training Segment 44: 2848\n",
      "Training Segment 45: 1342\n",
      "Training Segment 46: 3168\n",
      "Training Segment 47: 910\n",
      "Training Segment 48: 2624\n",
      "Training Segment 49: 1198\n",
      "Training Segment 50: 3024\n",
      "Training Segment 51: 974\n",
      "Training Segment 52: 3088\n",
      "Training Segment 53: 974\n",
      "Training Segment 54: 3200\n",
      "Training Segment 55: 558\n",
      "Training Segment 56: 3408\n",
      "Training Segment 57: 798\n",
      "Training Segment 58: 2928\n",
      "Training Segment 59: 1038\n",
      "Training Segment 60: 3424\n",
      "Training Segment 61: 7663\n",
      "Training Segment 62: 7870\n",
      "Training Segment 63: 3632\n",
      "Training Segment 64: 1502\n",
      "Training Segment 65: 4240\n",
      "Training Segment 66: 1102\n",
      "Training Segment 67: 3392\n",
      "Training Segment 68: 1582\n",
      "Training Segment 69: 4096\n",
      "Training Segment 70: 1822\n",
      "Training Segment 71: 3520\n",
      "Training Segment 72: 1470\n",
      "Training Segment 73: 3936\n",
      "Training Segment 74: 1118\n",
      "Training Segment 75: 4480\n",
      "Training Segment 76: 1246\n",
      "Training Segment 77: 3552\n",
      "Training Segment 78: 1646\n",
      "Training Segment 79: 3824\n",
      "Training Segment 80: 2094\n",
      "Training Segment 81: 3440\n",
      "Training Segment 82: 2270\n",
      "Training Segment 83: 4160\n",
      "Training Segment 84: 1630\n",
      "Training Segment 85: 4096\n",
      "Training Segment 86: 1630\n",
      "Training Segment 87: 4256\n",
      "Training Segment 88: 1774\n",
      "Training Segment 89: 3472\n",
      "Training Segment 90: 2270\n",
      "Training Segment 91: 3600\n",
      "Training Segment 92: 2110\n",
      "Training Segment 93: 3696\n",
      "Training Segment 94: 2190\n",
      "Training Segment 95: 3888\n",
      "Training Segment 96: 1902\n",
      "Training Segment 97: 4208\n",
      "Training Segment 98: 1886\n",
      "Training Segment 99: 4256\n",
      "Training Segment 100: 2030\n",
      "Training Segment 101: 4048\n",
      "Training Segment 102: 2478\n",
      "Training Segment 103: 4000\n",
      "Training Segment 104: 2238\n",
      "Training Segment 105: 4224\n",
      "Training Segment 106: 3022\n",
      "Training Segment 107: 4480\n",
      "Training Segment 108: 1310\n",
      "Training Segment 109: 4208\n",
      "Training Segment 110: 2174\n",
      "Training Segment 111: 4096\n",
      "Training Segment 112: 2238\n",
      "Training Segment 113: 4144\n",
      "Training Segment 114: 2766\n",
      "Training Segment 115: 4832\n",
      "Training Segment 116: 1454\n",
      "Training Segment 117: 4224\n",
      "Training Segment 118: 4526\n",
      "Training Segment 119: 4336\n",
      "Training Segment 120: 3438\n",
      "Training Segment 121: 4464\n",
      "Training Segment 122: 9807\n",
      "Training Segment 123: 17694\n",
      "Training Segment 124: 2512\n",
      "Training Segment 125: 1758\n",
      "Training Segment 126: 2192\n",
      "Training Segment 127: 1150\n",
      "Training Segment 128: 2592\n",
      "Training Segment 129: 1694\n",
      "Training Segment 130: 2176\n",
      "Training Segment 131: 1198\n",
      "Training Segment 132: 2144\n",
      "Training Segment 133: 1710\n",
      "Training Segment 134: 2032\n",
      "Training Segment 135: 2222\n",
      "Training Segment 136: 2256\n",
      "Training Segment 137: 2110\n",
      "Training Segment 138: 1760\n",
      "Training Segment 139: 2718\n",
      "Training Segment 140: 2176\n",
      "Training Segment 141: 2110\n",
      "Training Segment 142: 2112\n",
      "Training Segment 143: 2798\n",
      "Training Segment 144: 2400\n",
      "Training Segment 145: 2558\n",
      "Training Segment 146: 2208\n",
      "Training Segment 147: 2926\n",
      "Training Segment 148: 2496\n",
      "Training Segment 149: 2590\n",
      "Training Segment 150: 2448\n",
      "Training Segment 151: 2526\n",
      "Training Segment 152: 2416\n",
      "Training Segment 153: 3006\n",
      "Training Segment 154: 2320\n",
      "Training Segment 155: 3518\n",
      "Training Segment 156: 2032\n",
      "Training Segment 157: 5406\n",
      "Training Segment 158: 2240\n",
      "Training Segment 159: 5278\n",
      "Training Segment 160: 2304\n",
      "Training Segment 161: 8094\n",
      "Training Segment 162: 2192\n",
      "Training Segment 163: 22366\n",
      "Training Segment 164: 2352\n",
      "Training Segment 165: 1726\n",
      "Training Segment 166: 2768\n",
      "Training Segment 167: 2430\n",
      "Training Segment 168: 2176\n",
      "Training Segment 169: 3166\n",
      "Training Segment 170: 2208\n",
      "Training Segment 171: 3950\n",
      "Training Segment 172: 3360\n",
      "Training Segment 173: 3278\n",
      "Training Segment 174: 2672\n",
      "Training Segment 175: 2558\n",
      "Training Segment 176: 2400\n",
      "Training Segment 177: 3630\n",
      "Training Segment 178: 1712\n",
      "Training Segment 179: 3838\n",
      "Training Segment 180: 2240\n",
      "Training Segment 181: 2606\n",
      "Training Segment 182: 2416\n",
      "Training Segment 183: 8559\n",
      "Testing segments: 63\n"
     ]
    }
   ],
   "source": [
    "import pandas as pd\n",
    "import numpy as np\n",
    "\n",
    "# Define training and testing files\n",
    "training_files = [\n",
    "    'D:/U1_G1_N30_L_L1_D0_20200408_2_Labeled.csv',\n",
    "    'D:/U1_G2_N30_L_L1_D0_20200408_2_Labeled.csv',\n",
    "    'D:/U1_G3_N30_L_L1_D0_20200408_2_Labeled.csv'\n",
    "]\n",
    "\n",
    "testing_files = [\n",
    "    'D:/U1_G1_N10_L_L1_D0_20200408_1_Labeled.csv',\n",
    "    'D:/U1_G2_N10_L_L1_D0_20200408_1_Labeled.csv',\n",
    "    'D:/U1_G3_N10_L_L1_D0_20200408_1_Labeled.csv'\n",
    "]\n",
    "\n",
    "def read_csv_file(file_path):\n",
    "    '''Read one labeled CSV and return (csi, label, timestamp) arrays.\n",
    "\n",
    "    The file layout is: 'timestamp', 'label', then the CSI columns.\n",
    "    Labels: 0 static, 1 up, 2 down, 3 left, 4 right.\n",
    "    '''\n",
    "    print(file_path)\n",
    "    df = pd.read_csv(file_path)\n",
    "    csi = df.iloc[:, 2:].values  # all columns except 'timestamp' and 'label'\n",
    "    label = df['label'].values\n",
    "    timestamp = df['timestamp'].values\n",
    "    print(np.unique(label))  # sanity check: which labels occur in this file\n",
    "    return csi, label, timestamp\n",
    "\n",
    "def segment_signals(csi, label, timestamp):\n",
    "    '''Split one recording into maximal runs of identical labels.\n",
    "\n",
    "    Returns a list of (csi_segment, segment_label, timestamp_segment) tuples.\n",
    "    '''\n",
    "    segments = []\n",
    "    segment_label = label[0]  # label of the segment currently being scanned\n",
    "    segment_start = 0         # start index of the current segment\n",
    "\n",
    "    for i in range(len(label)):\n",
    "        if label[i] != segment_label:\n",
    "            # BUGFIX: the run covers indices segment_start..i-1 inclusive, so\n",
    "            # the exclusive slice end is i, not i-1 (the old code silently\n",
    "            # dropped the last sample of every segment).\n",
    "            segments.append((csi[segment_start:i], segment_label, timestamp[segment_start:i]))\n",
    "            segment_start = i\n",
    "            segment_label = label[i]\n",
    "\n",
    "    # Append the trailing segment\n",
    "    segments.append((csi[segment_start:], segment_label, timestamp[segment_start:]))\n",
    "    return segments\n",
    "\n",
    "training_segments = []\n",
    "testing_segments = []\n",
    "\n",
    "# Read and label-segment every training recording\n",
    "for file in training_files:\n",
    "    s, y, t = read_csv_file(file)\n",
    "    training_segments.extend(segment_signals(s, y, t))\n",
    "\n",
    "# Read and label-segment every testing recording\n",
    "for file in testing_files:\n",
    "    s, y, t = read_csv_file(file)\n",
    "    testing_segments.extend(segment_signals(s, y, t))\n",
    "\n",
    "print(f\"Training segments: {len(training_segments)}\")\n",
    "\n",
    "# Length (number of samples) of each training segment\n",
    "for i, (s, y, t) in enumerate(training_segments):\n",
    "    print(f\"Training Segment {i + 1}: {len(s)}\")\n",
    "\n",
    "print(f\"Testing segments: {len(testing_segments)}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "33f9a16e-37d9-49a2-807c-027cb6d887d8",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Data alignment: use feature extraction so every training and testing sample has the same length"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "b72f02b2-3289-4bd3-a1ba-309363b1556d",
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "from scipy.stats import kurtosis, skew\n",
    "\n",
    "def extract_features(s):\n",
    "    '''Summarize one variable-length segment into a fixed-length vector.\n",
    "\n",
    "    Seven statistics are computed column-wise (per subcarrier):\n",
    "    - mean\n",
    "    - std\n",
    "    - max\n",
    "    - min\n",
    "    - median\n",
    "    - kurtosis\n",
    "    - skew\n",
    "\n",
    "    Input:\n",
    "    s: segment (N*30) from training_segments or testing_segments\n",
    "\n",
    "    Output:\n",
    "    x: 1-D vector of length 7*30 = 210\n",
    "       (the previous docstring said 8*30, but only 7 statistics are used,\n",
    "        matching the model's in_features=210)\n",
    "    '''\n",
    "    x = []\n",
    "    x.extend(np.mean(s, axis=0))\n",
    "    x.extend(np.std(s, axis=0))\n",
    "    x.extend(np.max(s, axis=0))\n",
    "    x.extend(np.min(s, axis=0))\n",
    "    x.extend(np.median(s, axis=0))\n",
    "    x.extend(kurtosis(s, axis=0))\n",
    "    x.extend(skew(s, axis=0))\n",
    "\n",
    "    return np.array(x)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "69f2257b-b1d7-435a-98d3-4300678c8953",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build the training and testing sets with extract_features"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "29f04320-5c9b-4601-aceb-0e8200e11132",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "D:\\Temp\\ipykernel_3484\\496475029.py:16: UserWarning: Creating a tensor from a list of numpy.ndarrays is extremely slow. Please consider converting the list to a single numpy.ndarray with numpy.array() before converting to a tensor. (Triggered internally at C:\\actions-runner\\_work\\pytorch\\pytorch\\builder\\windows\\pytorch\\torch\\csrc\\utils\\tensor_new.cpp:281.)\n",
      "  trX = torch.tensor(trX, dtype=torch.float32) # Convert trX to tensor\n"
     ]
    }
   ],
   "source": [
    "import torch\n",
    "from torch.utils.data import DataLoader, TensorDataset\n",
    "\n",
    "def one_hot_collate(batch):\n",
    "    '''Collate (feature, label) pairs into a batch with one-hot labels (4 classes).'''\n",
    "    data = torch.stack([item[0] for item in batch])\n",
    "    labels = torch.tensor([item[1] for item in batch])\n",
    "\n",
    "    one_hot_labels = torch.zeros(labels.size(0), 4)  # 4 classes: labels 0-3\n",
    "    one_hot_labels.scatter_(1, labels.unsqueeze(1), 1)\n",
    "    return data, one_hot_labels\n",
    "\n",
    "batch_size = 4\n",
    "\n",
    "# Build training dataset.\n",
    "# Stack the per-segment feature vectors into one ndarray first: passing a\n",
    "# Python list of numpy arrays to torch.tensor is extremely slow and raises\n",
    "# a UserWarning (the one previously seen when running this cell).\n",
    "trX = np.stack([extract_features(s) for s, _, _ in training_segments])\n",
    "trX = torch.tensor(trX, dtype=torch.float32)\n",
    "trY = torch.tensor([y for _, y, _ in training_segments])  # segment labels\n",
    "\n",
    "# Build testing dataset the same way\n",
    "teX = np.stack([extract_features(s) for s, _, _ in testing_segments])\n",
    "teX = torch.tensor(teX, dtype=torch.float32)\n",
    "teY = torch.tensor([y for _, y, _ in testing_segments])\n",
    "\n",
    "# Standardize using statistics computed on the training data only,\n",
    "# so no information from the test set leaks into the scaling.\n",
    "mean = trX.mean(dim=0)\n",
    "std = trX.std(dim=0)\n",
    "std[std == 0] = 1.0  # guard: avoid division by zero for constant features\n",
    "\n",
    "trX = (trX - mean) / std\n",
    "teX = (teX - mean) / std\n",
    "\n",
    "# Wrap tensors in Datasets and DataLoaders\n",
    "trDataset = TensorDataset(trX, trY)\n",
    "teDataset = TensorDataset(teX, teY)\n",
    "\n",
    "trLoader = DataLoader(trDataset, batch_size=batch_size, shuffle=True, num_workers=0, collate_fn=one_hot_collate)\n",
    "teLoader = DataLoader(teDataset, batch_size=batch_size, shuffle=False, num_workers=0, collate_fn=one_hot_collate)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "ec00009e-614f-4954-88a5-49fc3b1b6459",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Define the model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "4154b7ab-26d3-46cc-96e6-883b41798aad",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch.nn as nn\n",
    "\n",
    "class FNN(nn.Module):\n",
    "    '''Feed-forward classifier: input -> hidden -> hidden -> num_classes.\n",
    "\n",
    "    forward() returns class probabilities (final Softmax over dim=1).\n",
    "    NOTE(review): if the training cell uses nn.CrossEntropyLoss, this\n",
    "    Softmax is applied twice (CrossEntropyLoss expects raw logits) --\n",
    "    confirm which loss is used and drop the Softmax if so.\n",
    "    '''\n",
    "    def __init__(self, input_size, hidden_size, num_classes):\n",
    "        super(FNN, self).__init__()\n",
    "        self.fc1 = nn.Linear(input_size, hidden_size)\n",
    "        self.relu1 = nn.ReLU()\n",
    "        self.fc2 = nn.Linear(hidden_size, hidden_size)\n",
    "        self.relu2 = nn.ReLU()\n",
    "        self.fc3 = nn.Linear(hidden_size, num_classes)\n",
    "        self.softmax = nn.Softmax(dim=1)\n",
    "    \n",
    "    def forward(self, x):\n",
    "        # Two hidden layers with ReLU, then a probability vector per sample\n",
    "        x = self.fc1(x)\n",
    "        x = self.relu1(x)\n",
    "        x = self.fc2(x)\n",
    "        x = self.relu2(x)\n",
    "        x = self.fc3(x)\n",
    "        out = self.softmax(x)\n",
    "        return out"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "25ea76df-8a15-407d-9fef-f8b72b00a63a",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Train the model with the Adam optimizer"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "5aef30dc-3b57-46b0-8a28-4c8fd6deeb11",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "FNN(\n",
      "  (fc1): Linear(in_features=210, out_features=10, bias=True)\n",
      "  (relu1): ReLU()\n",
      "  (fc2): Linear(in_features=10, out_features=10, bias=True)\n",
      "  (relu2): ReLU()\n",
      "  (fc3): Linear(in_features=10, out_features=4, bias=True)\n",
      "  (softmax): Softmax(dim=1)\n",
      ")\n",
      "Epoch [1/200], Train Loss: 1.3142, CV Loss: 1.2705\n",
      "Epoch [2/200], Train Loss: 1.1870, CV Loss: 1.1364\n",
      "Epoch [3/200], Train Loss: 1.0779, CV Loss: 1.0786\n",
      "Epoch [4/200], Train Loss: 1.0310, CV Loss: 1.0705\n",
      "Epoch [5/200], Train Loss: 1.0090, CV Loss: 1.0674\n",
      "Epoch [6/200], Train Loss: 0.9849, CV Loss: 1.0681\n",
      "Epoch [7/200], Train Loss: 0.9557, CV Loss: 1.0691\n",
      "Epoch [8/200], Train Loss: 0.9352, CV Loss: 1.0702\n",
      "Epoch [9/200], Train Loss: 0.9194, CV Loss: 1.0647\n",
      "Epoch [10/200], Train Loss: 0.9124, CV Loss: 1.0628\n",
      "Epoch [11/200], Train Loss: 0.8998, CV Loss: 1.0545\n",
      "Epoch [12/200], Train Loss: 0.8850, CV Loss: 1.0416\n",
      "Epoch [13/200], Train Loss: 0.8662, CV Loss: 1.0296\n",
      "Epoch [14/200], Train Loss: 0.8504, CV Loss: 1.0335\n",
      "Epoch [15/200], Train Loss: 0.8375, CV Loss: 1.0273\n",
      "Epoch [16/200], Train Loss: 0.8245, CV Loss: 1.0183\n",
      "Epoch [17/200], Train Loss: 0.8169, CV Loss: 1.0245\n",
      "Epoch [18/200], Train Loss: 0.8046, CV Loss: 1.0192\n",
      "Epoch [19/200], Train Loss: 0.7988, CV Loss: 1.0169\n",
      "Epoch [20/200], Train Loss: 0.7959, CV Loss: 1.0188\n",
      "Epoch [21/200], Train Loss: 0.7896, CV Loss: 1.0207\n",
      "Epoch [22/200], Train Loss: 0.7859, CV Loss: 1.0216\n",
      "Epoch [23/200], Train Loss: 0.7826, CV Loss: 1.0210\n",
      "Epoch [24/200], Train Loss: 0.7804, CV Loss: 1.0220\n",
      "Epoch [25/200], Train Loss: 0.7774, CV Loss: 1.0220\n",
      "Epoch [26/200], Train Loss: 0.7754, CV Loss: 1.0224\n",
      "Epoch [27/200], Train Loss: 0.7722, CV Loss: 1.0200\n",
      "Epoch [28/200], Train Loss: 0.7691, CV Loss: 1.0113\n",
      "Epoch [29/200], Train Loss: 0.7681, CV Loss: 1.0136\n",
      "Epoch [30/200], Train Loss: 0.7686, CV Loss: 1.0136\n",
      "Epoch [31/200], Train Loss: 0.7660, CV Loss: 1.0173\n",
      "Epoch [32/200], Train Loss: 0.7651, CV Loss: 1.0148\n",
      "Epoch [33/200], Train Loss: 0.7646, CV Loss: 1.0152\n",
      "Epoch [34/200], Train Loss: 0.7640, CV Loss: 1.0173\n",
      "Epoch [35/200], Train Loss: 0.7637, CV Loss: 1.0147\n",
      "Epoch [36/200], Train Loss: 0.7632, CV Loss: 1.0163\n",
      "Epoch [37/200], Train Loss: 0.7628, CV Loss: 1.0144\n",
      "Epoch [38/200], Train Loss: 0.7626, CV Loss: 1.0152\n",
      "Epoch [39/200], Train Loss: 0.7622, CV Loss: 1.0155\n",
      "Epoch [40/200], Train Loss: 0.7621, CV Loss: 1.0160\n",
      "Epoch [41/200], Train Loss: 0.7619, CV Loss: 1.0147\n",
      "Epoch [42/200], Train Loss: 0.7617, CV Loss: 1.0138\n",
      "Epoch [43/200], Train Loss: 0.7614, CV Loss: 1.0159\n",
      "Epoch [44/200], Train Loss: 0.7614, CV Loss: 1.0138\n",
      "Epoch [45/200], Train Loss: 0.7612, CV Loss: 1.0152\n",
      "Epoch [46/200], Train Loss: 0.7610, CV Loss: 1.0130\n",
      "Epoch [47/200], Train Loss: 0.7609, CV Loss: 1.0125\n",
      "Epoch [48/200], Train Loss: 0.7608, CV Loss: 1.0125\n",
      "Epoch [49/200], Train Loss: 0.7626, CV Loss: 1.0116\n",
      "Epoch [50/200], Train Loss: 0.7607, CV Loss: 1.0122\n",
      "Epoch [51/200], Train Loss: 0.7606, CV Loss: 1.0105\n",
      "Epoch [52/200], Train Loss: 0.7605, CV Loss: 1.0106\n",
      "Epoch [53/200], Train Loss: 0.7605, CV Loss: 1.0104\n",
      "Epoch [54/200], Train Loss: 0.7604, CV Loss: 1.0098\n",
      "Epoch [55/200], Train Loss: 0.7604, CV Loss: 1.0085\n",
      "Epoch [56/200], Train Loss: 0.7603, CV Loss: 1.0086\n",
      "Epoch [57/200], Train Loss: 0.7603, CV Loss: 1.0091\n",
      "Epoch [58/200], Train Loss: 0.7602, CV Loss: 1.0083\n",
      "Epoch [59/200], Train Loss: 0.7620, CV Loss: 1.0077\n",
      "Epoch [60/200], Train Loss: 0.7601, CV Loss: 1.0081\n",
      "Epoch [61/200], Train Loss: 0.7601, CV Loss: 1.0073\n",
      "Epoch [62/200], Train Loss: 0.7601, CV Loss: 1.0081\n",
      "Epoch [63/200], Train Loss: 0.7600, CV Loss: 1.0059\n",
      "Epoch [64/200], Train Loss: 0.7600, CV Loss: 1.0058\n",
      "Epoch [65/200], Train Loss: 0.7599, CV Loss: 1.0059\n",
      "Epoch [66/200], Train Loss: 0.7599, CV Loss: 1.0042\n",
      "Epoch [67/200], Train Loss: 0.7598, CV Loss: 1.0012\n",
      "Epoch [68/200], Train Loss: 0.7598, CV Loss: 0.9998\n",
      "Epoch [69/200], Train Loss: 0.7598, CV Loss: 0.9991\n",
      "Epoch [70/200], Train Loss: 0.7597, CV Loss: 0.9975\n",
      "Epoch [71/200], Train Loss: 0.7611, CV Loss: 0.9945\n",
      "Epoch [72/200], Train Loss: 0.7592, CV Loss: 1.0006\n",
      "Epoch [73/200], Train Loss: 0.7584, CV Loss: 0.9647\n",
      "Epoch [74/200], Train Loss: 0.7562, CV Loss: 0.9540\n",
      "Epoch [75/200], Train Loss: 0.7552, CV Loss: 0.9686\n",
      "Epoch [76/200], Train Loss: 0.7551, CV Loss: 0.9546\n",
      "Epoch [77/200], Train Loss: 0.7567, CV Loss: 0.9579\n",
      "Epoch [78/200], Train Loss: 0.7552, CV Loss: 0.9882\n",
      "Epoch [79/200], Train Loss: 0.7550, CV Loss: 0.9573\n",
      "Epoch [80/200], Train Loss: 0.7546, CV Loss: 0.9580\n",
      "Epoch [81/200], Train Loss: 0.7546, CV Loss: 0.9600\n",
      "Epoch [82/200], Train Loss: 0.7562, CV Loss: 0.9619\n",
      "Epoch [83/200], Train Loss: 0.7545, CV Loss: 0.9540\n",
      "Epoch [84/200], Train Loss: 0.7547, CV Loss: 0.9863\n",
      "Epoch [85/200], Train Loss: 0.7548, CV Loss: 0.9667\n",
      "Epoch [86/200], Train Loss: 0.7545, CV Loss: 0.9651\n",
      "Epoch [87/200], Train Loss: 0.7545, CV Loss: 0.9572\n",
      "Epoch [88/200], Train Loss: 0.7545, CV Loss: 0.9844\n",
      "Epoch [89/200], Train Loss: 0.7548, CV Loss: 0.9783\n",
      "Epoch [90/200], Train Loss: 0.7546, CV Loss: 0.9553\n",
      "Epoch [91/200], Train Loss: 0.7546, CV Loss: 0.9671\n",
      "Epoch [92/200], Train Loss: 0.7548, CV Loss: 0.9855\n",
      "Epoch [93/200], Train Loss: 0.7546, CV Loss: 0.9547\n",
      "Epoch [94/200], Train Loss: 0.7547, CV Loss: 0.9811\n",
      "Epoch [95/200], Train Loss: 0.7546, CV Loss: 0.9539\n",
      "Epoch [96/200], Train Loss: 0.7547, CV Loss: 0.9626\n",
      "Epoch [97/200], Train Loss: 0.7562, CV Loss: 0.9862\n",
      "Epoch [98/200], Train Loss: 0.7546, CV Loss: 0.9541\n",
      "Epoch [99/200], Train Loss: 0.7546, CV Loss: 0.9770\n",
      "Epoch [100/200], Train Loss: 0.7545, CV Loss: 0.9552\n",
      "Epoch [101/200], Train Loss: 0.7547, CV Loss: 0.9671\n",
      "Epoch [102/200], Train Loss: 0.7543, CV Loss: 0.9903\n",
      "Epoch [103/200], Train Loss: 0.7547, CV Loss: 0.9945\n",
      "Epoch [104/200], Train Loss: 0.7546, CV Loss: 0.9711\n",
      "Epoch [105/200], Train Loss: 0.7543, CV Loss: 0.9717\n",
      "Epoch [106/200], Train Loss: 0.7543, CV Loss: 0.9580\n",
      "Epoch [107/200], Train Loss: 0.7545, CV Loss: 0.9915\n",
      "Epoch [108/200], Train Loss: 0.7546, CV Loss: 0.9612\n",
      "Epoch [109/200], Train Loss: 0.7544, CV Loss: 1.0038\n",
      "Epoch [110/200], Train Loss: 0.7547, CV Loss: 0.9985\n",
      "Epoch [111/200], Train Loss: 0.7546, CV Loss: 0.9939\n",
      "Epoch [112/200], Train Loss: 0.7546, CV Loss: 0.9840\n",
      "Epoch [113/200], Train Loss: 0.7546, CV Loss: 0.9612\n",
      "Epoch [114/200], Train Loss: 0.7543, CV Loss: 1.0020\n",
      "Epoch [115/200], Train Loss: 0.7547, CV Loss: 1.0000\n",
      "Epoch [116/200], Train Loss: 0.7546, CV Loss: 0.9968\n",
      "Epoch [117/200], Train Loss: 0.7546, CV Loss: 0.9930\n",
      "Epoch [118/200], Train Loss: 0.7546, CV Loss: 0.9883\n",
      "Epoch [119/200], Train Loss: 0.7546, CV Loss: 0.9769\n",
      "Epoch [120/200], Train Loss: 0.7545, CV Loss: 0.9668\n",
      "Epoch [121/200], Train Loss: 0.7545, CV Loss: 0.9739\n",
      "Epoch [122/200], Train Loss: 0.7545, CV Loss: 1.0069\n",
      "Epoch [123/200], Train Loss: 0.7546, CV Loss: 0.9952\n",
      "Epoch [124/200], Train Loss: 0.7545, CV Loss: 0.9711\n",
      "Epoch [125/200], Train Loss: 0.7543, CV Loss: 1.0139\n",
      "Epoch [126/200], Train Loss: 0.7546, CV Loss: 1.0065\n",
      "Epoch [127/200], Train Loss: 0.7546, CV Loss: 1.0028\n",
      "Epoch [128/200], Train Loss: 0.7546, CV Loss: 1.0005\n",
      "Epoch [129/200], Train Loss: 0.7546, CV Loss: 0.9987\n",
      "Epoch [130/200], Train Loss: 0.7546, CV Loss: 0.9951\n",
      "Epoch [131/200], Train Loss: 0.7546, CV Loss: 0.9931\n",
      "Epoch [132/200], Train Loss: 0.7546, CV Loss: 0.9692\n"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
      "Cell \u001b[1;32mIn[10], line 33\u001b[0m\n\u001b[0;32m     31\u001b[0m     optimizer\u001b[38;5;241m.\u001b[39mzero_grad()\n\u001b[0;32m     32\u001b[0m     loss\u001b[38;5;241m.\u001b[39mbackward()\n\u001b[1;32m---> 33\u001b[0m     optimizer\u001b[38;5;241m.\u001b[39mstep()\n\u001b[0;32m     35\u001b[0m     batch_losses\u001b[38;5;241m.\u001b[39mappend(loss\u001b[38;5;241m.\u001b[39mitem())\n\u001b[0;32m     37\u001b[0m \u001b[38;5;66;03m# Calculate average training loss for this epoch\u001b[39;00m\n",
      "File \u001b[1;32mD:\\anaconda3\\Lib\\site-packages\\torch\\optim\\optimizer.py:487\u001b[0m, in \u001b[0;36mOptimizer.profile_hook_step.<locals>.wrapper\u001b[1;34m(*args, **kwargs)\u001b[0m\n\u001b[0;32m    482\u001b[0m         \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m    483\u001b[0m             \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mRuntimeError\u001b[39;00m(\n\u001b[0;32m    484\u001b[0m                 \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mfunc\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m must return None or a tuple of (new_args, new_kwargs), but got \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mresult\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m.\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m    485\u001b[0m             )\n\u001b[1;32m--> 487\u001b[0m out \u001b[38;5;241m=\u001b[39m func(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n\u001b[0;32m    488\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_optimizer_step_code()\n\u001b[0;32m    490\u001b[0m \u001b[38;5;66;03m# call optimizer step post hooks\u001b[39;00m\n",
      "File \u001b[1;32mD:\\anaconda3\\Lib\\site-packages\\torch\\optim\\optimizer.py:91\u001b[0m, in \u001b[0;36m_use_grad_for_differentiable.<locals>._use_grad\u001b[1;34m(self, *args, **kwargs)\u001b[0m\n\u001b[0;32m     89\u001b[0m     torch\u001b[38;5;241m.\u001b[39mset_grad_enabled(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mdefaults[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mdifferentiable\u001b[39m\u001b[38;5;124m\"\u001b[39m])\n\u001b[0;32m     90\u001b[0m     torch\u001b[38;5;241m.\u001b[39m_dynamo\u001b[38;5;241m.\u001b[39mgraph_break()\n\u001b[1;32m---> 91\u001b[0m     ret \u001b[38;5;241m=\u001b[39m func(\u001b[38;5;28mself\u001b[39m, \u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n\u001b[0;32m     92\u001b[0m \u001b[38;5;28;01mfinally\u001b[39;00m:\n\u001b[0;32m     93\u001b[0m     torch\u001b[38;5;241m.\u001b[39m_dynamo\u001b[38;5;241m.\u001b[39mgraph_break()\n",
      "File \u001b[1;32mD:\\anaconda3\\Lib\\site-packages\\torch\\optim\\adam.py:223\u001b[0m, in \u001b[0;36mAdam.step\u001b[1;34m(self, closure)\u001b[0m\n\u001b[0;32m    211\u001b[0m     beta1, beta2 \u001b[38;5;241m=\u001b[39m group[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mbetas\u001b[39m\u001b[38;5;124m\"\u001b[39m]\n\u001b[0;32m    213\u001b[0m     has_complex \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_init_group(\n\u001b[0;32m    214\u001b[0m         group,\n\u001b[0;32m    215\u001b[0m         params_with_grad,\n\u001b[1;32m   (...)\u001b[0m\n\u001b[0;32m    220\u001b[0m         state_steps,\n\u001b[0;32m    221\u001b[0m     )\n\u001b[1;32m--> 223\u001b[0m     adam(\n\u001b[0;32m    224\u001b[0m         params_with_grad,\n\u001b[0;32m    225\u001b[0m         grads,\n\u001b[0;32m    226\u001b[0m         exp_avgs,\n\u001b[0;32m    227\u001b[0m         exp_avg_sqs,\n\u001b[0;32m    228\u001b[0m         max_exp_avg_sqs,\n\u001b[0;32m    229\u001b[0m         state_steps,\n\u001b[0;32m    230\u001b[0m         amsgrad\u001b[38;5;241m=\u001b[39mgroup[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mamsgrad\u001b[39m\u001b[38;5;124m\"\u001b[39m],\n\u001b[0;32m    231\u001b[0m         has_complex\u001b[38;5;241m=\u001b[39mhas_complex,\n\u001b[0;32m    232\u001b[0m         beta1\u001b[38;5;241m=\u001b[39mbeta1,\n\u001b[0;32m    233\u001b[0m         beta2\u001b[38;5;241m=\u001b[39mbeta2,\n\u001b[0;32m    234\u001b[0m         lr\u001b[38;5;241m=\u001b[39mgroup[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mlr\u001b[39m\u001b[38;5;124m\"\u001b[39m],\n\u001b[0;32m    235\u001b[0m         weight_decay\u001b[38;5;241m=\u001b[39mgroup[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mweight_decay\u001b[39m\u001b[38;5;124m\"\u001b[39m],\n\u001b[0;32m    236\u001b[0m         eps\u001b[38;5;241m=\u001b[39mgroup[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124meps\u001b[39m\u001b[38;5;124m\"\u001b[39m],\n\u001b[0;32m    237\u001b[0m         
maximize\u001b[38;5;241m=\u001b[39mgroup[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mmaximize\u001b[39m\u001b[38;5;124m\"\u001b[39m],\n\u001b[0;32m    238\u001b[0m         foreach\u001b[38;5;241m=\u001b[39mgroup[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mforeach\u001b[39m\u001b[38;5;124m\"\u001b[39m],\n\u001b[0;32m    239\u001b[0m         capturable\u001b[38;5;241m=\u001b[39mgroup[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mcapturable\u001b[39m\u001b[38;5;124m\"\u001b[39m],\n\u001b[0;32m    240\u001b[0m         differentiable\u001b[38;5;241m=\u001b[39mgroup[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mdifferentiable\u001b[39m\u001b[38;5;124m\"\u001b[39m],\n\u001b[0;32m    241\u001b[0m         fused\u001b[38;5;241m=\u001b[39mgroup[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mfused\u001b[39m\u001b[38;5;124m\"\u001b[39m],\n\u001b[0;32m    242\u001b[0m         grad_scale\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mgetattr\u001b[39m(\u001b[38;5;28mself\u001b[39m, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mgrad_scale\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;28;01mNone\u001b[39;00m),\n\u001b[0;32m    243\u001b[0m         found_inf\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mgetattr\u001b[39m(\u001b[38;5;28mself\u001b[39m, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mfound_inf\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;28;01mNone\u001b[39;00m),\n\u001b[0;32m    244\u001b[0m     )\n\u001b[0;32m    246\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m loss\n",
      "File \u001b[1;32mD:\\anaconda3\\Lib\\site-packages\\torch\\optim\\optimizer.py:154\u001b[0m, in \u001b[0;36m_disable_dynamo_if_unsupported.<locals>.wrapper.<locals>.maybe_fallback\u001b[1;34m(*args, **kwargs)\u001b[0m\n\u001b[0;32m    152\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m disabled_func(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n\u001b[0;32m    153\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m--> 154\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m func(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n",
      "File \u001b[1;32mD:\\anaconda3\\Lib\\site-packages\\torch\\optim\\adam.py:784\u001b[0m, in \u001b[0;36madam\u001b[1;34m(params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, foreach, capturable, differentiable, fused, grad_scale, found_inf, has_complex, amsgrad, beta1, beta2, lr, weight_decay, eps, maximize)\u001b[0m\n\u001b[0;32m    781\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m    782\u001b[0m     func \u001b[38;5;241m=\u001b[39m _single_tensor_adam\n\u001b[1;32m--> 784\u001b[0m func(\n\u001b[0;32m    785\u001b[0m     params,\n\u001b[0;32m    786\u001b[0m     grads,\n\u001b[0;32m    787\u001b[0m     exp_avgs,\n\u001b[0;32m    788\u001b[0m     exp_avg_sqs,\n\u001b[0;32m    789\u001b[0m     max_exp_avg_sqs,\n\u001b[0;32m    790\u001b[0m     state_steps,\n\u001b[0;32m    791\u001b[0m     amsgrad\u001b[38;5;241m=\u001b[39mamsgrad,\n\u001b[0;32m    792\u001b[0m     has_complex\u001b[38;5;241m=\u001b[39mhas_complex,\n\u001b[0;32m    793\u001b[0m     beta1\u001b[38;5;241m=\u001b[39mbeta1,\n\u001b[0;32m    794\u001b[0m     beta2\u001b[38;5;241m=\u001b[39mbeta2,\n\u001b[0;32m    795\u001b[0m     lr\u001b[38;5;241m=\u001b[39mlr,\n\u001b[0;32m    796\u001b[0m     weight_decay\u001b[38;5;241m=\u001b[39mweight_decay,\n\u001b[0;32m    797\u001b[0m     eps\u001b[38;5;241m=\u001b[39meps,\n\u001b[0;32m    798\u001b[0m     maximize\u001b[38;5;241m=\u001b[39mmaximize,\n\u001b[0;32m    799\u001b[0m     capturable\u001b[38;5;241m=\u001b[39mcapturable,\n\u001b[0;32m    800\u001b[0m     differentiable\u001b[38;5;241m=\u001b[39mdifferentiable,\n\u001b[0;32m    801\u001b[0m     grad_scale\u001b[38;5;241m=\u001b[39mgrad_scale,\n\u001b[0;32m    802\u001b[0m     found_inf\u001b[38;5;241m=\u001b[39mfound_inf,\n\u001b[0;32m    803\u001b[0m )\n",
      "File \u001b[1;32mD:\\anaconda3\\Lib\\site-packages\\torch\\optim\\adam.py:379\u001b[0m, in \u001b[0;36m_single_tensor_adam\u001b[1;34m(params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, grad_scale, found_inf, amsgrad, has_complex, beta1, beta2, lr, weight_decay, eps, maximize, capturable, differentiable)\u001b[0m\n\u001b[0;32m    377\u001b[0m \u001b[38;5;66;03m# Decay the first and second moment running average coefficient\u001b[39;00m\n\u001b[0;32m    378\u001b[0m exp_avg\u001b[38;5;241m.\u001b[39mlerp_(grad, \u001b[38;5;241m1\u001b[39m \u001b[38;5;241m-\u001b[39m beta1)\n\u001b[1;32m--> 379\u001b[0m exp_avg_sq\u001b[38;5;241m.\u001b[39mmul_(beta2)\u001b[38;5;241m.\u001b[39maddcmul_(grad, grad\u001b[38;5;241m.\u001b[39mconj(), value\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m1\u001b[39m \u001b[38;5;241m-\u001b[39m beta2)\n\u001b[0;32m    381\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m capturable \u001b[38;5;129;01mor\u001b[39;00m differentiable:\n\u001b[0;32m    382\u001b[0m     step \u001b[38;5;241m=\u001b[39m step_t\n",
      "\u001b[1;31mKeyboardInterrupt\u001b[0m: "
     ]
    }
   ],
   "source": [
    "# Define the model parameters\n",
    "hidden_size = 10\n",
    "\n",
    "# Instantiate the model\n",
    "input_size = trX.shape[1]\n",
    "num_classes = 4 # 3 movements and static\n",
    "model = FNN(input_size, hidden_size, num_classes)\n",
    "print(model)\n",
    "\n",
    "# Define loss function and optimizer\n",
    "criterion = nn.CrossEntropyLoss()\n",
    "optimizer = torch.optim.Adam(model.parameters())\n",
    "\n",
    "# Lists to store losses\n",
    "train_losses = []\n",
    "te_losses = []\n",
    "\n",
    "# Number of epochs\n",
    "num_epochs = 200\n",
    "\n",
    "for epoch in range(num_epochs):\n",
    "    model.train()\n",
    "    batch_losses = []\n",
    "    \n",
    "    for batch_x, batch_y in trLoader:\n",
    "        # Forward pass\n",
    "        outputs = model(batch_x)\n",
    "        loss = criterion(outputs, batch_y)\n",
    "        \n",
    "        # Backward pass and optimize\n",
    "        optimizer.zero_grad()\n",
    "        loss.backward()\n",
    "        optimizer.step()\n",
    "        \n",
    "        batch_losses.append(loss.item())\n",
    "    \n",
    "    # Calculate average training loss for this epoch\n",
    "    avg_train_loss = sum(batch_losses) / len(batch_losses)\n",
    "    train_losses.append(avg_train_loss)\n",
    "    \n",
    "    # Evaluate on cross-validation set\n",
    "    model.eval()\n",
    "    te_batch_losses = []\n",
    "    with torch.no_grad():\n",
    "        for te_x, te_y in teLoader:\n",
    "            te_outputs = model(te_x)\n",
    "            te_loss = criterion(te_outputs, te_y)\n",
    "            te_batch_losses.append(te_loss.item())\n",
    "    \n",
    "    avg_te_loss = sum(te_batch_losses) / len(te_batch_losses)\n",
    "    te_losses.append(avg_te_loss)\n",
    "    \n",
    "    print(f'Epoch [{epoch+1}/{num_epochs}], Train Loss: {avg_train_loss:.4f}, CV Loss: {avg_te_loss:.4f}')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "18542042-e984-43dc-8f7d-0103e3717a3b",
   "metadata": {},
   "outputs": [],
   "source": [
    "#计算精度与学习曲线"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "763f47ed-1913-4b49-a98f-e4c757a1e714",
   "metadata": {},
   "outputs": [],
   "source": [
    "import matplotlib.pyplot as plt\n",
    "\n",
    "# Calculate and print accuracies for training and cross-validation sets\n",
    "model.eval()\n",
    "with torch.no_grad():\n",
    "    # Training set accuracy\n",
    "    tr_correct = 0\n",
    "    tr_total = 0\n",
    "    for images, labels in trLoader:\n",
    "        outputs = model(images)\n",
    "        _, predicted = torch.max(outputs, 1)\n",
    "        _, true_labels = torch.max(labels, 1)\n",
    "        tr_total += labels.size(0)\n",
    "        tr_correct += (predicted == true_labels).sum().item()\n",
    "    \n",
    "    tr_accuracy = 100 * tr_correct / tr_total\n",
    "    \n",
    "    # test set accuracy\n",
    "    te_correct = 0\n",
    "    te_total = 0\n",
    "    for images, labels in teLoader:\n",
    "        outputs = model(images)\n",
    "        _, predicted = torch.max(outputs, 1)\n",
    "        _, true_labels = torch.max(labels, 1)\n",
    "        te_total += labels.size(0)\n",
    "        te_correct += (predicted == true_labels).sum().item()\n",
    "    \n",
    "    te_accuracy = 100 * te_correct / te_total\n",
    "\n",
    "print(f'Accuracy on training set: {tr_accuracy:.2f}%')\n",
    "print(f'Accuracy on cross-validation set: {te_accuracy:.2f}%')\n",
    "\n",
    "# Plot training and cross-validation losses\n",
    "plt.figure(figsize=(10, 5))\n",
    "plt.plot(range(1, num_epochs+1), train_losses, label='Training Loss')\n",
    "plt.plot(range(1, num_epochs+1), te_losses, label='Testing Loss')\n",
    "plt.xlabel('Epoch')\n",
    "plt.ylabel('Loss')\n",
    "plt.legend()\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "627990a0-8d5d-4bf3-92e7-55cd20489bba",
   "metadata": {},
   "outputs": [],
   "source": [
    "#读取MNIST数据集"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "766fd638-17a7-4876-9f17-808adb3fecbd",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import torchvision\n",
    "import torchvision.transforms as transforms\n",
    "import matplotlib.pyplot as plt\n",
    "\n",
    "label_size = 18 # Label size\n",
    "ticklabel_size = 14 # Tick label size\n",
    "\n",
    "class FlattenTransform:\n",
    "    def __call__(self, tensor):\n",
    "        ''' \n",
    "        Flatten tensor into an 1-D vector\n",
    "        '''\n",
    "        return tensor.view(-1)\n",
    "    \n",
    "# Define a transform to normalize the data\n",
    "transform = transforms.Compose([\n",
    "    transforms.ToTensor(),\n",
    "    FlattenTransform()\n",
    "])\n",
    "\n",
    "# Load test data from the MNIST\n",
    "testset = torchvision.datasets.MNIST(root='./Data', train=False, download=False, transform=transform)\n",
    "print(f\"Test set size: {len(testset)}\")\n",
    "\n",
    "# Load training data from the MNIST\n",
    "trainset = torchvision.datasets.MNIST(root='./Data', train=True, download=False, transform=transform)\n",
    "print(f\"Training set size: {len(trainset)}\")\n",
    "\n",
    "# Rate of trX and cvX\n",
    "tr_cv_rate = 0.8\n",
    "\n",
    "# Create a list to store indices for each class\n",
    "class_indices = [[] for _ in range(10)]  # 10 classes in MNIST\n",
    "\n",
    "# Populate class_indices\n",
    "for idx, (_, label) in enumerate(trainset):\n",
    "    class_indices[label].append(idx)\n",
    "\n",
    "# Calculate the number of samples for each class in training and validation sets\n",
    "train_size_per_class = int(tr_cv_rate * min(len(indices) for indices in class_indices))\n",
    "val_size_per_class = min(len(indices) for indices in class_indices) - train_size_per_class\n",
    "\n",
    "# Create balanced train and validation sets\n",
    "train_indices = []\n",
    "val_indices = []\n",
    "for indices in class_indices:\n",
    "    train_indices.extend(indices[:train_size_per_class])\n",
    "    val_indices.extend(indices[train_size_per_class:train_size_per_class + val_size_per_class])\n",
    "\n",
    "# Create Subset datasets\n",
    "from torch.utils.data import Subset\n",
    "trX = Subset(trainset, train_indices)\n",
    "cvX = Subset(trainset, val_indices)\n",
    "\n",
    "print(f\"Number of training samples: {len(trX)}\")\n",
    "print(f\"Number of cross-validation samples: {len(cvX)}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ab74face-7098-4f3b-8312-c275ea56fb5d",
   "metadata": {},
   "outputs": [],
   "source": [
    "#构建DataLoaders，准备训练模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "21df4ffd-0942-421e-815f-b52d11757929",
   "metadata": {},
   "outputs": [],
   "source": [
    "batch_size = 42 # Define training batch\n",
    "\n",
    "def one_hot_collate(batch):\n",
    "    data = torch.stack([item[0] for item in batch])\n",
    "    labels = torch.tensor([item[1] for item in batch])\n",
    "    one_hot_labels = torch.zeros(labels.size(0), 10)  # 10 classes in MNIST\n",
    "    one_hot_labels.scatter_(1, labels.unsqueeze(1), 1)\n",
    "    return data, one_hot_labels\n",
    "\n",
    "trLoader = torch.utils.data.DataLoader(trX, batch_size=batch_size, shuffle=True, num_workers=0, collate_fn=one_hot_collate)\n",
    "cvLoader = torch.utils.data.DataLoader(cvX, batch_size=batch_size, shuffle=False, num_workers=0, collate_fn=one_hot_collate)\n",
    "teLoader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=0, collate_fn=one_hot_collate)\n",
    "\n",
    "# Get a batch of training data\n",
    "dataiter = iter(trLoader)\n",
    "data, labels = next(dataiter)\n",
    "\n",
    "input_size = data[0].numpy().shape[0]\n",
    "print(f'Input_size is {input_size}')\n",
    "print(labels)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a1f7029e-45aa-4809-879a-8882d55088d7",
   "metadata": {},
   "outputs": [],
   "source": [
    "#定义并训练全连接神经网络\n",
    "#输入：1-D向量\n",
    "#输出：手写字母类型的概率分布\n",
    "#隐藏层：2层\n",
    "#节点数：100个/层"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6a18c28f-6e14-407a-88a4-17414dcd54a9",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch.nn as nn\n",
    "\n",
    "class FNN(nn.Module):\n",
    "    def __init__(self, input_size, hidden_size, num_classes):\n",
    "        super(FNN, self).__init__()\n",
    "        self.fc1 = nn.Linear(input_size, hidden_size)\n",
    "        self.relu1 = nn.ReLU()\n",
    "        self.fc2 = nn.Linear(hidden_size, hidden_size)\n",
    "        self.relu2 = nn.ReLU()\n",
    "        self.fc3 = nn.Linear(hidden_size, num_classes)\n",
    "        self.softmax = nn.Softmax(dim=1)\n",
    "    \n",
    "    def forward(self, x):\n",
    "        x = self.fc1(x)\n",
    "        x = self.relu1(x)\n",
    "        x = self.fc2(x)\n",
    "        x = self.relu2(x)\n",
    "        x = self.fc3(x)\n",
    "        out = self.softmax(x)\n",
    "        return out\n",
    "\n",
    "# Define the model parameters\n",
    "hidden_size = 10\n",
    "num_classes = 10  # MNIST has 10 classes (digits 0-9)\n",
    "\n",
    "# Instantiate the model\n",
    "model = FNN(input_size, hidden_size, num_classes)\n",
    "print(model)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f1357bbc-3ac5-48e1-af04-c5d52b01932a",
   "metadata": {},
   "outputs": [],
   "source": [
    "#使用Adam作为Optimizor训练模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "18ba7cd6-bf29-4ae9-8d86-5c6f3bd77b38",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Define loss function and optimizer\n",
    "criterion = nn.CrossEntropyLoss()\n",
    "optimizer = torch.optim.Adam(model.parameters())\n",
    "\n",
    "# Lists to store losses\n",
    "train_losses = []\n",
    "cv_losses = []\n",
    "\n",
    "# Number of epochs\n",
    "num_epochs = 50\n",
    "\n",
    "for epoch in range(num_epochs):\n",
    "    model.train()\n",
    "    batch_losses = []\n",
    "    \n",
    "    for batch_x, batch_y in trLoader:\n",
    "        # Forward pass\n",
    "        outputs = model(batch_x)\n",
    "        loss = criterion(outputs, batch_y)\n",
    "        \n",
    "        # Backward pass and optimize\n",
    "        optimizer.zero_grad()\n",
    "        loss.backward()\n",
    "        optimizer.step()\n",
    "        \n",
    "        batch_losses.append(loss.item())\n",
    "    \n",
    "    # Calculate average training loss for this epoch\n",
    "    avg_train_loss = sum(batch_losses) / len(batch_losses)\n",
    "    train_losses.append(avg_train_loss)\n",
    "    \n",
    "    # Evaluate on cross-validation set\n",
    "    model.eval()\n",
    "    cv_batch_losses = []\n",
    "    with torch.no_grad():\n",
    "        for cv_x, cv_y in cvLoader:\n",
    "            cv_outputs = model(cv_x)\n",
    "            cv_loss = criterion(cv_outputs, cv_y)\n",
    "            cv_batch_losses.append(cv_loss.item())\n",
    "    \n",
    "    avg_cv_loss = sum(cv_batch_losses) / len(cv_batch_losses)\n",
    "    cv_losses.append(avg_cv_loss)\n",
    "    \n",
    "    print(f'Epoch [{epoch+1}/{num_epochs}], Train Loss: {avg_train_loss:.4f}, CV Loss: {avg_cv_loss:.4f}')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "18e4cba2-c3ff-424d-ad60-016c5dc095c4",
   "metadata": {},
   "outputs": [],
   "source": [
    "#计算识别精度，展示学习曲线"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3df9f179-f614-418a-93a4-4c0db09c536b",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Accuracy on training set: 94.86%\n",
      "Accuracy on cross-validation set: 91.68%\n"
     ]
    }
   ],
   "source": [
    "# Calculate and print accuracies for training and cross-validation sets\n",
    "model.eval()\n",
    "with torch.no_grad():\n",
    "    # Training set accuracy\n",
    "    tr_correct = 0\n",
    "    tr_total = 0\n",
    "    for images, labels in trLoader:\n",
    "        outputs = model(images)\n",
    "        _, predicted = torch.max(outputs, 1)\n",
    "        # 如果 labels 是 one-hot 编码，则需要 argmax 获取原始标签\n",
    "        if len(labels.shape) > 1 and labels.shape[1] > 1:\n",
    "            true_labels = torch.argmax(labels, dim=1)\n",
    "        else:\n",
    "            true_labels = labels\n",
    "        tr_total += labels.size(0)\n",
    "        tr_correct += (predicted == true_labels).sum().item()\n",
    "\n",
    "    tr_accuracy = 100 * tr_correct / tr_total\n",
    "\n",
    "    # Cross-validation set accuracy\n",
    "    cv_correct = 0\n",
    "    cv_total = 0\n",
    "    for images, labels in cvLoader:\n",
    "        outputs = model(images)\n",
    "        _, predicted = torch.max(outputs, 1)\n",
    "        # 如果 labels 是 one-hot 编码，则需要 argmax 获取原始标签\n",
    "        if len(labels.shape) > 1 and labels.shape[1] > 1:\n",
    "            true_labels = torch.argmax(labels, dim=1)\n",
    "        else:\n",
    "            true_labels = labels\n",
    "        cv_total += labels.size(0)\n",
    "        cv_correct += (predicted == true_labels).sum().item()\n",
    "\n",
    "    cv_accuracy = 100 * cv_correct / cv_total\n",
    "\n",
    "print(f'Accuracy on training set: {tr_accuracy:.2f}%')\n",
    "print(f'Accuracy on cross-validation set: {cv_accuracy:.2f}%')\n",
    "\n",
    "# Plot training and cross-validation losses\n",
    "plt.figure(figsize=(10, 5))\n",
    "plt.plot(range(1, num_epochs+1), train_losses, label='Training Loss')\n",
    "plt.plot(range(1, num_epochs+1), cv_losses, label='Cross-Validation Loss')\n",
    "plt.xlabel('Epoch')\n",
    "plt.ylabel('Loss')\n",
    "plt.title('Training and Cross-Validation Loss')\n",
    "plt.legend()\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b4672fd9-0e72-47ab-b59a-0d1ef523e547",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
