{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_7196\\1195215114.py:91: UserWarning: This overload of add_ is deprecated:\n",
      "\tadd_(Number alpha, Tensor other)\n",
      "Consider using one of the following signatures instead:\n",
      "\tadd_(Tensor other, *, Number alpha) (Triggered internally at ..\\torch\\csrc\\utils\\python_arg_parser.cpp:1630.)\n",
      "  p.data.add_(-group['lr'], grad)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch [1/235], Loss: 2.2959, Accuracy: 10.01%\n",
      "Epoch [2/235], Loss: 2.2865, Accuracy: 20.07%\n",
      "Epoch [3/235], Loss: 2.2786, Accuracy: 14.03%\n",
      "Epoch [4/235], Loss: 2.2667, Accuracy: 11.35%\n",
      "Epoch [5/235], Loss: 2.2551, Accuracy: 21.63%\n",
      "Epoch [6/235], Loss: 2.2430, Accuracy: 13.27%\n",
      "Epoch [7/235], Loss: 2.2206, Accuracy: 23.16%\n",
      "Epoch [8/235], Loss: 2.1995, Accuracy: 19.76%\n",
      "Epoch [9/235], Loss: 2.1595, Accuracy: 43.62%\n",
      "Epoch [10/235], Loss: 2.1129, Accuracy: 43.48%\n",
      "Epoch [11/235], Loss: 2.0515, Accuracy: 55.37%\n",
      "Epoch [12/235], Loss: 1.9585, Accuracy: 51.15%\n",
      "Epoch [13/235], Loss: 1.8340, Accuracy: 53.66%\n",
      "Epoch [14/235], Loss: 1.7211, Accuracy: 55.62%\n",
      "Epoch [15/235], Loss: 1.8710, Accuracy: 28.10%\n",
      "Epoch [16/235], Loss: 1.9431, Accuracy: 45.92%\n",
      "Epoch [17/235], Loss: 1.8143, Accuracy: 49.77%\n",
      "Epoch [18/235], Loss: 1.6366, Accuracy: 61.74%\n",
      "Epoch [19/235], Loss: 1.4087, Accuracy: 62.08%\n",
      "Epoch [20/235], Loss: 1.2061, Accuracy: 64.93%\n",
      "Epoch [21/235], Loss: 1.1473, Accuracy: 66.14%\n",
      "Epoch [22/235], Loss: 1.0787, Accuracy: 61.63%\n",
      "Epoch [23/235], Loss: 1.1166, Accuracy: 63.48%\n",
      "Epoch [24/235], Loss: 1.0413, Accuracy: 68.79%\n",
      "Epoch [25/235], Loss: 0.9304, Accuracy: 74.61%\n",
      "Epoch [26/235], Loss: 0.7770, Accuracy: 79.31%\n",
      "Epoch [27/235], Loss: 0.6387, Accuracy: 82.10%\n",
      "Epoch [28/235], Loss: 0.5638, Accuracy: 82.51%\n",
      "Epoch [29/235], Loss: 0.7074, Accuracy: 75.79%\n",
      "Epoch [30/235], Loss: 1.0235, Accuracy: 71.33%\n",
      "Epoch [31/235], Loss: 0.9546, Accuracy: 79.49%\n",
      "Epoch [32/235], Loss: 0.9721, Accuracy: 79.31%\n",
      "Epoch [33/235], Loss: 0.9460, Accuracy: 78.84%\n",
      "Epoch [34/235], Loss: 0.8484, Accuracy: 79.98%\n",
      "Epoch [35/235], Loss: 0.7279, Accuracy: 81.62%\n",
      "Epoch [36/235], Loss: 0.6124, Accuracy: 82.37%\n",
      "Epoch [37/235], Loss: 0.4919, Accuracy: 86.31%\n",
      "Epoch [38/235], Loss: 0.3918, Accuracy: 89.68%\n",
      "Epoch [39/235], Loss: 0.3601, Accuracy: 89.34%\n",
      "Epoch [40/235], Loss: 0.3174, Accuracy: 90.56%\n",
      "Epoch [41/235], Loss: 0.2870, Accuracy: 91.53%\n",
      "Epoch [42/235], Loss: 0.2741, Accuracy: 91.73%\n",
      "Epoch [43/235], Loss: 0.2600, Accuracy: 91.98%\n",
      "Epoch [44/235], Loss: 0.2380, Accuracy: 92.83%\n",
      "Epoch [45/235], Loss: 0.2480, Accuracy: 92.44%\n",
      "Epoch [46/235], Loss: 0.2199, Accuracy: 93.49%\n",
      "Epoch [47/235], Loss: 0.2060, Accuracy: 93.81%\n",
      "Epoch [48/235], Loss: 0.1989, Accuracy: 94.05%\n",
      "Epoch [49/235], Loss: 0.1993, Accuracy: 94.12%\n",
      "Epoch [50/235], Loss: 0.1898, Accuracy: 94.23%\n",
      "Epoch [51/235], Loss: 0.1870, Accuracy: 94.23%\n",
      "Epoch [52/235], Loss: 0.1795, Accuracy: 94.50%\n",
      "Epoch [53/235], Loss: 0.1684, Accuracy: 94.93%\n",
      "Epoch [54/235], Loss: 0.1654, Accuracy: 95.07%\n",
      "Epoch [55/235], Loss: 0.1601, Accuracy: 95.14%\n",
      "Epoch [56/235], Loss: 0.1576, Accuracy: 95.17%\n",
      "Epoch [57/235], Loss: 0.1499, Accuracy: 95.39%\n",
      "Epoch [58/235], Loss: 0.1477, Accuracy: 95.47%\n",
      "Epoch [59/235], Loss: 0.1587, Accuracy: 95.05%\n",
      "Epoch [60/235], Loss: 0.1441, Accuracy: 95.55%\n",
      "Epoch [61/235], Loss: 0.1381, Accuracy: 95.75%\n",
      "Epoch [62/235], Loss: 0.1378, Accuracy: 95.79%\n",
      "Epoch [63/235], Loss: 0.1376, Accuracy: 95.79%\n",
      "Epoch [64/235], Loss: 0.1279, Accuracy: 96.11%\n",
      "Epoch [65/235], Loss: 0.1280, Accuracy: 96.09%\n",
      "Epoch [66/235], Loss: 0.1292, Accuracy: 96.03%\n",
      "Epoch [67/235], Loss: 0.1256, Accuracy: 96.14%\n",
      "Epoch [68/235], Loss: 0.1253, Accuracy: 96.17%\n",
      "Epoch [69/235], Loss: 0.1206, Accuracy: 96.27%\n",
      "Epoch [70/235], Loss: 0.1145, Accuracy: 96.45%\n",
      "Epoch [71/235], Loss: 0.1130, Accuracy: 96.53%\n",
      "Epoch [72/235], Loss: 0.1153, Accuracy: 96.46%\n",
      "Epoch [73/235], Loss: 0.1166, Accuracy: 96.40%\n",
      "Epoch [74/235], Loss: 0.1111, Accuracy: 96.61%\n",
      "Epoch [75/235], Loss: 0.1082, Accuracy: 96.68%\n",
      "Epoch [76/235], Loss: 0.1084, Accuracy: 96.70%\n",
      "Epoch [77/235], Loss: 0.1090, Accuracy: 96.68%\n",
      "Epoch [78/235], Loss: 0.1075, Accuracy: 96.68%\n",
      "Epoch [79/235], Loss: 0.1085, Accuracy: 96.67%\n",
      "Epoch [80/235], Loss: 0.1037, Accuracy: 96.81%\n",
      "Epoch [81/235], Loss: 0.1006, Accuracy: 96.93%\n",
      "Epoch [82/235], Loss: 0.0989, Accuracy: 97.02%\n",
      "Epoch [83/235], Loss: 0.0965, Accuracy: 97.12%\n",
      "Epoch [84/235], Loss: 0.0941, Accuracy: 97.20%\n",
      "Epoch [85/235], Loss: 0.0946, Accuracy: 97.18%\n",
      "Epoch [86/235], Loss: 0.0942, Accuracy: 97.22%\n",
      "Epoch [87/235], Loss: 0.0906, Accuracy: 97.22%\n",
      "Epoch [88/235], Loss: 0.0896, Accuracy: 97.26%\n",
      "Epoch [89/235], Loss: 0.0862, Accuracy: 97.39%\n",
      "Epoch [90/235], Loss: 0.0876, Accuracy: 97.37%\n",
      "Epoch [91/235], Loss: 0.0859, Accuracy: 97.43%\n",
      "Epoch [92/235], Loss: 0.0876, Accuracy: 97.39%\n",
      "Epoch [93/235], Loss: 0.0873, Accuracy: 97.36%\n",
      "Epoch [94/235], Loss: 0.0855, Accuracy: 97.39%\n",
      "Epoch [95/235], Loss: 0.0848, Accuracy: 97.34%\n",
      "Epoch [96/235], Loss: 0.0887, Accuracy: 97.22%\n",
      "Epoch [97/235], Loss: 0.0937, Accuracy: 97.06%\n",
      "Epoch [98/235], Loss: 0.0945, Accuracy: 97.02%\n",
      "Epoch [99/235], Loss: 0.0890, Accuracy: 97.17%\n",
      "Epoch [100/235], Loss: 0.0889, Accuracy: 97.24%\n",
      "Epoch [101/235], Loss: 0.0843, Accuracy: 97.43%\n",
      "Epoch [102/235], Loss: 0.0831, Accuracy: 97.46%\n",
      "Epoch [103/235], Loss: 0.0872, Accuracy: 97.25%\n",
      "Epoch [104/235], Loss: 0.0900, Accuracy: 97.08%\n",
      "Epoch [105/235], Loss: 0.0891, Accuracy: 97.16%\n",
      "Epoch [106/235], Loss: 0.0892, Accuracy: 97.20%\n",
      "Epoch [107/235], Loss: 0.0859, Accuracy: 97.33%\n",
      "Epoch [108/235], Loss: 0.0829, Accuracy: 97.43%\n",
      "Epoch [109/235], Loss: 0.0868, Accuracy: 97.36%\n",
      "Epoch [110/235], Loss: 0.0866, Accuracy: 97.42%\n",
      "Epoch [111/235], Loss: 0.0890, Accuracy: 97.32%\n",
      "Epoch [112/235], Loss: 0.0875, Accuracy: 97.40%\n",
      "Epoch [113/235], Loss: 0.0852, Accuracy: 97.44%\n",
      "Epoch [114/235], Loss: 0.0853, Accuracy: 97.42%\n",
      "Epoch [115/235], Loss: 0.0821, Accuracy: 97.52%\n",
      "Epoch [116/235], Loss: 0.0761, Accuracy: 97.73%\n",
      "Epoch [117/235], Loss: 0.0733, Accuracy: 97.82%\n",
      "Epoch [118/235], Loss: 0.0745, Accuracy: 97.80%\n",
      "Epoch [119/235], Loss: 0.0785, Accuracy: 97.64%\n",
      "Epoch [120/235], Loss: 0.0772, Accuracy: 97.64%\n",
      "Epoch [121/235], Loss: 0.0731, Accuracy: 97.80%\n",
      "Epoch [122/235], Loss: 0.0704, Accuracy: 97.88%\n",
      "Epoch [123/235], Loss: 0.0698, Accuracy: 97.92%\n",
      "Epoch [124/235], Loss: 0.0704, Accuracy: 97.86%\n",
      "Epoch [125/235], Loss: 0.0720, Accuracy: 97.80%\n",
      "Epoch [126/235], Loss: 0.0722, Accuracy: 97.78%\n",
      "Epoch [127/235], Loss: 0.0727, Accuracy: 97.72%\n",
      "Epoch [128/235], Loss: 0.0725, Accuracy: 97.75%\n",
      "Epoch [129/235], Loss: 0.0735, Accuracy: 97.73%\n",
      "Epoch [130/235], Loss: 0.0743, Accuracy: 97.73%\n",
      "Epoch [131/235], Loss: 0.0739, Accuracy: 97.79%\n",
      "Epoch [132/235], Loss: 0.0692, Accuracy: 97.90%\n",
      "Epoch [133/235], Loss: 0.0668, Accuracy: 97.93%\n",
      "Epoch [134/235], Loss: 0.0622, Accuracy: 98.07%\n",
      "Epoch [135/235], Loss: 0.0604, Accuracy: 98.14%\n",
      "Epoch [136/235], Loss: 0.0613, Accuracy: 98.12%\n",
      "Epoch [137/235], Loss: 0.0632, Accuracy: 98.07%\n",
      "Epoch [138/235], Loss: 0.0665, Accuracy: 97.98%\n",
      "Epoch [139/235], Loss: 0.0688, Accuracy: 97.93%\n",
      "Epoch [140/235], Loss: 0.0683, Accuracy: 97.97%\n",
      "Epoch [141/235], Loss: 0.0656, Accuracy: 98.09%\n",
      "Epoch [142/235], Loss: 0.0634, Accuracy: 98.13%\n",
      "Epoch [143/235], Loss: 0.0609, Accuracy: 98.16%\n",
      "Epoch [144/235], Loss: 0.0603, Accuracy: 98.21%\n",
      "Epoch [145/235], Loss: 0.0619, Accuracy: 98.18%\n",
      "Epoch [146/235], Loss: 0.0659, Accuracy: 98.09%\n",
      "Epoch [147/235], Loss: 0.0701, Accuracy: 97.93%\n",
      "Epoch [148/235], Loss: 0.0701, Accuracy: 97.89%\n",
      "Epoch [149/235], Loss: 0.0655, Accuracy: 98.03%\n",
      "Epoch [150/235], Loss: 0.0676, Accuracy: 97.96%\n",
      "Epoch [151/235], Loss: 0.0667, Accuracy: 98.00%\n",
      "Epoch [152/235], Loss: 0.0636, Accuracy: 98.13%\n",
      "Epoch [153/235], Loss: 0.0644, Accuracy: 98.13%\n",
      "Epoch [154/235], Loss: 0.0630, Accuracy: 98.16%\n",
      "Epoch [155/235], Loss: 0.0649, Accuracy: 98.06%\n",
      "Epoch [156/235], Loss: 0.0679, Accuracy: 97.93%\n",
      "Epoch [157/235], Loss: 0.0727, Accuracy: 97.70%\n",
      "Epoch [158/235], Loss: 0.0695, Accuracy: 97.81%\n",
      "Epoch [159/235], Loss: 0.0666, Accuracy: 97.94%\n",
      "Epoch [160/235], Loss: 0.0682, Accuracy: 97.86%\n",
      "Epoch [161/235], Loss: 0.0689, Accuracy: 97.82%\n",
      "Epoch [162/235], Loss: 0.0642, Accuracy: 97.99%\n",
      "Epoch [163/235], Loss: 0.0655, Accuracy: 97.99%\n",
      "Epoch [164/235], Loss: 0.0678, Accuracy: 97.95%\n",
      "Epoch [165/235], Loss: 0.0711, Accuracy: 97.87%\n",
      "Epoch [166/235], Loss: 0.0696, Accuracy: 97.92%\n",
      "Epoch [167/235], Loss: 0.0660, Accuracy: 98.02%\n",
      "Epoch [168/235], Loss: 0.0638, Accuracy: 98.09%\n",
      "Epoch [169/235], Loss: 0.0593, Accuracy: 98.25%\n",
      "Epoch [170/235], Loss: 0.0569, Accuracy: 98.34%\n",
      "Epoch [171/235], Loss: 0.0590, Accuracy: 98.25%\n",
      "Epoch [172/235], Loss: 0.0601, Accuracy: 98.24%\n",
      "Epoch [173/235], Loss: 0.0624, Accuracy: 98.10%\n",
      "Epoch [174/235], Loss: 0.0636, Accuracy: 98.10%\n",
      "Epoch [175/235], Loss: 0.0640, Accuracy: 98.06%\n",
      "Epoch [176/235], Loss: 0.0634, Accuracy: 98.08%\n",
      "Epoch [177/235], Loss: 0.0641, Accuracy: 98.02%\n",
      "Epoch [178/235], Loss: 0.0641, Accuracy: 98.05%\n",
      "Epoch [179/235], Loss: 0.0641, Accuracy: 98.03%\n",
      "Epoch [180/235], Loss: 0.0635, Accuracy: 98.06%\n",
      "Epoch [181/235], Loss: 0.0621, Accuracy: 98.14%\n",
      "Epoch [182/235], Loss: 0.0619, Accuracy: 98.11%\n",
      "Epoch [183/235], Loss: 0.0635, Accuracy: 98.06%\n",
      "Epoch [184/235], Loss: 0.0645, Accuracy: 98.03%\n",
      "Epoch [185/235], Loss: 0.0657, Accuracy: 98.01%\n",
      "Epoch [186/235], Loss: 0.0655, Accuracy: 98.01%\n",
      "Epoch [187/235], Loss: 0.0677, Accuracy: 97.94%\n",
      "Epoch [188/235], Loss: 0.0697, Accuracy: 97.84%\n",
      "Epoch [189/235], Loss: 0.0658, Accuracy: 97.89%\n",
      "Epoch [190/235], Loss: 0.0614, Accuracy: 98.04%\n",
      "Epoch [191/235], Loss: 0.0566, Accuracy: 98.23%\n",
      "Epoch [192/235], Loss: 0.0555, Accuracy: 98.28%\n",
      "Epoch [193/235], Loss: 0.0562, Accuracy: 98.24%\n",
      "Epoch [194/235], Loss: 0.0595, Accuracy: 98.16%\n",
      "Epoch [195/235], Loss: 0.0602, Accuracy: 98.17%\n",
      "Epoch [196/235], Loss: 0.0597, Accuracy: 98.20%\n",
      "Epoch [197/235], Loss: 0.0602, Accuracy: 98.19%\n",
      "Epoch [198/235], Loss: 0.0591, Accuracy: 98.25%\n",
      "Epoch [199/235], Loss: 0.0593, Accuracy: 98.23%\n",
      "Epoch [200/235], Loss: 0.0578, Accuracy: 98.29%\n",
      "Epoch [201/235], Loss: 0.0569, Accuracy: 98.27%\n",
      "Epoch [202/235], Loss: 0.0569, Accuracy: 98.27%\n",
      "Epoch [203/235], Loss: 0.0576, Accuracy: 98.23%\n",
      "Epoch [204/235], Loss: 0.0574, Accuracy: 98.26%\n",
      "Epoch [205/235], Loss: 0.0577, Accuracy: 98.25%\n",
      "Epoch [206/235], Loss: 0.0603, Accuracy: 98.12%\n",
      "Epoch [207/235], Loss: 0.0625, Accuracy: 98.03%\n",
      "Epoch [208/235], Loss: 0.0583, Accuracy: 98.21%\n",
      "Epoch [209/235], Loss: 0.0571, Accuracy: 98.29%\n",
      "Epoch [210/235], Loss: 0.0547, Accuracy: 98.38%\n",
      "Epoch [211/235], Loss: 0.0519, Accuracy: 98.46%\n",
      "Epoch [212/235], Loss: 0.0527, Accuracy: 98.42%\n",
      "Epoch [213/235], Loss: 0.0536, Accuracy: 98.35%\n",
      "Epoch [214/235], Loss: 0.0539, Accuracy: 98.34%\n",
      "Epoch [215/235], Loss: 0.0531, Accuracy: 98.37%\n",
      "Epoch [216/235], Loss: 0.0539, Accuracy: 98.36%\n",
      "Epoch [217/235], Loss: 0.0548, Accuracy: 98.35%\n",
      "Epoch [218/235], Loss: 0.0539, Accuracy: 98.35%\n",
      "Epoch [219/235], Loss: 0.0552, Accuracy: 98.34%\n",
      "Epoch [220/235], Loss: 0.0569, Accuracy: 98.28%\n",
      "Epoch [221/235], Loss: 0.0574, Accuracy: 98.23%\n",
      "Epoch [222/235], Loss: 0.0568, Accuracy: 98.28%\n",
      "Epoch [223/235], Loss: 0.0563, Accuracy: 98.32%\n",
      "Epoch [224/235], Loss: 0.0552, Accuracy: 98.34%\n",
      "Epoch [225/235], Loss: 0.0533, Accuracy: 98.35%\n",
      "Epoch [226/235], Loss: 0.0524, Accuracy: 98.44%\n",
      "Epoch [227/235], Loss: 0.0530, Accuracy: 98.43%\n",
      "Epoch [228/235], Loss: 0.0543, Accuracy: 98.39%\n",
      "Epoch [229/235], Loss: 0.0562, Accuracy: 98.32%\n",
      "Epoch [230/235], Loss: 0.0562, Accuracy: 98.33%\n",
      "Epoch [231/235], Loss: 0.0580, Accuracy: 98.32%\n",
      "Epoch [232/235], Loss: 0.0581, Accuracy: 98.28%\n",
      "Epoch [233/235], Loss: 0.0576, Accuracy: 98.25%\n",
      "Epoch [234/235], Loss: 0.0583, Accuracy: 98.20%\n",
      "Epoch [235/235], Loss: 0.0602, Accuracy: 98.16%\n",
      "Test Accuracy: 98.18%\n",
      "Epoch [1/235], Loss: 2.2971, Accuracy: 10.86%\n",
      "Epoch [2/235], Loss: 2.2893, Accuracy: 16.75%\n",
      "Epoch [3/235], Loss: 2.2853, Accuracy: 10.44%\n",
      "Epoch [4/235], Loss: 2.2760, Accuracy: 17.44%\n",
      "Epoch [5/235], Loss: 2.2673, Accuracy: 32.42%\n",
      "Epoch [6/235], Loss: 2.2579, Accuracy: 27.98%\n",
      "Epoch [7/235], Loss: 2.2513, Accuracy: 17.88%\n",
      "Epoch [8/235], Loss: 2.2367, Accuracy: 27.55%\n",
      "Epoch [9/235], Loss: 2.2234, Accuracy: 45.66%\n",
      "Epoch [10/235], Loss: 2.2100, Accuracy: 23.42%\n",
      "Epoch [11/235], Loss: 2.1982, Accuracy: 31.99%\n",
      "Epoch [12/235], Loss: 2.1896, Accuracy: 30.80%\n",
      "Epoch [13/235], Loss: 2.1606, Accuracy: 35.45%\n",
      "Epoch [14/235], Loss: 2.1498, Accuracy: 38.25%\n",
      "Epoch [15/235], Loss: 2.1207, Accuracy: 40.17%\n",
      "Epoch [16/235], Loss: 2.0939, Accuracy: 57.84%\n",
      "Epoch [17/235], Loss: 2.0639, Accuracy: 44.15%\n",
      "Epoch [18/235], Loss: 2.0220, Accuracy: 38.73%\n",
      "Epoch [19/235], Loss: 1.9954, Accuracy: 48.78%\n",
      "Epoch [20/235], Loss: 1.9364, Accuracy: 42.64%\n",
      "Epoch [21/235], Loss: 1.8842, Accuracy: 55.34%\n",
      "Epoch [22/235], Loss: 1.8633, Accuracy: 41.83%\n",
      "Epoch [23/235], Loss: 1.9300, Accuracy: 22.86%\n",
      "Epoch [24/235], Loss: 1.8553, Accuracy: 37.51%\n",
      "Epoch [25/235], Loss: 1.7612, Accuracy: 46.29%\n",
      "Epoch [26/235], Loss: 1.5943, Accuracy: 69.58%\n",
      "Epoch [27/235], Loss: 1.4488, Accuracy: 71.72%\n",
      "Epoch [28/235], Loss: 1.3438, Accuracy: 67.76%\n",
      "Epoch [29/235], Loss: 1.5384, Accuracy: 40.62%\n",
      "Epoch [30/235], Loss: 1.4827, Accuracy: 52.63%\n",
      "Epoch [31/235], Loss: 1.3617, Accuracy: 65.95%\n",
      "Epoch [32/235], Loss: 1.1516, Accuracy: 68.00%\n",
      "Epoch [33/235], Loss: 1.2371, Accuracy: 57.30%\n",
      "Epoch [34/235], Loss: 1.3894, Accuracy: 56.94%\n",
      "Epoch [35/235], Loss: 1.1771, Accuracy: 60.01%\n",
      "Epoch [36/235], Loss: 1.0243, Accuracy: 64.71%\n",
      "Epoch [37/235], Loss: 1.0436, Accuracy: 64.75%\n",
      "Epoch [38/235], Loss: 1.0169, Accuracy: 67.32%\n",
      "Epoch [39/235], Loss: 1.0013, Accuracy: 68.41%\n",
      "Epoch [40/235], Loss: 0.9711, Accuracy: 66.47%\n",
      "Epoch [41/235], Loss: 0.8870, Accuracy: 69.81%\n",
      "Epoch [42/235], Loss: 0.9828, Accuracy: 67.41%\n",
      "Epoch [43/235], Loss: 0.8286, Accuracy: 74.05%\n",
      "Epoch [44/235], Loss: 0.8147, Accuracy: 74.51%\n",
      "Epoch [45/235], Loss: 0.8852, Accuracy: 70.91%\n",
      "Epoch [46/235], Loss: 0.6712, Accuracy: 80.13%\n",
      "Epoch [47/235], Loss: 0.6474, Accuracy: 77.19%\n",
      "Epoch [48/235], Loss: 0.8319, Accuracy: 69.70%\n",
      "Epoch [49/235], Loss: 1.2087, Accuracy: 59.93%\n",
      "Epoch [50/235], Loss: 0.9359, Accuracy: 65.99%\n",
      "Epoch [51/235], Loss: 0.6765, Accuracy: 81.67%\n",
      "Epoch [52/235], Loss: 0.6251, Accuracy: 78.65%\n",
      "Epoch [53/235], Loss: 0.5476, Accuracy: 84.62%\n",
      "Epoch [54/235], Loss: 0.5869, Accuracy: 81.78%\n",
      "Epoch [55/235], Loss: 0.5459, Accuracy: 83.77%\n",
      "Epoch [56/235], Loss: 0.6838, Accuracy: 79.49%\n",
      "Epoch [57/235], Loss: 0.6087, Accuracy: 80.83%\n",
      "Epoch [58/235], Loss: 0.4863, Accuracy: 85.44%\n",
      "Epoch [59/235], Loss: 0.5309, Accuracy: 83.15%\n",
      "Epoch [60/235], Loss: 0.5165, Accuracy: 83.27%\n",
      "Epoch [61/235], Loss: 0.5225, Accuracy: 83.44%\n",
      "Epoch [62/235], Loss: 0.5760, Accuracy: 79.97%\n",
      "Epoch [63/235], Loss: 0.5864, Accuracy: 79.90%\n",
      "Epoch [64/235], Loss: 0.4578, Accuracy: 85.44%\n",
      "Epoch [65/235], Loss: 0.5366, Accuracy: 81.63%\n",
      "Epoch [66/235], Loss: 0.4636, Accuracy: 85.02%\n",
      "Epoch [67/235], Loss: 0.4361, Accuracy: 86.57%\n",
      "Epoch [68/235], Loss: 0.4250, Accuracy: 86.40%\n",
      "Epoch [69/235], Loss: 0.5160, Accuracy: 82.60%\n",
      "Epoch [70/235], Loss: 0.6355, Accuracy: 79.05%\n",
      "Epoch [71/235], Loss: 0.4590, Accuracy: 86.23%\n",
      "Epoch [72/235], Loss: 0.3834, Accuracy: 88.62%\n",
      "Epoch [73/235], Loss: 0.3541, Accuracy: 90.10%\n",
      "Epoch [74/235], Loss: 0.3730, Accuracy: 88.56%\n",
      "Epoch [75/235], Loss: 0.3611, Accuracy: 89.25%\n",
      "Epoch [76/235], Loss: 0.3937, Accuracy: 87.54%\n",
      "Epoch [77/235], Loss: 0.4583, Accuracy: 85.52%\n",
      "Epoch [78/235], Loss: 0.5186, Accuracy: 81.55%\n",
      "Epoch [79/235], Loss: 0.6961, Accuracy: 78.32%\n",
      "Epoch [80/235], Loss: 0.7746, Accuracy: 74.76%\n",
      "Epoch [81/235], Loss: 0.8533, Accuracy: 80.11%\n",
      "Epoch [82/235], Loss: 0.4944, Accuracy: 86.28%\n",
      "Epoch [83/235], Loss: 0.3837, Accuracy: 90.33%\n",
      "Epoch [84/235], Loss: 0.3471, Accuracy: 90.34%\n",
      "Epoch [85/235], Loss: 0.3457, Accuracy: 90.53%\n",
      "Epoch [86/235], Loss: 0.3170, Accuracy: 90.78%\n",
      "Epoch [87/235], Loss: 0.3318, Accuracy: 90.59%\n",
      "Epoch [88/235], Loss: 0.3438, Accuracy: 89.64%\n",
      "Epoch [89/235], Loss: 0.3869, Accuracy: 87.73%\n",
      "Epoch [90/235], Loss: 0.3782, Accuracy: 88.03%\n",
      "Epoch [91/235], Loss: 0.3389, Accuracy: 89.84%\n",
      "Epoch [92/235], Loss: 0.3680, Accuracy: 88.14%\n",
      "Epoch [93/235], Loss: 0.3550, Accuracy: 88.92%\n",
      "Epoch [94/235], Loss: 0.3025, Accuracy: 91.03%\n",
      "Epoch [95/235], Loss: 0.2993, Accuracy: 91.23%\n",
      "Epoch [96/235], Loss: 0.3258, Accuracy: 89.86%\n",
      "Epoch [97/235], Loss: 0.3114, Accuracy: 90.69%\n",
      "Epoch [98/235], Loss: 0.3336, Accuracy: 89.25%\n",
      "Epoch [99/235], Loss: 0.3396, Accuracy: 89.75%\n",
      "Epoch [100/235], Loss: 0.2854, Accuracy: 91.46%\n",
      "Epoch [101/235], Loss: 0.2797, Accuracy: 91.48%\n",
      "Epoch [102/235], Loss: 0.2755, Accuracy: 92.15%\n",
      "Epoch [103/235], Loss: 0.3140, Accuracy: 89.90%\n",
      "Epoch [104/235], Loss: 0.2923, Accuracy: 91.20%\n",
      "Epoch [105/235], Loss: 0.2737, Accuracy: 91.64%\n",
      "Epoch [106/235], Loss: 0.3138, Accuracy: 90.20%\n",
      "Epoch [107/235], Loss: 0.3244, Accuracy: 89.39%\n",
      "Epoch [108/235], Loss: 0.2700, Accuracy: 91.92%\n",
      "Epoch [109/235], Loss: 0.2753, Accuracy: 91.49%\n",
      "Epoch [110/235], Loss: 0.2532, Accuracy: 92.47%\n",
      "Epoch [111/235], Loss: 0.2704, Accuracy: 91.44%\n",
      "Epoch [112/235], Loss: 0.2984, Accuracy: 90.80%\n",
      "Epoch [113/235], Loss: 0.3478, Accuracy: 88.67%\n",
      "Epoch [114/235], Loss: 0.4613, Accuracy: 84.15%\n",
      "Epoch [115/235], Loss: 0.5927, Accuracy: 83.59%\n",
      "Epoch [116/235], Loss: 0.4066, Accuracy: 86.46%\n",
      "Epoch [117/235], Loss: 0.2749, Accuracy: 92.19%\n",
      "Epoch [118/235], Loss: 0.2612, Accuracy: 92.58%\n",
      "Epoch [119/235], Loss: 0.2524, Accuracy: 92.85%\n",
      "Epoch [120/235], Loss: 0.2420, Accuracy: 92.94%\n",
      "Epoch [121/235], Loss: 0.2438, Accuracy: 92.80%\n",
      "Epoch [122/235], Loss: 0.2397, Accuracy: 93.00%\n",
      "Epoch [123/235], Loss: 0.2547, Accuracy: 92.46%\n",
      "Epoch [124/235], Loss: 0.2367, Accuracy: 93.05%\n",
      "Epoch [125/235], Loss: 0.2503, Accuracy: 92.62%\n",
      "Epoch [126/235], Loss: 0.2881, Accuracy: 90.88%\n",
      "Epoch [127/235], Loss: 0.2496, Accuracy: 92.31%\n",
      "Epoch [128/235], Loss: 0.2406, Accuracy: 92.92%\n",
      "Epoch [129/235], Loss: 0.2295, Accuracy: 93.07%\n",
      "Epoch [130/235], Loss: 0.2312, Accuracy: 93.14%\n",
      "Epoch [131/235], Loss: 0.2341, Accuracy: 92.90%\n",
      "Epoch [132/235], Loss: 0.2217, Accuracy: 93.31%\n",
      "Epoch [133/235], Loss: 0.2204, Accuracy: 93.44%\n",
      "Epoch [134/235], Loss: 0.2161, Accuracy: 93.55%\n",
      "Epoch [135/235], Loss: 0.2209, Accuracy: 93.34%\n",
      "Epoch [136/235], Loss: 0.2449, Accuracy: 92.47%\n",
      "Epoch [137/235], Loss: 0.2538, Accuracy: 92.50%\n",
      "Epoch [138/235], Loss: 0.2260, Accuracy: 93.18%\n",
      "Epoch [139/235], Loss: 0.2424, Accuracy: 92.38%\n",
      "Epoch [140/235], Loss: 0.2541, Accuracy: 92.22%\n",
      "Epoch [141/235], Loss: 0.2348, Accuracy: 92.96%\n",
      "Epoch [142/235], Loss: 0.2199, Accuracy: 93.44%\n",
      "Epoch [143/235], Loss: 0.2100, Accuracy: 93.72%\n",
      "Epoch [144/235], Loss: 0.2221, Accuracy: 93.21%\n",
      "Epoch [145/235], Loss: 0.2163, Accuracy: 93.48%\n",
      "Epoch [146/235], Loss: 0.2301, Accuracy: 92.84%\n",
      "Epoch [147/235], Loss: 0.2393, Accuracy: 92.51%\n",
      "Epoch [148/235], Loss: 0.2249, Accuracy: 93.20%\n",
      "Epoch [149/235], Loss: 0.2552, Accuracy: 91.98%\n",
      "Epoch [150/235], Loss: 0.2178, Accuracy: 93.58%\n",
      "Epoch [151/235], Loss: 0.2268, Accuracy: 92.91%\n",
      "Epoch [152/235], Loss: 0.2093, Accuracy: 93.84%\n",
      "Epoch [153/235], Loss: 0.2025, Accuracy: 94.19%\n",
      "Epoch [154/235], Loss: 0.2075, Accuracy: 93.79%\n",
      "Epoch [155/235], Loss: 0.1940, Accuracy: 94.28%\n",
      "Epoch [156/235], Loss: 0.1948, Accuracy: 94.22%\n",
      "Epoch [157/235], Loss: 0.1973, Accuracy: 94.16%\n",
      "Epoch [158/235], Loss: 0.1986, Accuracy: 94.01%\n",
      "Epoch [159/235], Loss: 0.1921, Accuracy: 94.15%\n",
      "Epoch [160/235], Loss: 0.1941, Accuracy: 94.21%\n",
      "Epoch [161/235], Loss: 0.1942, Accuracy: 94.06%\n",
      "Epoch [162/235], Loss: 0.2031, Accuracy: 93.87%\n",
      "Epoch [163/235], Loss: 0.2405, Accuracy: 92.21%\n",
      "Epoch [164/235], Loss: 0.2329, Accuracy: 92.92%\n",
      "Epoch [165/235], Loss: 0.1998, Accuracy: 93.93%\n",
      "Epoch [166/235], Loss: 0.1927, Accuracy: 94.17%\n",
      "Epoch [167/235], Loss: 0.1822, Accuracy: 94.55%\n",
      "Epoch [168/235], Loss: 0.2100, Accuracy: 93.56%\n",
      "Epoch [169/235], Loss: 0.2096, Accuracy: 93.36%\n",
      "Epoch [170/235], Loss: 0.2506, Accuracy: 92.62%\n",
      "Epoch [171/235], Loss: 0.2103, Accuracy: 93.58%\n",
      "Epoch [172/235], Loss: 0.1853, Accuracy: 94.44%\n",
      "Epoch [173/235], Loss: 0.1936, Accuracy: 94.24%\n",
      "Epoch [174/235], Loss: 0.1983, Accuracy: 94.04%\n",
      "Epoch [175/235], Loss: 0.2064, Accuracy: 93.77%\n",
      "Epoch [176/235], Loss: 0.1976, Accuracy: 93.99%\n",
      "Epoch [177/235], Loss: 0.1831, Accuracy: 94.65%\n",
      "Epoch [178/235], Loss: 0.2045, Accuracy: 93.72%\n",
      "Epoch [179/235], Loss: 0.1775, Accuracy: 94.75%\n",
      "Epoch [180/235], Loss: 0.1734, Accuracy: 94.93%\n",
      "Epoch [181/235], Loss: 0.2063, Accuracy: 93.67%\n",
      "Epoch [182/235], Loss: 0.1832, Accuracy: 94.46%\n",
      "Epoch [183/235], Loss: 0.1690, Accuracy: 95.06%\n",
      "Epoch [184/235], Loss: 0.1992, Accuracy: 94.02%\n",
      "Epoch [185/235], Loss: 0.1705, Accuracy: 94.89%\n",
      "Epoch [186/235], Loss: 0.1719, Accuracy: 94.99%\n",
      "Epoch [187/235], Loss: 0.1852, Accuracy: 94.43%\n",
      "Epoch [188/235], Loss: 0.1677, Accuracy: 95.00%\n",
      "Epoch [189/235], Loss: 0.1686, Accuracy: 94.97%\n",
      "Epoch [190/235], Loss: 0.1795, Accuracy: 94.48%\n",
      "Epoch [191/235], Loss: 0.2111, Accuracy: 93.41%\n",
      "Epoch [192/235], Loss: 0.1854, Accuracy: 94.30%\n",
      "Epoch [193/235], Loss: 0.2016, Accuracy: 93.64%\n",
      "Epoch [194/235], Loss: 0.2159, Accuracy: 92.83%\n",
      "Epoch [195/235], Loss: 0.1654, Accuracy: 95.14%\n",
      "Epoch [196/235], Loss: 0.1672, Accuracy: 95.16%\n",
      "Epoch [197/235], Loss: 0.1619, Accuracy: 95.28%\n",
      "Epoch [198/235], Loss: 0.1617, Accuracy: 95.19%\n",
      "Epoch [199/235], Loss: 0.1578, Accuracy: 95.44%\n",
      "Epoch [200/235], Loss: 0.1783, Accuracy: 94.65%\n",
      "Epoch [201/235], Loss: 0.1757, Accuracy: 94.64%\n",
      "Epoch [202/235], Loss: 0.1668, Accuracy: 94.92%\n",
      "Epoch [203/235], Loss: 0.1957, Accuracy: 94.01%\n",
      "Epoch [204/235], Loss: 0.1548, Accuracy: 95.42%\n",
      "Epoch [205/235], Loss: 0.1636, Accuracy: 95.09%\n",
      "Epoch [206/235], Loss: 0.1534, Accuracy: 95.36%\n",
      "Epoch [207/235], Loss: 0.1840, Accuracy: 94.38%\n",
      "Epoch [208/235], Loss: 0.1698, Accuracy: 94.92%\n",
      "Epoch [209/235], Loss: 0.1691, Accuracy: 94.90%\n",
      "Epoch [210/235], Loss: 0.1639, Accuracy: 95.00%\n",
      "Epoch [211/235], Loss: 0.1569, Accuracy: 95.29%\n",
      "Epoch [212/235], Loss: 0.1541, Accuracy: 95.43%\n",
      "Epoch [213/235], Loss: 0.1605, Accuracy: 95.17%\n",
      "Epoch [214/235], Loss: 0.1521, Accuracy: 95.41%\n",
      "Epoch [215/235], Loss: 0.1522, Accuracy: 95.49%\n",
      "Epoch [216/235], Loss: 0.1523, Accuracy: 95.39%\n",
      "Epoch [217/235], Loss: 0.1663, Accuracy: 94.97%\n",
      "Epoch [218/235], Loss: 0.1527, Accuracy: 95.46%\n",
      "Epoch [219/235], Loss: 0.1524, Accuracy: 95.57%\n",
      "Epoch [220/235], Loss: 0.1548, Accuracy: 95.49%\n",
      "Epoch [221/235], Loss: 0.1594, Accuracy: 95.24%\n",
      "Epoch [222/235], Loss: 0.1508, Accuracy: 95.54%\n",
      "Epoch [223/235], Loss: 0.1496, Accuracy: 95.60%\n",
      "Epoch [224/235], Loss: 0.1565, Accuracy: 95.39%\n",
      "Epoch [225/235], Loss: 0.1491, Accuracy: 95.54%\n",
      "Epoch [226/235], Loss: 0.1681, Accuracy: 95.04%\n",
      "Epoch [227/235], Loss: 0.1539, Accuracy: 95.36%\n",
      "Epoch [228/235], Loss: 0.1463, Accuracy: 95.71%\n",
      "Epoch [229/235], Loss: 0.1478, Accuracy: 95.55%\n",
      "Epoch [230/235], Loss: 0.1570, Accuracy: 95.17%\n",
      "Epoch [231/235], Loss: 0.1530, Accuracy: 95.42%\n",
      "Epoch [232/235], Loss: 0.1437, Accuracy: 95.66%\n",
      "Epoch [233/235], Loss: 0.1551, Accuracy: 95.27%\n",
      "Epoch [234/235], Loss: 0.1676, Accuracy: 94.92%\n",
      "Epoch [235/235], Loss: 0.1668, Accuracy: 94.86%\n",
      "Test Accuracy: 95.08%\n"
     ]
    }
   ],
   "source": [
    "import numpy as np\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.optim as optim\n",
    "import torch.nn.functional as F\n",
    "from torch.utils.data import DataLoader\n",
    "from torchvision import datasets, transforms\n",
    "import matplotlib.pyplot as plt\n",
    "import copy\n",
    "import random\n",
    "import pandas as pd\n",
    "# 1. 设置随机种子（确保结果可复现）\n",
    "def set_seed(seed):\n",
    "    torch.manual_seed(seed)\n",
    "    torch.cuda.manual_seed(seed)\n",
    "    torch.cuda.manual_seed_all(seed)\n",
    "    np.random.seed(seed)\n",
    "    random.seed(seed)\n",
    "    torch.backends.cudnn.deterministic = True\n",
    "    torch.backends.cudnn.benchmark = False\n",
    "\n",
     "# Fixed seed for reproducibility\n",
     "seed = 42\n",
     "set_seed(seed)\n",
     "# 2. Prepare the dataset\n",
     "# Define the preprocessing pipeline\n",
     "transform = transforms.Compose([\n",
     "    transforms.ToTensor(),  # convert image to a tensor in [0, 1]\n",
     "    transforms.Normalize((0.5,), (0.5,))  # normalize to [-1, 1]\n",
     "])\n",
     "\n",
     "# Download/load the MNIST train and test splits\n",
     "train_dataset = datasets.MNIST(root='./data', train=True, download=True, transform=transform)\n",
     "test_dataset = datasets.MNIST(root='./data', train=False, download=True, transform=transform)\n",
     "\n",
     "# Mini-batch loaders; shuffle only the training data\n",
     "train_loader = DataLoader(train_dataset, batch_size=256, shuffle=True)\n",
     "test_loader = DataLoader(test_dataset, batch_size=256, shuffle=False)\n",
    "\n",
     "# 3. Define the CNN model\n",
     "class CNN(nn.Module):\n",
     "    \"\"\"Two conv+pool stages followed by two fully-connected layers for 10-class MNIST.\"\"\"\n",
     "    def __init__(self):\n",
     "        super(CNN, self).__init__()\n",
     "        self.conv1 = nn.Conv2d(1, 32, kernel_size=3, padding=1)  # 1 input channel -> 32\n",
     "        self.conv2 = nn.Conv2d(32, 64, kernel_size=3, padding=1)  # 32 channels -> 64\n",
     "        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)  # halves spatial size\n",
     "        self.fc1 = nn.Linear(64 * 7 * 7, 128)  # 28 -> 14 -> 7 after the two pools\n",
     "        self.fc2 = nn.Linear(128, 10)  # output layer, 10 classes\n",
     "\n",
     "    def forward(self, x):\n",
     "        x = self.pool(F.relu(self.conv1(x)))  # conv -> ReLU -> pool\n",
     "        x = self.pool(F.relu(self.conv2(x)))  # conv -> ReLU -> pool\n",
     "        x = x.view(-1, 64 * 7 * 7)  # flatten to (batch, 64*7*7)\n",
     "        x = F.relu(self.fc1(x))  # fully connected -> ReLU\n",
     "        x = self.fc2(x)  # raw logits (CrossEntropyLoss applies softmax itself)\n",
     "        return x\n",
    "    \n",
    "\n",
    "def add_models(model1, model2):\n",
    "    result = CNN()  # 创建一个新的CNN实例\n",
    "    for param1, param2, param_result in zip(model1.parameters(), model2.parameters(), result.parameters()):\n",
    "        param_result.data = param1.data + param2.data  # 逐元素相加\n",
    "    return result\n",
    "    \n",
    "\n",
    "# 定义数乘操作\n",
    "def multiply_model(model, scalar):\n",
    "    result = CNN()  # 创建一个新的CNN实例\n",
    "    for param, param_result in zip(model.parameters(), result.parameters()):\n",
    "        param_result.data = param.data * scalar  # 逐元素乘以标量\n",
    "    return result\n",
    "\n",
    "# 4. Custom optimization algorithm (vanilla gradient descent)\n",
    "class CustomOptimizer(optim.Optimizer):\n",
    "    \"\"\"Plain gradient descent: theta <- theta - lr * grad (no momentum, no decay).\"\"\"\n",
    "\n",
    "    def __init__(self, params, lr=0.01):\n",
    "        defaults = dict(lr=lr)\n",
    "        super(CustomOptimizer, self).__init__(params, defaults)\n",
    "\n",
    "    def step(self, closure=None):\n",
    "        \"\"\"Perform a single optimization step.\n",
    "\n",
    "        Args:\n",
    "            closure: optional callable that re-evaluates the model and returns the loss.\n",
    "        Returns:\n",
    "            The loss from `closure`, or None if no closure was given.\n",
    "        \"\"\"\n",
    "        loss = None\n",
    "        if closure is not None:\n",
    "            loss = closure()\n",
    "\n",
    "        for group in self.param_groups:\n",
    "            for p in group['params']:\n",
    "                if p.grad is None:\n",
    "                    continue\n",
    "                # Gradient of this parameter\n",
    "                grad = p.grad.data\n",
    "                # Update: theta = theta - lr * grad.\n",
    "                # Fixed: use add_(other, *, alpha=...) instead of the deprecated\n",
    "                # positional add_(Number, Tensor) overload (see the UserWarning\n",
    "                # in this cell's output).\n",
    "                p.data.add_(grad, alpha=-group['lr'])\n",
    "\n",
    "        return loss\n",
    "\n",
    "# 5. Optimizer factory interface\n",
    "def get_optimizer(model, optimizer_name, learning_rate):\n",
    "    \"\"\"Create the optimizer named by `optimizer_name` over `model`'s parameters.\"\"\"\n",
    "    factories = {\n",
    "        'adam': lambda params: optim.Adam(params, lr=learning_rate),\n",
    "        'sgd': lambda params: optim.SGD(params, lr=learning_rate, momentum=0.9),\n",
    "        'rmsprop': lambda params: optim.RMSprop(params, lr=learning_rate),\n",
    "        'custom': lambda params: CustomOptimizer(params, lr=learning_rate),\n",
    "    }\n",
    "    if optimizer_name not in factories:\n",
    "        raise ValueError(f\"Unsupported optimizer: {optimizer_name}\")\n",
    "    return factories[optimizer_name](model.parameters())\n",
    "\n",
    "# 6. Model training (plain gradient-descent baseline)\n",
    "def train_model_GD(model, train_loader, test_loader, optimizer_name, learning_rate, num_epochs):\n",
    "    \"\"\"Train `model`, log loss/accuracy over the FULL training set after every\n",
    "    optimization step, save the curves to Excel, then report test accuracy.\n",
    "\n",
    "    NOTE(review): the inner evaluation loop re-runs the entire train_loader\n",
    "    after every single batch, so one epoch costs O(num_batches^2) forward\n",
    "    passes -- presumably intentional for these experiments, but very slow.\n",
    "    \"\"\"\n",
    "    # Loss function and optimizer\n",
    "    criterion = nn.CrossEntropyLoss()\n",
    "    optimizer = get_optimizer(model, optimizer_name, learning_rate)\n",
    "\n",
    "    train_losses = []\n",
    "    train_accuracies = []\n",
    "    k=0  # global step counter (counts batches across all epochs)\n",
    "    for epoch in range(num_epochs):\n",
    "        model.train()  # training mode\n",
    "        running_loss = 0.0\n",
    "        correct = 0\n",
    "        total = 0\n",
    "\n",
    "        for images, labels in train_loader:\n",
    "            # forward pass\n",
    "            k+=1\n",
    "            outputs = model(images)\n",
    "            loss = criterion(outputs, labels)\n",
    "\n",
    "            # backward pass and parameter update\n",
    "            optimizer.zero_grad()\n",
    "            loss.backward()\n",
    "            optimizer.step()\n",
    "\n",
    "            # Re-evaluate on the whole training set. NOTE(review): this loop\n",
    "            # shadows the outer loop's `images`/`labels` (the outer iterator\n",
    "            # itself is unaffected).\n",
    "            running_loss = 0.0\n",
    "            correct = 0\n",
    "            total = 0\n",
    "            for images, labels in train_loader:\n",
    "                outputs = model(images)\n",
    "                loss = criterion(outputs, labels)\n",
    "                running_loss += loss.item()\n",
    "                _, predicted = torch.max(outputs.data, 1)\n",
    "                total += labels.size(0)\n",
    "                correct += (predicted == labels).sum().item()\n",
    "            # Loss and accuracy over the full training set (per step, not per epoch)\n",
    "            epoch_loss = running_loss / len(train_loader)\n",
    "            epoch_accuracy = 100 * correct / total\n",
    "            train_losses.append(epoch_loss)\n",
    "            train_accuracies.append(epoch_accuracy)\n",
    "    \n",
    "            print(f\"Epoch [{k}/{len(train_loader)*num_epochs}], Loss: {epoch_loss:.4f}, Accuracy: {epoch_accuracy:.2f}%\")\n",
    "            # NOTE(review): this `break` only exits the batch loop; the outer\n",
    "            # epoch loop keeps going.\n",
    "            if epoch_accuracy > 99:\n",
    "                break\n",
    "    pd.DataFrame(train_losses).to_excel('loss_GD.xlsx')  \n",
    "    pd.DataFrame(train_accuracies).to_excel('accuracies_GD.xlsx')  \n",
    "    # Evaluate on the held-out test set\n",
    "    model.eval()  # evaluation mode\n",
    "    test_correct = 0\n",
    "    test_total = 0\n",
    "\n",
    "    with torch.no_grad():  # no gradients needed during evaluation\n",
    "        for images, labels in test_loader:\n",
    "            outputs = model(images)\n",
    "            _, predicted = torch.max(outputs.data, 1)\n",
    "            test_total += labels.size(0)\n",
    "            test_correct += (predicted == labels).sum().item()\n",
    "\n",
    "    test_accuracy = 100 * test_correct / test_total\n",
    "    print(f\"Test Accuracy: {test_accuracy:.2f}%\")\n",
    "\n",
    "    ## (Optional) visualize the training curves\n",
    "    #plt.figure(figsize=(12, 4))\n",
    "    #plt.subplot(1, 2, 1)\n",
    "    #plt.plot(train_losses, label='Training Loss')\n",
    "    #plt.xlabel('Epoch')\n",
    "    #plt.ylabel('Loss')\n",
    "    #plt.legend()\n",
    "#\n",
    "    #plt.subplot(1, 2, 2)\n",
    "    #plt.plot(train_accuracies, label='Training Accuracy')\n",
    "    #plt.xlabel('Epoch')\n",
    "    #plt.ylabel('Accuracy')\n",
    "    #plt.legend()\n",
    "    #plt.show()\n",
    "\n",
    "def train_model_NSA(model, train_loader, test_loader, optimizer_name, learning_rate, num_epochs):\n",
    "    \"\"\"Train with a three-sequence averaging scheme over copies x, y, z of `model`.\n",
    "\n",
    "    Per step: y = (1-alpha)*x + alpha*z with alpha = 5/(k+5); gradient step on y;\n",
    "    copy y into x; then step z with y's gradient divided by alpha.\n",
    "    NOTE(review): looks like a Nesterov-style acceleration scheme -- confirm\n",
    "    against the reference this notebook follows.\n",
    "    \"\"\"\n",
    "    # Loss function and optimizer\n",
    "    criterion = nn.CrossEntropyLoss()\n",
    "    optimizer = get_optimizer(model, optimizer_name, learning_rate)  # NOTE(review): never used below\n",
    "\n",
    "    train_losses = []\n",
    "    train_accuracies = []\n",
    "\n",
    "    # Three working copies sharing the same initial weights\n",
    "    model_x = copy.deepcopy(model)\n",
    "    model_y = copy.deepcopy(model)\n",
    "    model_z = copy.deepcopy(model)\n",
    "\n",
    "    optimizer_x = get_optimizer(model_x, optimizer_name, learning_rate)  # NOTE(review): unused in this variant\n",
    "    optimizer_y = get_optimizer(model_y, optimizer_name, learning_rate)\n",
    "    optimizer_z = get_optimizer(model_z, optimizer_name, learning_rate)\n",
    "\n",
    "    k = 0  # global step counter (counts batches across all epochs)\n",
    "    for epoch in range(num_epochs):\n",
    "        model_x.train()\n",
    "        model_y.train()  # training mode\n",
    "        model_z.train()  # training mode\n",
    "        running_loss = 0.0\n",
    "        correct = 0\n",
    "        total = 0\n",
    "\n",
    "        for images, labels in train_loader:\n",
    "            k+=1\n",
    "            alpha = 5/(k+5)  # decaying mixing weight\n",
    "            with torch.no_grad():\n",
    "                for param_y, param_x, param_z in zip(model_y.parameters(),model_x.parameters(), model_z.parameters()):\n",
    "                    # y = (1 - alpha) * x + alpha * z\n",
    "                    param_y.data =  (1 - alpha)*param_x.data + alpha*param_z.data\n",
    "        \n",
    "            # forward pass through y\n",
    "            outputs = model_y(images)\n",
    "            loss = criterion(outputs, labels)\n",
    "\n",
    "            # backward pass and update of y\n",
    "            optimizer_y.zero_grad()\n",
    "            loss.backward()\n",
    "            optimizer_y.step()\n",
    "\n",
    "            # x <- y (direct copy of the updated parameters)\n",
    "            for param_y, param_x in zip(model_y.parameters(),model_x.parameters()):\n",
    "                param_x.data =  param_y.data\n",
    "\n",
    "            divisor = alpha\n",
    "            # Give z the gradient grad_y / alpha, then step z.\n",
    "            # NOTE(review): div_ mutates model_y's gradients in place before the\n",
    "            # clone; harmless here only because optimizer_y.zero_grad() runs\n",
    "            # before the next backward().\n",
    "            for param_z, param_y in zip(model_z.parameters(), model_y.parameters()):\n",
    "                if param_y.grad is not None:\n",
    "                    param_z.grad = param_y.grad.div_(divisor).clone()  # copy the gradient\n",
    "            optimizer_z.step()\n",
    "   \n",
    "            # Evaluate loss/accuracy on the FULL training set after every step.\n",
    "            # NOTE(review): this loop shadows the outer `images`/`labels` and\n",
    "            # makes each epoch O(num_batches^2) forward passes.\n",
    "            running_loss = 0.0\n",
    "            correct = 0\n",
    "            total = 0\n",
    "            for images, labels in train_loader:\n",
    "                outputs = model_x(images)\n",
    "                loss = criterion(outputs, labels)\n",
    "                running_loss += loss.item()\n",
    "                _, predicted = torch.max(outputs.data, 1)\n",
    "                total += labels.size(0)\n",
    "                correct += (predicted == labels).sum().item()\n",
    "            # Loss and accuracy over the full training set (per step, not per epoch)\n",
    "            epoch_loss = running_loss / len(train_loader)\n",
    "            epoch_accuracy = 100 * correct / total\n",
    "            train_losses.append(epoch_loss)\n",
    "            train_accuracies.append(epoch_accuracy)\n",
    "    \n",
    "            print(f\"Epoch [{k}/{len(train_loader)*num_epochs}], Loss: {epoch_loss:.4f}, Accuracy: {epoch_accuracy:.2f}%\")\n",
    "            # NOTE(review): only exits the batch loop; the epoch loop continues.\n",
    "            if epoch_accuracy > 99:\n",
    "                break\n",
    "    pd.DataFrame(train_losses).to_excel('loss_NSA.xlsx')  \n",
    "    pd.DataFrame(train_accuracies).to_excel('accuracies_NSA.xlsx')  \n",
    "    # Evaluate on the held-out test set (x holds the final parameters)\n",
    "    model_x.eval()  # evaluation mode\n",
    "    test_correct = 0\n",
    "    test_total = 0\n",
    "    with torch.no_grad():  # no gradients needed during evaluation\n",
    "        for images, labels in test_loader:\n",
    "            outputs = model_x(images)\n",
    "            _, predicted = torch.max(outputs.data, 1)\n",
    "            test_total += labels.size(0)\n",
    "            test_correct += (predicted == labels).sum().item()\n",
    "\n",
    "    test_accuracy = 100 * test_correct / test_total\n",
    "    print(f\"Test Accuracy: {test_accuracy:.2f}%\")\n",
    "\n",
    "    ## (Optional) visualize the training curves\n",
    "    #plt.figure(figsize=(12, 4))\n",
    "    #plt.subplot(1, 2, 1)\n",
    "    #plt.plot(train_losses, label='Training Loss')\n",
    "    #plt.xlabel('Epoch')\n",
    "    #plt.ylabel('Loss')\n",
    "    #plt.legend()\n",
    "#\n",
    "    #plt.subplot(1, 2, 2)\n",
    "    #plt.plot(train_accuracies, label='Training Accuracy')\n",
    "    #plt.xlabel('Epoch')\n",
    "    #plt.ylabel('Accuracy')\n",
    "    #plt.legend()\n",
    "    #plt.show()\n",
    "\n",
    "def train_model_NSA_plus(model, train_loader, test_loader, optimizer_name, learning_rate, num_epochs):\n",
    "    \"\"\"NSA variant: additionally steps x directly and keeps whichever of x, y\n",
    "    has the lower batch loss after their updates.\n",
    "\n",
    "    Per step: y = (1-alpha)*x + alpha*z; gradient steps on y and on x; if y's\n",
    "    post-update loss is <= x's, overwrite x with y; then step z with y's\n",
    "    gradient divided by alpha.\n",
    "    \"\"\"\n",
    "    # Loss function and optimizer\n",
    "    criterion = nn.CrossEntropyLoss()\n",
    "    optimizer = get_optimizer(model, optimizer_name, learning_rate)  # NOTE(review): never used below\n",
    "\n",
    "    train_losses = []\n",
    "    train_accuracies = []\n",
    "\n",
    "    # Three working copies sharing the same initial weights\n",
    "    model_x = copy.deepcopy(model)\n",
    "    model_y = copy.deepcopy(model)\n",
    "    model_z = copy.deepcopy(model)\n",
    "\n",
    "    optimizer_x = get_optimizer(model_x, optimizer_name, learning_rate)\n",
    "    optimizer_y = get_optimizer(model_y, optimizer_name, learning_rate)\n",
    "    optimizer_z = get_optimizer(model_z, optimizer_name, learning_rate)\n",
    "\n",
    "    k = 0  # global step counter (counts batches across all epochs)\n",
    "    for epoch in range(num_epochs):\n",
    "        model_x.train()\n",
    "        model_y.train()  # training mode\n",
    "        model_z.train()  # training mode\n",
    "        running_loss = 0.0\n",
    "        correct = 0\n",
    "        total = 0\n",
    "        for images, labels in train_loader:\n",
    "            k+=1\n",
    "            alpha = 5/(k+5)  # decaying mixing weight\n",
    "            with torch.no_grad():\n",
    "                for param_y, param_x, param_z in zip(model_y.parameters(),model_x.parameters(), model_z.parameters()):\n",
    "                    # y = (1 - alpha) * x + alpha * z\n",
    "                    param_y.data =  (1 - alpha)*param_x.data + alpha*param_z.data\n",
    "\n",
    "            # gradient step on y\n",
    "            outputs = model_y(images)\n",
    "            loss = criterion(outputs, labels)\n",
    "            optimizer_y.zero_grad()\n",
    "            loss.backward()\n",
    "            optimizer_y.step()\n",
    "\n",
    "            # gradient step on x using the same batch\n",
    "            outputs = model_x(images)\n",
    "            loss = criterion(outputs, labels)\n",
    "            optimizer_x.zero_grad()\n",
    "            loss.backward()\n",
    "            optimizer_x.step()\n",
    "\n",
    "            # Compare post-update losses of x and y on this batch\n",
    "            outputs_x = model_x(images)\n",
    "            loss_x = criterion(outputs_x, labels)\n",
    "            outputs_y = model_y(images)\n",
    "            loss_y = criterion(outputs_y, labels)\n",
    "\n",
    "            with torch.no_grad():\n",
    "                for param_x,param_y in zip(model_x.parameters(),model_y.parameters()):\n",
    "                    # Keep x if it is strictly better; otherwise overwrite x with y.\n",
    "                    # NOTE(review): the condition is identical for every parameter\n",
    "                    # and could be hoisted out of this loop.\n",
    "                    if loss_x < loss_y:\n",
    "                        continue\n",
    "                    else:\n",
    "                        param_x.data =  param_y.data.clone()\n",
    "\n",
    "            # NOTE(review): this recomputed loss_x is never used afterwards.\n",
    "            outputs_x = model_x(images)\n",
    "            loss_x = criterion(outputs_x, labels)\n",
    "        \n",
    "            divisor = alpha\n",
    "            # Give z the gradient grad_y / alpha, then step z.\n",
    "            # NOTE(review): div_ mutates model_y's gradients in place before the\n",
    "            # clone; harmless here only because optimizer_y.zero_grad() runs\n",
    "            # before the next backward().\n",
    "            for param_z, param_y in zip(model_z.parameters(), model_y.parameters()):\n",
    "                if param_y.grad is not None:\n",
    "                    param_z.grad = param_y.grad.div_(divisor).clone()  # copy the gradient\n",
    "            optimizer_z.step()\n",
    "\n",
    "            running_loss = 0.0\n",
    "            correct = 0\n",
    "            total = 0\n",
    "            # Evaluate loss/accuracy on the FULL training set after every step.\n",
    "            # NOTE(review): this loop shadows the outer `images`/`labels` and\n",
    "            # makes each epoch O(num_batches^2) forward passes.\n",
    "            for images, labels in train_loader:\n",
    "                outputs = model_x(images)\n",
    "                loss = criterion(outputs, labels)\n",
    "                running_loss += loss.item()\n",
    "                _, predicted = torch.max(outputs.data, 1)\n",
    "                total += labels.size(0)\n",
    "                correct += (predicted == labels).sum().item()\n",
    "            # Loss and accuracy over the full training set (per step, not per epoch)\n",
    "            epoch_loss = running_loss / len(train_loader)\n",
    "            epoch_accuracy = 100 * correct / total\n",
    "            train_losses.append(epoch_loss)\n",
    "            train_accuracies.append(epoch_accuracy)\n",
    "            print(f\"Epoch [{k}/{len(train_loader)*num_epochs}], Loss: {epoch_loss:.4f}, Accuracy: {epoch_accuracy:.2f}%\")\n",
    "            # NOTE(review): only exits the batch loop; the epoch loop continues.\n",
    "            if epoch_accuracy > 99:\n",
    "                break\n",
    "           \n",
    "    pd.DataFrame(train_losses).to_excel('loss_NSA_plus.xlsx')  \n",
    "    pd.DataFrame(train_accuracies).to_excel('accuracies_NSA_plus.xlsx')  \n",
    "    # Evaluate on the held-out test set (x holds the final parameters)\n",
    "    model_x.eval()  # evaluation mode\n",
    "    test_correct = 0\n",
    "    test_total = 0\n",
    "    with torch.no_grad():  # no gradients needed during evaluation\n",
    "        for images, labels in test_loader:\n",
    "            outputs = model_x(images)\n",
    "            _, predicted = torch.max(outputs.data, 1)\n",
    "            test_total += labels.size(0)\n",
    "            test_correct += (predicted == labels).sum().item()\n",
    "\n",
    "    test_accuracy = 100 * test_correct / test_total\n",
    "    print(f\"Test Accuracy: {test_accuracy:.2f}%\")\n",
    "    \n",
    "    ## (Optional) visualize the training curves\n",
    "    #plt.figure(figsize=(12, 4))\n",
    "    #plt.subplot(1, 2, 1)\n",
    "    #plt.plot(train_losses, label='Training Loss')\n",
    "    #plt.xlabel('Epoch')\n",
    "    #plt.ylabel('Loss')\n",
    "    #plt.legend()\n",
    "#\n",
    "    #plt.subplot(1, 2, 2)\n",
    "    #plt.plot(train_accuracies, label='Training Accuracy')\n",
    "    #plt.xlabel('Epoch')\n",
    "    #plt.ylabel('Accuracy')\n",
    "    #plt.legend()\n",
    "    #plt.show()\n",
    "\n",
    "# 7. Initialize the models and train.\n",
    "# All three models are deep copies of model_0, so every method starts from\n",
    "# identical weights for a fair comparison.\n",
    "model_0 = CNN()\n",
    "model_GD = copy.deepcopy(model_0)\n",
    "model_NSA = copy.deepcopy(model_0)\n",
    "model_NSA_plus = copy.deepcopy(model_0)\n",
    "#model_GD = CNN()\n",
    "#train_model_NSA_plus(model_NSA_plus, train_loader, test_loader, optimizer_name='custom', learning_rate=0.1, num_epochs=1)\n",
    "train_model_NSA(model_NSA, train_loader, test_loader, optimizer_name='custom', learning_rate=0.1, num_epochs=1)\n",
    "train_model_GD(model_GD, train_loader, test_loader, optimizer_name='custom', learning_rate=0.1, num_epochs=1)\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_12488\\1002794318.py:91: UserWarning: This overload of add_ is deprecated:\n",
      "\tadd_(Number alpha, Tensor other)\n",
      "Consider using one of the following signatures instead:\n",
      "\tadd_(Tensor other, *, Number alpha) (Triggered internally at ..\\torch\\csrc\\utils\\python_arg_parser.cpp:1630.)\n",
      "  p.data.add_(-group['lr'], grad)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([[-0.0232, -0.0272, -0.0435,  ...,  0.0516,  0.0145, -0.1154],\n",
      "        [-0.0494,  0.0121, -0.0558,  ...,  0.0572,  0.0402, -0.1116],\n",
      "        [-0.0366,  0.0012, -0.0511,  ...,  0.0559,  0.0256, -0.1305],\n",
      "        ...,\n",
      "        [-0.0245, -0.0245,  0.0060,  ...,  0.0588,  0.0077, -0.1258],\n",
      "        [-0.0115, -0.0483, -0.0425,  ...,  0.0823,  0.0069, -0.1145],\n",
      "        [-0.0480, -0.0016, -0.0545,  ...,  0.0695,  0.0240, -0.1321]],\n",
      "       grad_fn=<AddmmBackward0>)\n",
      "Epoch [1/235], Loss: 2.2959, Accuracy: 10.01%\n",
      "tensor([[-0.0711,  0.0349, -0.1457,  ...,  0.0814, -0.0330, -0.1516],\n",
      "        [-0.0634,  0.0114, -0.1283,  ...,  0.0736, -0.0469, -0.1596],\n",
      "        [-0.0703,  0.0360, -0.1443,  ...,  0.0854, -0.0405, -0.1195],\n",
      "        ...,\n",
      "        [-0.0323,  0.0058, -0.1433,  ...,  0.1099, -0.0519, -0.1342],\n",
      "        [-0.0403, -0.0005, -0.1223,  ...,  0.0937, -0.0589, -0.1695],\n",
      "        [-0.0934,  0.0867, -0.1041,  ...,  0.0383, -0.0363, -0.1324]],\n",
      "       grad_fn=<AddmmBackward0>)\n",
      "Epoch [2/235], Loss: 2.2865, Accuracy: 20.07%\n",
      "tensor([[-0.0514,  0.1452, -0.2386,  ..., -0.0046,  0.0453, -0.2071],\n",
      "        [-0.0892,  0.1921, -0.2282,  ...,  0.0019, -0.0025, -0.1805],\n",
      "        [-0.0569,  0.1855, -0.1591,  ..., -0.0102,  0.0263, -0.2122],\n",
      "        ...,\n",
      "        [-0.0539,  0.2105, -0.2109,  ...,  0.0334,  0.0206, -0.1748],\n",
      "        [-0.0605,  0.1648, -0.2007,  ...,  0.0077, -0.0107, -0.2237],\n",
      "        [-0.0706,  0.1841, -0.2250,  ...,  0.0160,  0.0304, -0.1924]],\n",
      "       grad_fn=<AddmmBackward0>)\n",
      "Epoch [3/235], Loss: 2.2786, Accuracy: 14.03%\n",
      "tensor([[-0.1071,  0.2488, -0.2242,  ...,  0.0200,  0.0572, -0.0874],\n",
      "        [-0.0568,  0.1639, -0.1958,  ...,  0.0363,  0.0221, -0.0622],\n",
      "        [-0.1143,  0.2496, -0.1973,  ...,  0.0086,  0.0215, -0.0691],\n",
      "        ...,\n",
      "        [-0.0729,  0.1844, -0.2198,  ...,  0.0045,  0.0253, -0.0538],\n",
      "        [-0.0697,  0.1903, -0.2443,  ..., -0.0159,  0.0339, -0.0795],\n",
      "        [-0.1214,  0.2396, -0.1971,  ..., -0.0252,  0.0424, -0.0951]],\n",
      "       grad_fn=<AddmmBackward0>)\n",
      "Epoch [4/235], Loss: 2.2667, Accuracy: 11.35%\n",
      "tensor([[-0.1086, -0.0800, -0.1104,  ...,  0.1021, -0.0920,  0.0919],\n",
      "        [-0.0434, -0.1023, -0.1046,  ...,  0.0823, -0.0974,  0.0182],\n",
      "        [-0.0040, -0.1280, -0.0852,  ...,  0.0944, -0.0989,  0.0369],\n",
      "        ...,\n",
      "        [-0.0425, -0.0718, -0.1067,  ...,  0.1299, -0.0837,  0.0487],\n",
      "        [-0.0528, -0.1331, -0.1228,  ...,  0.0612, -0.0694,  0.0565],\n",
      "        [-0.1187, -0.0643, -0.0782,  ...,  0.0844, -0.0659,  0.0373]],\n",
      "       grad_fn=<AddmmBackward0>)\n",
      "Epoch [5/235], Loss: 2.2551, Accuracy: 21.63%\n",
      "tensor([[-0.0861,  0.0313, -0.1592,  ...,  0.1601, -0.1581,  0.1250],\n",
      "        [-0.1220,  0.0738, -0.1772,  ...,  0.2151, -0.1583,  0.1777],\n",
      "        [-0.1328,  0.1719, -0.1355,  ...,  0.1644, -0.0801,  0.1177],\n",
      "        ...,\n",
      "        [-0.1140,  0.1301, -0.1567,  ...,  0.2272, -0.1227,  0.1478],\n",
      "        [-0.1009,  0.0488, -0.1734,  ...,  0.1733, -0.1470,  0.1555],\n",
      "        [ 0.0300, -0.0354, -0.1343,  ...,  0.1783, -0.1551,  0.1195]],\n",
      "       grad_fn=<AddmmBackward0>)\n",
      "Epoch [6/235], Loss: 2.2430, Accuracy: 13.27%\n",
      "tensor([[-2.4241e-02,  2.8614e-02, -2.4038e-01,  ..., -5.5920e-02,\n",
      "         -3.7796e-02,  1.5001e-01],\n",
      "        [-1.1903e-01,  2.5072e-01, -2.0371e-01,  ...,  1.4870e-04,\n",
      "         -3.7876e-02,  1.0859e-01],\n",
      "        [-2.1115e-03, -1.1137e-02, -2.6135e-01,  ..., -7.8419e-03,\n",
      "         -5.8189e-02,  2.3035e-01],\n",
      "        ...,\n",
      "        [-1.0922e-01,  1.5622e-01, -2.3146e-01,  ..., -7.6431e-02,\n",
      "         -6.2275e-03,  1.5448e-01],\n",
      "        [-9.9865e-02,  1.4100e-01, -1.9201e-01,  ..., -8.8806e-02,\n",
      "         -8.9686e-03,  1.4808e-01],\n",
      "        [-1.2199e-01,  2.7141e-01, -2.1059e-01,  ..., -2.3681e-02,\n",
      "         -1.4342e-02,  1.0971e-01]], grad_fn=<AddmmBackward0>)\n",
      "Epoch [7/235], Loss: 2.2206, Accuracy: 23.16%\n",
      "tensor([[ 0.4087, -0.2464, -0.1995,  ...,  0.0153, -0.0780,  0.0242],\n",
      "        [ 0.1256,  0.0692, -0.1434,  ..., -0.0410, -0.0659, -0.0138],\n",
      "        [ 0.1097,  0.1031, -0.1571,  ...,  0.0466, -0.0742,  0.0337],\n",
      "        ...,\n",
      "        [ 0.1858, -0.0214, -0.1840,  ..., -0.0241, -0.0412,  0.0308],\n",
      "        [ 0.1987, -0.1037, -0.1329,  ..., -0.0974, -0.0528,  0.0273],\n",
      "        [ 0.1132,  0.1109, -0.1490,  ...,  0.0213, -0.0287, -0.0064]],\n",
      "       grad_fn=<AddmmBackward0>)\n",
      "Epoch [8/235], Loss: 2.1995, Accuracy: 19.76%\n",
      "tensor([[-0.1639,  0.3785, -0.1412,  ..., -0.1602,  0.0196, -0.0101],\n",
      "        [-0.0661,  0.1666, -0.2281,  ..., -0.2048,  0.0533,  0.0452],\n",
      "        [-0.0725,  0.1128, -0.1891,  ..., -0.2397,  0.0415,  0.1192],\n",
      "        ...,\n",
      "        [ 0.0265, -0.0152, -0.1318,  ..., -0.2630,  0.0411,  0.0273],\n",
      "        [-0.1737,  0.3930, -0.1839,  ..., -0.1284,  0.0497,  0.0176],\n",
      "        [-0.1082,  0.2569, -0.2426,  ..., -0.1010,  0.0638,  0.1023]],\n",
      "       grad_fn=<AddmmBackward0>)\n",
      "Epoch [9/235], Loss: 2.1595, Accuracy: 43.62%\n",
      "tensor([[-0.1739,  0.2434, -0.1216,  ..., -0.0035, -0.1728, -0.0867],\n",
      "        [-0.2155,  0.2554, -0.1645,  ..., -0.0234, -0.1058, -0.0432],\n",
      "        [-0.1461,  0.2357,  0.0160,  ..., -0.1706, -0.1281, -0.1940],\n",
      "        ...,\n",
      "        [ 0.0338,  0.1039,  0.1073,  ..., -0.0455, -0.2035, -0.2603],\n",
      "        [-0.1998,  0.4314,  0.0225,  ..., -0.1342, -0.0874, -0.2466],\n",
      "        [ 0.2754, -0.0852,  0.0248,  ..., -0.0429, -0.1868, -0.2653]],\n",
      "       grad_fn=<AddmmBackward0>)\n",
      "Epoch [10/235], Loss: 2.1129, Accuracy: 43.46%\n",
      "tensor([[ 0.1744, -0.2197,  0.2053,  ...,  0.1002, -0.0690, -0.1942],\n",
      "        [ 0.3804, -0.3917,  0.2094,  ...,  0.0882, -0.0719, -0.2565],\n",
      "        [-0.3278,  0.3400,  0.1500,  ...,  0.1769, -0.0637, -0.1258],\n",
      "        ...,\n",
      "        [-0.3393,  0.2122,  0.0796,  ...,  0.1396, -0.0708, -0.0616],\n",
      "        [ 0.2573, -0.2337,  0.1670,  ...,  0.0857, -0.0753, -0.2149],\n",
      "        [ 0.0997, -0.0894,  0.2205,  ...,  0.0431, -0.0497, -0.2065]],\n",
      "       grad_fn=<AddmmBackward0>)\n",
      "Epoch [11/235], Loss: 2.0515, Accuracy: 55.38%\n",
      "tensor([[-0.2055, -0.1725, -0.1345,  ..., -0.1056,  0.0934,  0.0322],\n",
      "        [-0.1411, -0.2209, -0.1039,  ..., -0.0105, -0.1578, -0.0129],\n",
      "        [-0.4540,  0.4682,  0.0981,  ..., -0.0403, -0.1273, -0.2073],\n",
      "        ...,\n",
      "        [-0.0032, -0.3517,  0.1605,  ..., -0.2792, -0.1324, -0.1749],\n",
      "        [-0.5061,  0.0228, -0.0478,  ..., -0.0846, -0.1077,  0.0897],\n",
      "        [-0.2061,  0.0455, -0.0059,  ..., -0.0473, -0.1303, -0.2174]],\n",
      "       grad_fn=<AddmmBackward0>)\n",
      "Epoch [12/235], Loss: 1.9584, Accuracy: 51.18%\n",
      "tensor([[-0.6936,  0.9114, -0.1459,  ...,  0.1953, -0.1341, -0.0907],\n",
      "        [-0.3274, -0.1258, -0.1154,  ..., -0.1807, -0.0939,  0.2335],\n",
      "        [ 0.9378, -0.6246,  0.0963,  ..., -0.3941, -0.0186, -0.4820],\n",
      "        ...,\n",
      "        [-0.6968,  1.0994,  0.1480,  ..., -0.1209, -0.0664, -0.3531],\n",
      "        [-0.3982,  0.6631,  0.1653,  ..., -0.0953, -0.1273, -0.2435],\n",
      "        [ 0.3416, -0.3795,  0.3232,  ..., -0.3898, -0.0409, -0.2670]],\n",
      "       grad_fn=<AddmmBackward0>)\n",
      "Epoch [13/235], Loss: 1.8337, Accuracy: 53.86%\n",
      "tensor([[-0.6693,  0.0891, -0.0340,  ..., -0.0803,  0.1374,  0.2619],\n",
      "        [-0.8121,  0.0951, -0.1762,  ..., -0.2183,  0.0689,  0.4058],\n",
      "        [-0.8300,  0.4090, -0.3799,  ...,  0.2586, -0.0132,  0.3389],\n",
      "        ...,\n",
      "        [-0.6127,  0.4121,  0.1447,  ..., -0.5400,  0.0980, -0.1286],\n",
      "        [-0.4036,  0.3663, -0.0383,  ..., -0.3222,  0.4121, -0.1014],\n",
      "        [-1.0250,  0.6937, -0.1463,  ...,  0.2309, -0.0588,  0.2600]],\n",
      "       grad_fn=<AddmmBackward0>)\n",
      "Epoch [14/235], Loss: 1.7201, Accuracy: 55.28%\n",
      "tensor([[-0.1026, -0.3173,  0.4653,  ..., -0.2792,  0.4480, -1.0374],\n",
      "        [-1.8264,  1.5042, -0.0216,  ...,  0.6859, -0.1381, -0.4725],\n",
      "        [-1.4306,  0.9513, -0.6625,  ...,  0.7124,  0.1362, -0.1012],\n",
      "        ...,\n",
      "        [-1.4239,  1.3319, -0.4919,  ...,  1.2739, -0.1487, -0.1772],\n",
      "        [ 1.2112, -1.1042, -0.3886,  ...,  0.3303,  0.3125, -0.7502],\n",
      "        [-2.0681,  2.6857, -0.1935,  ...,  0.5021, -0.1117, -0.5107]],\n",
      "       grad_fn=<AddmmBackward0>)\n",
      "Epoch [15/235], Loss: 1.7943, Accuracy: 34.47%\n",
      "tensor([[ 2.1176, -3.4363,  0.2507,  ..., -0.6659,  0.9461, -0.5118],\n",
      "        [ 0.8021, -2.9358,  0.6175,  ..., -0.7527,  0.6792, -0.1618],\n",
      "        [ 1.0519, -2.7630,  0.9318,  ..., -1.0498,  0.5544, -0.4501],\n",
      "        ...,\n",
      "        [ 1.2100, -3.1358, -0.4949,  ...,  0.7737,  0.7529,  0.2137],\n",
      "        [ 0.7317, -1.8755,  1.1452,  ..., -0.8191,  0.5971, -0.8240],\n",
      "        [ 1.4660, -3.4658,  0.0878,  ..., -0.4202,  1.1382, -0.0047]],\n",
      "       grad_fn=<AddmmBackward0>)\n",
      "Epoch [16/235], Loss: 1.9436, Accuracy: 25.09%\n",
      "tensor([[-1.4082, -0.6644,  0.4625,  ..., -0.0246,  0.1457,  0.3008],\n",
      "        [-1.6325, -0.7265, -0.4674,  ...,  1.4225, -0.0857,  0.9542],\n",
      "        [-1.4108,  0.1942,  1.2331,  ..., -0.0112, -0.0611, -0.3392],\n",
      "        ...,\n",
      "        [-1.1803, -0.7464, -0.3504,  ...,  1.1879, -0.1494,  0.8168],\n",
      "        [-0.9915, -0.5570, -0.3968,  ...,  0.4445,  0.3269,  0.4357],\n",
      "        [-1.7590, -0.7331, -0.5770,  ...,  1.0795,  0.1381,  1.1027]],\n",
      "       grad_fn=<AddmmBackward0>)\n",
      "Epoch [17/235], Loss: 1.7059, Accuracy: 60.01%\n",
      "tensor([[-1.2246, -0.7440, -0.8390,  ...,  0.5675,  0.0249,  0.9367],\n",
      "        [-1.0472, -0.3459, -0.8455,  ...,  0.4296,  0.5417,  0.4245],\n",
      "        [-0.8297, -0.5280, -1.0787,  ...,  1.7243,  0.3964,  0.4744],\n",
      "        ...,\n",
      "        [-0.7168, -0.7746, -1.1146,  ...,  0.6475,  0.0786,  0.7201],\n",
      "        [-0.7686, -1.7228, -0.7643,  ...,  0.4874,  0.1299,  1.0495],\n",
      "        [ 0.3154, -2.1622, -0.4313,  ..., -0.1026,  0.4170, -0.1248]],\n",
      "       grad_fn=<AddmmBackward0>)\n",
      "Epoch [18/235], Loss: 1.4570, Accuracy: 73.32%\n",
      "tensor([[-1.5738,  0.9228,  0.3335,  ..., -0.2288, -0.3614,  0.1475],\n",
      "        [-0.8095, -0.2279,  1.3831,  ..., -0.3830, -0.7000, -0.1935],\n",
      "        [-1.0936,  0.2629,  1.3909,  ..., -0.9193, -0.5646, -0.5683],\n",
      "        ...,\n",
      "        [-1.8957,  0.1111, -1.1200,  ...,  1.0127, -0.1060,  1.0534],\n",
      "        [ 0.0259, -0.9019,  1.4759,  ..., -0.6901, -0.1047, -0.9754],\n",
      "        [-1.1751, -0.0371,  0.3444,  ..., -0.1118,  0.1171, -0.1015]],\n",
      "       grad_fn=<AddmmBackward0>)\n",
      "Epoch [19/235], Loss: 1.3342, Accuracy: 63.93%\n",
      "tensor([[ 1.9960e-01, -2.7800e-01, -1.2197e+00,  ...,  1.3808e+00,\n",
      "          6.7010e-01,  1.3445e-01],\n",
      "        [-1.2973e+00,  1.7978e+00, -9.2992e-01,  ...,  4.7712e-01,\n",
      "          9.5289e-02, -6.6611e-04],\n",
      "        [-2.6677e-01, -1.4375e+00, -2.2257e+00,  ...,  7.5620e-01,\n",
      "          3.9005e-01,  1.4623e+00],\n",
      "        ...,\n",
      "        [-5.0642e-01, -1.0364e+00, -2.1349e+00,  ...,  5.4513e-01,\n",
      "          6.4379e-01,  1.0552e+00],\n",
      "        [ 1.6683e+00, -1.8619e+00, -2.0822e-01,  ..., -1.5328e+00,\n",
      "          2.4282e-01, -7.9126e-01],\n",
      "        [ 3.4187e-02, -4.1313e-01,  5.8908e-01,  ..., -9.1239e-01,\n",
      "          6.2188e-02, -3.5831e-01]], grad_fn=<AddmmBackward0>)\n",
      "Epoch [20/235], Loss: 1.2587, Accuracy: 66.00%\n",
      "tensor([[ 0.5258, -3.3689,  1.9635,  ..., -0.3477, -0.6459, -1.0490],\n",
      "        [-1.8144,  1.2215,  0.1892,  ..., -0.3561,  0.9061, -0.4130],\n",
      "        [-2.1550, -1.1092,  1.1294,  ..., -0.1471,  0.9007, -0.7498],\n",
      "        ...,\n",
      "        [-2.2016, -0.3505, -1.0623,  ...,  0.1320,  1.4134,  0.7712],\n",
      "        [-1.5037, -0.1672, -0.1555,  ...,  0.4229,  1.4176, -0.0962],\n",
      "        [-1.5863, -0.4457,  1.1311,  ..., -1.1130,  0.2458, -0.3082]],\n",
      "       grad_fn=<AddmmBackward0>)\n",
      "Epoch [21/235], Loss: 1.1348, Accuracy: 73.46%\n",
      "tensor([[-0.5232, -0.7701, -0.4955,  ..., -0.5286,  0.4553,  0.5876],\n",
      "        [-1.5395,  1.8895,  0.2181,  ..., -0.7526,  1.0480, -0.2976],\n",
      "        [ 1.0250, -1.2972,  2.8359,  ..., -1.8022, -0.2443, -1.4095],\n",
      "        ...,\n",
      "        [-0.6451, -0.0917, -0.0912,  ..., -0.4984,  1.1305,  0.2169],\n",
      "        [-0.4203, -0.5042, -0.3693,  ..., -0.2688,  1.3679,  0.3731],\n",
      "        [-0.8209,  0.5253,  0.4781,  ..., -1.0562,  1.0671, -0.5276]],\n",
      "       grad_fn=<AddmmBackward0>)\n"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
      "Cell \u001b[1;32mIn[1], line 416\u001b[0m\n\u001b[0;32m    414\u001b[0m model_NSA_plus \u001b[38;5;241m=\u001b[39m copy\u001b[38;5;241m.\u001b[39mdeepcopy(model_0)\n\u001b[0;32m    415\u001b[0m \u001b[38;5;66;03m#model_GD = CNN()\u001b[39;00m\n\u001b[1;32m--> 416\u001b[0m train_model_NSA_plus(model_NSA_plus, train_loader, test_loader, optimizer_name\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mcustom\u001b[39m\u001b[38;5;124m'\u001b[39m, learning_rate\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m0.1\u001b[39m, num_epochs\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m1\u001b[39m)\n\u001b[0;32m    417\u001b[0m train_model_NSA(model_NSA, train_loader, test_loader, optimizer_name\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mcustom\u001b[39m\u001b[38;5;124m'\u001b[39m, learning_rate\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m0.1\u001b[39m, num_epochs\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m1\u001b[39m)\n\u001b[0;32m    418\u001b[0m train_model_GD(model_GD, train_loader, test_loader, optimizer_name\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mcustom\u001b[39m\u001b[38;5;124m'\u001b[39m, learning_rate\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m0.1\u001b[39m, num_epochs\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m1\u001b[39m)\n",
      "Cell \u001b[1;32mIn[1], line 363\u001b[0m, in \u001b[0;36mtrain_model_NSA_plus\u001b[1;34m(model, train_loader, test_loader, optimizer_name, learning_rate, num_epochs)\u001b[0m\n\u001b[0;32m    361\u001b[0m total \u001b[38;5;241m=\u001b[39m \u001b[38;5;241m0\u001b[39m\n\u001b[0;32m    362\u001b[0m \u001b[38;5;66;03m# 统计损失和准确率\u001b[39;00m\n\u001b[1;32m--> 363\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m images, labels \u001b[38;5;129;01min\u001b[39;00m train_loader:\n\u001b[0;32m    364\u001b[0m     outputs \u001b[38;5;241m=\u001b[39m model_x(images)\n\u001b[0;32m    365\u001b[0m     loss \u001b[38;5;241m=\u001b[39m criterion(outputs, labels)\n",
      "File \u001b[1;32mg:\\anaconda\\anconda2\\envs\\venv\\Lib\\site-packages\\torch\\utils\\data\\dataloader.py:631\u001b[0m, in \u001b[0;36m_BaseDataLoaderIter.__next__\u001b[1;34m(self)\u001b[0m\n\u001b[0;32m    628\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_sampler_iter \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[0;32m    629\u001b[0m     \u001b[38;5;66;03m# TODO(https://github.com/pytorch/pytorch/issues/76750)\u001b[39;00m\n\u001b[0;32m    630\u001b[0m     \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_reset()  \u001b[38;5;66;03m# type: ignore[call-arg]\u001b[39;00m\n\u001b[1;32m--> 631\u001b[0m data \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_next_data()\n\u001b[0;32m    632\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_num_yielded \u001b[38;5;241m+\u001b[39m\u001b[38;5;241m=\u001b[39m \u001b[38;5;241m1\u001b[39m\n\u001b[0;32m    633\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_dataset_kind \u001b[38;5;241m==\u001b[39m _DatasetKind\u001b[38;5;241m.\u001b[39mIterable \u001b[38;5;129;01mand\u001b[39;00m \\\n\u001b[0;32m    634\u001b[0m         \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_IterableDataset_len_called \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;129;01mand\u001b[39;00m \\\n\u001b[0;32m    635\u001b[0m         \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_num_yielded \u001b[38;5;241m>\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_IterableDataset_len_called:\n",
      "File \u001b[1;32mg:\\anaconda\\anconda2\\envs\\venv\\Lib\\site-packages\\torch\\utils\\data\\dataloader.py:675\u001b[0m, in \u001b[0;36m_SingleProcessDataLoaderIter._next_data\u001b[1;34m(self)\u001b[0m\n\u001b[0;32m    673\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m_next_data\u001b[39m(\u001b[38;5;28mself\u001b[39m):\n\u001b[0;32m    674\u001b[0m     index \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_next_index()  \u001b[38;5;66;03m# may raise StopIteration\u001b[39;00m\n\u001b[1;32m--> 675\u001b[0m     data \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_dataset_fetcher\u001b[38;5;241m.\u001b[39mfetch(index)  \u001b[38;5;66;03m# may raise StopIteration\u001b[39;00m\n\u001b[0;32m    676\u001b[0m     \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_pin_memory:\n\u001b[0;32m    677\u001b[0m         data \u001b[38;5;241m=\u001b[39m _utils\u001b[38;5;241m.\u001b[39mpin_memory\u001b[38;5;241m.\u001b[39mpin_memory(data, \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_pin_memory_device)\n",
      "File \u001b[1;32mg:\\anaconda\\anconda2\\envs\\venv\\Lib\\site-packages\\torch\\utils\\data\\_utils\\fetch.py:51\u001b[0m, in \u001b[0;36m_MapDatasetFetcher.fetch\u001b[1;34m(self, possibly_batched_index)\u001b[0m\n\u001b[0;32m     49\u001b[0m         data \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mdataset\u001b[38;5;241m.\u001b[39m__getitems__(possibly_batched_index)\n\u001b[0;32m     50\u001b[0m     \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m---> 51\u001b[0m         data \u001b[38;5;241m=\u001b[39m [\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mdataset[idx] \u001b[38;5;28;01mfor\u001b[39;00m idx \u001b[38;5;129;01min\u001b[39;00m possibly_batched_index]\n\u001b[0;32m     52\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m     53\u001b[0m     data \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mdataset[possibly_batched_index]\n",
      "File \u001b[1;32mg:\\anaconda\\anconda2\\envs\\venv\\Lib\\site-packages\\torchvision\\datasets\\mnist.py:145\u001b[0m, in \u001b[0;36mMNIST.__getitem__\u001b[1;34m(self, index)\u001b[0m\n\u001b[0;32m    142\u001b[0m img \u001b[38;5;241m=\u001b[39m Image\u001b[38;5;241m.\u001b[39mfromarray(img\u001b[38;5;241m.\u001b[39mnumpy(), mode\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mL\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[0;32m    144\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mtransform \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[1;32m--> 145\u001b[0m     img \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mtransform(img)\n\u001b[0;32m    147\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mtarget_transform \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[0;32m    148\u001b[0m     target \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mtarget_transform(target)\n",
      "File \u001b[1;32mg:\\anaconda\\anconda2\\envs\\venv\\Lib\\site-packages\\torchvision\\transforms\\transforms.py:95\u001b[0m, in \u001b[0;36mCompose.__call__\u001b[1;34m(self, img)\u001b[0m\n\u001b[0;32m     93\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m__call__\u001b[39m(\u001b[38;5;28mself\u001b[39m, img):\n\u001b[0;32m     94\u001b[0m     \u001b[38;5;28;01mfor\u001b[39;00m t \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mtransforms:\n\u001b[1;32m---> 95\u001b[0m         img \u001b[38;5;241m=\u001b[39m t(img)\n\u001b[0;32m     96\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m img\n",
      "File \u001b[1;32mg:\\anaconda\\anconda2\\envs\\venv\\Lib\\site-packages\\torch\\nn\\modules\\module.py:1511\u001b[0m, in \u001b[0;36mModule._wrapped_call_impl\u001b[1;34m(self, *args, **kwargs)\u001b[0m\n\u001b[0;32m   1509\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_compiled_call_impl(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)  \u001b[38;5;66;03m# type: ignore[misc]\u001b[39;00m\n\u001b[0;32m   1510\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m-> 1511\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_call_impl(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n",
      "File \u001b[1;32mg:\\anaconda\\anconda2\\envs\\venv\\Lib\\site-packages\\torch\\nn\\modules\\module.py:1520\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[1;34m(self, *args, **kwargs)\u001b[0m\n\u001b[0;32m   1515\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[0;32m   1516\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[0;32m   1517\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[0;32m   1518\u001b[0m         \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[0;32m   1519\u001b[0m         \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[1;32m-> 1520\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m forward_call(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n\u001b[0;32m   1522\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m   1523\u001b[0m     result \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n",
      "File \u001b[1;32mg:\\anaconda\\anconda2\\envs\\venv\\Lib\\site-packages\\torchvision\\transforms\\transforms.py:277\u001b[0m, in \u001b[0;36mNormalize.forward\u001b[1;34m(self, tensor)\u001b[0m\n\u001b[0;32m    269\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mforward\u001b[39m(\u001b[38;5;28mself\u001b[39m, tensor: Tensor) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m Tensor:\n\u001b[0;32m    270\u001b[0m \u001b[38;5;250m    \u001b[39m\u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[0;32m    271\u001b[0m \u001b[38;5;124;03m    Args:\u001b[39;00m\n\u001b[0;32m    272\u001b[0m \u001b[38;5;124;03m        tensor (Tensor): Tensor image to be normalized.\u001b[39;00m\n\u001b[1;32m   (...)\u001b[0m\n\u001b[0;32m    275\u001b[0m \u001b[38;5;124;03m        Tensor: Normalized Tensor image.\u001b[39;00m\n\u001b[0;32m    276\u001b[0m \u001b[38;5;124;03m    \"\"\"\u001b[39;00m\n\u001b[1;32m--> 277\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m F\u001b[38;5;241m.\u001b[39mnormalize(tensor, \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mmean, \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mstd, \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39minplace)\n",
      "File \u001b[1;32mg:\\anaconda\\anconda2\\envs\\venv\\Lib\\site-packages\\torchvision\\transforms\\functional.py:349\u001b[0m, in \u001b[0;36mnormalize\u001b[1;34m(tensor, mean, std, inplace)\u001b[0m\n\u001b[0;32m    346\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(tensor, torch\u001b[38;5;241m.\u001b[39mTensor):\n\u001b[0;32m    347\u001b[0m     \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mTypeError\u001b[39;00m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mimg should be Tensor Image. Got \u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mtype\u001b[39m(tensor)\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m)\n\u001b[1;32m--> 349\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m F_t\u001b[38;5;241m.\u001b[39mnormalize(tensor, mean\u001b[38;5;241m=\u001b[39mmean, std\u001b[38;5;241m=\u001b[39mstd, inplace\u001b[38;5;241m=\u001b[39minplace)\n",
      "File \u001b[1;32mg:\\anaconda\\anconda2\\envs\\venv\\Lib\\site-packages\\torchvision\\transforms\\_functional_tensor.py:920\u001b[0m, in \u001b[0;36mnormalize\u001b[1;34m(tensor, mean, std, inplace)\u001b[0m\n\u001b[0;32m    918\u001b[0m mean \u001b[38;5;241m=\u001b[39m torch\u001b[38;5;241m.\u001b[39mas_tensor(mean, dtype\u001b[38;5;241m=\u001b[39mdtype, device\u001b[38;5;241m=\u001b[39mtensor\u001b[38;5;241m.\u001b[39mdevice)\n\u001b[0;32m    919\u001b[0m std \u001b[38;5;241m=\u001b[39m torch\u001b[38;5;241m.\u001b[39mas_tensor(std, dtype\u001b[38;5;241m=\u001b[39mdtype, device\u001b[38;5;241m=\u001b[39mtensor\u001b[38;5;241m.\u001b[39mdevice)\n\u001b[1;32m--> 920\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m (std \u001b[38;5;241m==\u001b[39m \u001b[38;5;241m0\u001b[39m)\u001b[38;5;241m.\u001b[39many():\n\u001b[0;32m    921\u001b[0m     \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mstd evaluated to zero after conversion to \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mdtype\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m, leading to division by zero.\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[0;32m    922\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m mean\u001b[38;5;241m.\u001b[39mndim \u001b[38;5;241m==\u001b[39m \u001b[38;5;241m1\u001b[39m:\n",
      "\u001b[1;31mKeyboardInterrupt\u001b[0m: "
     ]
    }
   ],
   "source": [
    "import numpy as np\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.optim as optim\n",
    "import torch.nn.functional as F\n",
    "from torch.utils.data import DataLoader\n",
    "from torchvision import datasets, transforms\n",
    "import matplotlib.pyplot as plt\n",
    "import copy\n",
    "import random\n",
    "import pandas as pd\n",
    "# 1. 设置随机种子（确保结果可复现）\n",
    "def set_seed(seed):\n",
    "    torch.manual_seed(seed)\n",
    "    torch.cuda.manual_seed(seed)\n",
    "    torch.cuda.manual_seed_all(seed)\n",
    "    np.random.seed(seed)\n",
    "    random.seed(seed)\n",
    "    torch.backends.cudnn.deterministic = True\n",
    "    torch.backends.cudnn.benchmark = False\n",
    "\n",
     "# Fix the random seed\n",
     "seed = 42\n",
     "set_seed(seed)\n",
     "# 2. Prepare the dataset\n",
     "# Define the data preprocessing pipeline\n",
     "transform = transforms.Compose([\n",
     "    transforms.ToTensor(),  # convert the PIL image to a tensor in [0, 1]\n",
     "    transforms.Normalize((0.5,), (0.5,))  # normalize to [-1, 1]\n",
     "])\n",
     "\n",
     "# Load the MNIST dataset (downloads on first run)\n",
     "train_dataset = datasets.MNIST(root='./data', train=True, download=True, transform=transform)\n",
     "test_dataset = datasets.MNIST(root='./data', train=False, download=True, transform=transform)\n",
     "\n",
     "# Create the data loaders\n",
     "train_loader = DataLoader(train_dataset, batch_size=256, shuffle=True)\n",
     "test_loader = DataLoader(test_dataset, batch_size=256, shuffle=False)\n",
    "\n",
    "# 3. 定义CNN模型\n",
    "class CNN(nn.Module):\n",
    "    def __init__(self):\n",
    "        super(CNN, self).__init__()\n",
    "        self.conv1 = nn.Conv2d(1, 32, kernel_size=3, padding=1)  # 输入通道1，输出通道32\n",
    "        self.conv2 = nn.Conv2d(32, 64, kernel_size=3, padding=1)  # 输入通道32，输出通道64\n",
    "        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)  # 池化层\n",
    "        self.fc1 = nn.Linear(64 * 7 * 7, 128)  # 全连接层\n",
    "        self.fc2 = nn.Linear(128, 10)  # 输出层，10个类别\n",
    "\n",
    "    def forward(self, x):\n",
    "        x = self.pool(F.relu(self.conv1(x)))  # 卷积 -> ReLU -> 池化\n",
    "        x = self.pool(F.relu(self.conv2(x)))  # 卷积 -> ReLU -> 池化\n",
    "        x = x.view(-1, 64 * 7 * 7)  # 展平\n",
    "        x = F.relu(self.fc1(x))  # 全连接 -> ReLU\n",
    "        x = self.fc2(x)  # 输出层\n",
    "        return x\n",
    "    \n",
    "\n",
    "def add_models(model1, model2):\n",
    "    result = CNN()  # 创建一个新的CNN实例\n",
    "    for param1, param2, param_result in zip(model1.parameters(), model2.parameters(), result.parameters()):\n",
    "        param_result.data = param1.data + param2.data  # 逐元素相加\n",
    "    return result\n",
    "    \n",
    "\n",
    "# 定义数乘操作\n",
    "def multiply_model(model, scalar):\n",
    "    result = CNN()  # 创建一个新的CNN实例\n",
    "    for param, param_result in zip(model.parameters(), result.parameters()):\n",
    "        param_result.data = param.data * scalar  # 逐元素乘以标量\n",
    "    return result\n",
    "\n",
    "# 4. 自定义优化算法\n",
    "class CustomOptimizer(optim.Optimizer):\n",
    "    def __init__(self, params, lr=0.01):\n",
    "        defaults = dict(lr=lr)\n",
    "        super(CustomOptimizer, self).__init__(params, defaults)\n",
    "\n",
    "    def step(self, closure=None):\n",
    "        loss = None\n",
    "        if closure is not None:\n",
    "            loss = closure()\n",
    "\n",
    "        for group in self.param_groups:\n",
    "            for p in group['params']:\n",
    "                if p.grad is None:\n",
    "                    continue\n",
    "                # 获取梯度\n",
    "                grad = p.grad.data\n",
    "                # 更新参数：theta = theta - lr * grad\n",
    "                p.data.add_(-group['lr'], grad)\n",
    "\n",
    "        return loss\n",
    "\n",
    "# 5. 定义优化器接口\n",
    "def get_optimizer(model, optimizer_name, learning_rate):\n",
    "    if optimizer_name == 'adam':\n",
    "        return optim.Adam(model.parameters(), lr=learning_rate)\n",
    "    elif optimizer_name == 'sgd':\n",
    "        return optim.SGD(model.parameters(), lr=learning_rate, momentum=0.9)\n",
    "    elif optimizer_name == 'rmsprop':\n",
    "        return optim.RMSprop(model.parameters(), lr=learning_rate)\n",
    "    elif optimizer_name == 'custom':\n",
    "        return CustomOptimizer(model.parameters(), lr=learning_rate)\n",
    "    else:\n",
    "        raise ValueError(f\"Unsupported optimizer: {optimizer_name}\")\n",
    "\n",
    "# 6. 训练模型\n",
    "def train_model_GD(model, train_loader, test_loader, optimizer_name, learning_rate, num_epochs):\n",
    "    # 定义损失函数和优化器\n",
    "    criterion = nn.CrossEntropyLoss()\n",
    "    optimizer = get_optimizer(model, optimizer_name, learning_rate)\n",
    "\n",
    "    train_losses = []\n",
    "    train_accuracies = []\n",
    "    k=0\n",
    "    for epoch in range(num_epochs):\n",
    "        model.train()  # 设置模型为训练模式\n",
    "        running_loss = 0.0\n",
    "        correct = 0\n",
    "        total = 0\n",
    "\n",
    "        for images, labels in train_loader:\n",
    "            # 前向传播\n",
    "            k+=1\n",
    "            outputs = model(images)\n",
    "            loss = criterion(outputs, labels)\n",
    "\n",
    "            # 反向传播和优化\n",
    "            optimizer.zero_grad()\n",
    "            loss.backward()\n",
    "            optimizer.step()\n",
    "\n",
    "            running_loss = 0.0\n",
    "            correct = 0\n",
    "            total = 0\n",
    "            for images, labels in train_loader:\n",
    "                outputs = model(images)\n",
    "                loss = criterion(outputs, labels)\n",
    "                running_loss += loss.item()\n",
    "                _, predicted = torch.max(outputs.data, 1)\n",
    "                total += labels.size(0)\n",
    "                correct += (predicted == labels).sum().item()\n",
    "            # 计算每个epoch的损失和准确率\n",
    "            epoch_loss = running_loss / len(train_loader)\n",
    "            epoch_accuracy = 100 * correct / total\n",
    "            train_losses.append(epoch_loss)\n",
    "            train_accuracies.append(epoch_accuracy)\n",
    "    \n",
    "            print(f\"Epoch [{k}/{len(train_loader)*num_epochs}], Loss: {epoch_loss:.4f}, Accuracy: {epoch_accuracy:.2f}%\")\n",
    "            if epoch_accuracy > 99:\n",
    "                break\n",
    "    pd.DataFrame(train_losses).to_excel('loss_GD.xlsx')  \n",
    "    pd.DataFrame(train_accuracies).to_excel('accuracies_GD.xlsx')  \n",
    "    # 测试模型\n",
    "    model.eval()  # 设置模型为评估模式\n",
    "    test_correct = 0\n",
    "    test_total = 0\n",
    "\n",
    "    with torch.no_grad():  # 禁用梯度计算\n",
    "        for images, labels in test_loader:\n",
    "            outputs = model(images)\n",
    "            _, predicted = torch.max(outputs.data, 1)\n",
    "            test_total += labels.size(0)\n",
    "            test_correct += (predicted == labels).sum().item()\n",
    "\n",
    "    test_accuracy = 100 * test_correct / test_total\n",
    "    print(f\"Test Accuracy: {test_accuracy:.2f}%\")\n",
    "\n",
    "    ## 可视化训练过程\n",
    "    #plt.figure(figsize=(12, 4))\n",
    "    #plt.subplot(1, 2, 1)\n",
    "    #plt.plot(train_losses, label='Training Loss')\n",
    "    #plt.xlabel('Epoch')\n",
    "    #plt.ylabel('Loss')\n",
    "    #plt.legend()\n",
    "#\n",
    "    #plt.subplot(1, 2, 2)\n",
    "    #plt.plot(train_accuracies, label='Training Accuracy')\n",
    "    #plt.xlabel('Epoch')\n",
    "    #plt.ylabel('Accuracy')\n",
    "    #plt.legend()\n",
    "    #plt.show()\n",
    "\n",
    "def train_model_NSA(model, train_loader, test_loader, optimizer_name, learning_rate, num_epochs):\n",
    "    # 定义损失函数和优化器\n",
    "    criterion = nn.CrossEntropyLoss()\n",
    "    optimizer = get_optimizer(model, optimizer_name, learning_rate)\n",
    "\n",
    "    train_losses = []\n",
    "    train_accuracies = []\n",
    "\n",
    "    model_x = copy.deepcopy(model)\n",
    "    model_y = copy.deepcopy(model)\n",
    "    model_z = copy.deepcopy(model)\n",
    "\n",
    "    optimizer_x = get_optimizer(model_x, optimizer_name, learning_rate)\n",
    "    optimizer_y = get_optimizer(model_y, optimizer_name, learning_rate)\n",
    "    optimizer_z = get_optimizer(model_z, optimizer_name, learning_rate)\n",
    "\n",
    "    k = 0\n",
    "    for epoch in range(num_epochs):\n",
    "        model_x.train()\n",
    "        model_y.train()  # 设置模型为训练模式\n",
    "        model_z.train()  # 设置模型为训练模式\n",
    "        running_loss = 0.0\n",
    "        correct = 0\n",
    "        total = 0\n",
    "\n",
    "        for images, labels in train_loader:\n",
    "            k+=1\n",
    "            alpha = 5/(k+5)\n",
    "            with torch.no_grad():\n",
    "                for param_y, param_x, param_z in zip(model_y.parameters(),model_x.parameters(), model_z.parameters()):\n",
    "                    # 更新 model_y 的参数\n",
    "                    param_y.data =  (1 - alpha)*param_x.data + alpha*param_z.data\n",
    "        \n",
    "            # 前向传播\n",
    "            outputs = model_y(images)\n",
    "            loss = criterion(outputs, labels)\n",
    "\n",
    "            # 反向传播和优化\n",
    "            optimizer_y.zero_grad()\n",
    "            loss.backward()\n",
    "            optimizer_y.step()\n",
    "\n",
    "            for param_y, param_x in zip(model_y.parameters(),model_x.parameters()):\n",
    "                # 更新 model_y 的参数\n",
    "                param_x.data =  param_y.data\n",
    "\n",
    "            divisor = alpha\n",
    "            # 将处理后的梯度赋值给 model_z\n",
    "            for param_z, param_y in zip(model_z.parameters(), model_y.parameters()):\n",
    "                if param_y.grad is not None:\n",
    "                    param_z.grad = param_y.grad.div_(divisor).clone()  # 复制梯度\n",
    "            optimizer_z.step()\n",
    "   \n",
    "            # 统计损失和准确率\n",
    "            running_loss = 0.0\n",
    "            correct = 0\n",
    "            total = 0\n",
    "            for images, labels in train_loader:\n",
    "                outputs = model_x(images)\n",
    "                loss = criterion(outputs, labels)\n",
    "                running_loss += loss.item()\n",
    "                _, predicted = torch.max(outputs.data, 1)\n",
    "                total += labels.size(0)\n",
    "                correct += (predicted == labels).sum().item()\n",
    "            # 计算每个epoch的损失和准确率\n",
    "            epoch_loss = running_loss / len(train_loader)\n",
    "            epoch_accuracy = 100 * correct / total\n",
    "            train_losses.append(epoch_loss)\n",
    "            train_accuracies.append(epoch_accuracy)\n",
    "    \n",
    "            print(f\"Epoch [{k}/{len(train_loader)*num_epochs}], Loss: {epoch_loss:.4f}, Accuracy: {epoch_accuracy:.2f}%\")\n",
    "            if epoch_accuracy > 99:\n",
    "                break\n",
    "    pd.DataFrame(train_losses).to_excel('loss_NSA.xlsx')  \n",
    "    pd.DataFrame(train_accuracies).to_excel('accuracies_NSA.xlsx')  \n",
    "    # 测试模型\n",
    "    model_x.eval()  # 设置模型为评估模式\n",
    "    test_correct = 0\n",
    "    test_total = 0\n",
    "    with torch.no_grad():  # 禁用梯度计算\n",
    "        for images, labels in test_loader:\n",
    "            outputs = model_x(images)\n",
    "            _, predicted = torch.max(outputs.data, 1)\n",
    "            test_total += labels.size(0)\n",
    "            test_correct += (predicted == labels).sum().item()\n",
    "\n",
    "    test_accuracy = 100 * test_correct / test_total\n",
    "    print(f\"Test Accuracy: {test_accuracy:.2f}%\")\n",
    "\n",
    "    ## 可视化训练过程\n",
    "    #plt.figure(figsize=(12, 4))\n",
    "    #plt.subplot(1, 2, 1)\n",
    "    #plt.plot(train_losses, label='Training Loss')\n",
    "    #plt.xlabel('Epoch')\n",
    "    #plt.ylabel('Loss')\n",
    "    #plt.legend()\n",
    "#\n",
    "    #plt.subplot(1, 2, 2)\n",
    "    #plt.plot(train_accuracies, label='Training Accuracy')\n",
    "    #plt.xlabel('Epoch')\n",
    "    #plt.ylabel('Accuracy')\n",
    "    #plt.legend()\n",
    "    #plt.show()\n",
    "\n",
    "def train_model_NSA_plus(model, train_loader, test_loader, optimizer_name, learning_rate, num_epochs):\n",
    "    # 定义损失函数和优化器\n",
    "    criterion = nn.CrossEntropyLoss()\n",
    "    optimizer = get_optimizer(model, optimizer_name, learning_rate)\n",
    "\n",
    "    train_losses = []\n",
    "    train_accuracies = []\n",
    "\n",
    "    model_x = copy.deepcopy(model)\n",
    "    model_y = copy.deepcopy(model)\n",
    "    model_z = copy.deepcopy(model)\n",
    "\n",
    "    optimizer_x = get_optimizer(model_x, optimizer_name, learning_rate)\n",
    "    optimizer_y = get_optimizer(model_y, optimizer_name, learning_rate)\n",
    "    optimizer_z = get_optimizer(model_z, optimizer_name, learning_rate)\n",
    "\n",
    "    k = 0\n",
    "    for epoch in range(num_epochs):\n",
    "        model_x.train()\n",
    "        model_y.train()  # 设置模型为训练模式\n",
    "        model_z.train()  # 设置模型为训练模式\n",
    "        running_loss = 0.0\n",
    "        correct = 0\n",
    "        total = 0\n",
    "        for images, labels in train_loader:\n",
    "            k+=1\n",
    "            alpha = 5/(k+5)\n",
    "            with torch.no_grad():\n",
    "                for param_y, param_x, param_z in zip(model_y.parameters(),model_x.parameters(), model_z.parameters()):\n",
    "                    # 更新 model_y 的参数\n",
    "                    param_y.data =  (1 - alpha)*param_x.data + alpha*param_z.data\n",
    "\n",
    "            outputs = model_y(images)\n",
    "            loss = criterion(outputs, labels)\n",
    "            optimizer_y.zero_grad()\n",
    "            loss.backward()\n",
    "            optimizer_y.step()\n",
    "\n",
    "            # 前向传播\n",
    "            outputs = model_x(images)\n",
    "            loss = criterion(outputs, labels)\n",
    "            optimizer_x.zero_grad()\n",
    "            loss.backward()\n",
    "            optimizer_x.step()\n",
    "\n",
    "            #比较前向传播\n",
    "            outputs_x = model_x(images)\n",
    "            print(outputs_x)\n",
    "            loss_x = criterion(outputs_x, labels)\n",
    "            outputs_y = model_y(images)\n",
    "            loss_y = criterion(outputs_y, labels)\n",
    "\n",
    "            with torch.no_grad():\n",
    "                for param_x,param_y in zip(model_x.parameters(),model_y.parameters()):\n",
    "                    # 更新 model_y 的参数\n",
    "                    if loss_x < loss_y:\n",
    "                        continue\n",
    "                    else:\n",
    "                        param_x.data =  param_y.data.clone()\n",
    "\n",
    "            outputs_x = model_x(images)\n",
    "            loss_x = criterion(outputs_x, labels)\n",
    "        \n",
    "            divisor = alpha\n",
    "            # 将处理后的梯度赋值给 model_z\n",
    "            for param_z, param_y in zip(model_z.parameters(), model_y.parameters()):\n",
    "                if param_y.grad is not None:\n",
    "                    param_z.grad = param_y.grad.div_(divisor).clone()  # 复制梯度\n",
    "            optimizer_z.step()\n",
    "\n",
    "            running_loss = 0.0\n",
    "            correct = 0\n",
    "            total = 0\n",
    "            # 统计损失和准确率\n",
    "            for images, labels in train_loader:\n",
    "                outputs = model_x(images)\n",
    "                loss = criterion(outputs, labels)\n",
    "                running_loss += loss.item()\n",
    "                _, predicted = torch.max(outputs.data, 1)\n",
    "                total += labels.size(0)\n",
    "                correct += (predicted == labels).sum().item()\n",
    "            # 计算每个epoch的损失和准确率\n",
    "            epoch_loss = running_loss / len(train_loader)\n",
    "            epoch_accuracy = 100 * correct / total\n",
    "            train_losses.append(epoch_loss)\n",
    "            train_accuracies.append(epoch_accuracy)\n",
    "            print(f\"Epoch [{k}/{len(train_loader)*num_epochs}], Loss: {epoch_loss:.4f}, Accuracy: {epoch_accuracy:.2f}%\")\n",
    "            if epoch_accuracy > 99:\n",
    "                break\n",
    "           \n",
    "    pd.DataFrame(train_losses).to_excel('loss_NSA_plus.xlsx')  \n",
    "    pd.DataFrame(train_accuracies).to_excel('accuracies_NSA_plus.xlsx')  \n",
    "    # 测试模型\n",
    "    model_x.eval()  # 设置模型为评估模式\n",
    "    test_correct = 0\n",
    "    test_total = 0\n",
    "    with torch.no_grad():  # 禁用梯度计算\n",
    "        for images, labels in test_loader:\n",
    "            outputs = model_x(images)\n",
    "            _, predicted = torch.max(outputs.data, 1)\n",
    "            test_total += labels.size(0)\n",
    "            test_correct += (predicted == labels).sum().item()\n",
    "\n",
    "    test_accuracy = 100 * test_correct / test_total\n",
    "    print(f\"Test Accuracy: {test_accuracy:.2f}%\")\n",
    "    \n",
    "    ## 可视化训练过程\n",
    "    #plt.figure(figsize=(12, 4))\n",
    "    #plt.subplot(1, 2, 1)\n",
    "    #plt.plot(train_losses, label='Training Loss')\n",
    "    #plt.xlabel('Epoch')\n",
    "    #plt.ylabel('Loss')\n",
    "    #plt.legend()\n",
    "#\n",
    "    #plt.subplot(1, 2, 2)\n",
    "    #plt.plot(train_accuracies, label='Training Accuracy')\n",
    "    #plt.xlabel('Epoch')\n",
    "    #plt.ylabel('Accuracy')\n",
    "    #plt.legend()\n",
    "    #plt.show()\n",
    "\n",
     "# 7. Initialize the models and train\n",
     "model_0 = CNN()\n",
     "# All three methods start from identical weights so their curves are comparable.\n",
     "model_GD = copy.deepcopy(model_0)\n",
     "model_NSA = copy.deepcopy(model_0)\n",
     "model_NSA_plus = copy.deepcopy(model_0)\n",
     "#model_GD = CNN()\n",
     "train_model_NSA_plus(model_NSA_plus, train_loader, test_loader, optimizer_name='custom', learning_rate=0.1, num_epochs=1)\n",
     "train_model_NSA(model_NSA, train_loader, test_loader, optimizer_name='custom', learning_rate=0.1, num_epochs=1)\n",
     "train_model_GD(model_GD, train_loader, test_loader, optimizer_name='custom', learning_rate=0.1, num_epochs=1)\n",
    "\n",
    "\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "venv",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.8"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
