{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9d8cc94e-44e5-4329-8048-f43655d863c4",
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.optim as optim\n",
    "from torch.utils.data import Dataset, DataLoader\n",
    "from torchvision import datasets, transforms\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "from sklearn.preprocessing import StandardScaler\n",
    "from sklearn.metrics import accuracy_score\n",
    "\n",
    "\n",
    "# Data preprocessing helper\n",
    "def load_mnist():\n",
    "    \"\"\"Download MNIST and return (train_loader, test_loader) with batch size 64.\"\"\"\n",
    "    # Convert images to tensors and scale pixel values into [-1, 1].\n",
    "    mnist_transform = transforms.Compose([\n",
    "        transforms.ToTensor(),\n",
    "        transforms.Normalize((0.5,), (0.5,)),\n",
    "    ])\n",
    "\n",
    "    train_set = datasets.MNIST(root='./data', train=True, download=True, transform=mnist_transform)\n",
    "    test_set = datasets.MNIST(root='./data', train=False, download=True, transform=mnist_transform)\n",
    "\n",
    "    # Training batches are shuffled each epoch; evaluation order stays fixed.\n",
    "    return (\n",
    "        DataLoader(train_set, batch_size=64, shuffle=True),\n",
    "        DataLoader(test_set, batch_size=64, shuffle=False),\n",
    "    )\n",
    "\n",
    "# Load the Wi-Fi activity-sensing dataset\n",
    "def load_wifi_data(file_path):\n",
    "    \"\"\"Read a CSV of Wi-Fi features (label in the last column) into a DataLoader.\n",
    "\n",
    "    If `file_path` does not exist, a random sample dataset is generated and\n",
    "    written there first, then loaded like a real one.\n",
    "    \"\"\"\n",
    "    if not os.path.exists(file_path):\n",
    "        print(f\"File '{file_path}' not found. Generating a sample Wi-Fi dataset.\")\n",
    "        # Synthesize a placeholder dataset: 1000 rows, 30 random features, labels in 0-4.\n",
    "        sample_rows, sample_cols = 1000, 30\n",
    "        features = np.random.rand(sample_rows, sample_cols)\n",
    "        labels = np.random.randint(0, 5, size=(sample_rows,))\n",
    "        frame = pd.DataFrame(features, columns=[f'feature_{i}' for i in range(sample_cols)])\n",
    "        frame['label'] = labels\n",
    "        frame.to_csv(file_path, index=False)\n",
    "        print(f\"Sample dataset saved to '{file_path}'.\")\n",
    "\n",
    "    # Read the dataset; the final column is assumed to be the class label.\n",
    "    data = pd.read_csv(file_path)\n",
    "    X = data.iloc[:, :-1].values\n",
    "    y = data.iloc[:, -1].values\n",
    "\n",
    "    # Standardize features to zero mean / unit variance.\n",
    "    X = StandardScaler().fit_transform(X)\n",
    "\n",
    "    class WiFiDataset(Dataset):\n",
    "        \"\"\"Minimal tensor-backed Dataset over the (X, y) arrays.\"\"\"\n",
    "\n",
    "        def __init__(self, features, targets):\n",
    "            self.X = torch.tensor(features, dtype=torch.float32)\n",
    "            self.y = torch.tensor(targets, dtype=torch.long)\n",
    "\n",
    "        def __len__(self):\n",
    "            return len(self.X)\n",
    "\n",
    "        def __getitem__(self, idx):\n",
    "            return self.X[idx], self.y[idx]\n",
    "\n",
    "    return DataLoader(WiFiDataset(X, y), batch_size=64, shuffle=True)\n",
    "\n",
    "\n",
    "# 定义FNN模型\n",
    "class FNN(nn.Module):\n",
    "    def __init__(self, input_size, hidden_size, output_size, num_layers=2):\n",
    "        super(FNN, self).__init__()\n",
    "        layers = []\n",
    "        layers.append(nn.Linear(input_size, hidden_size))  # 输入维度为784，隐藏层维度为hidden_size\n",
    "        layers.append(nn.ReLU())\n",
    "\n",
    "        for _ in range(num_layers - 1):\n",
    "            layers.append(nn.Linear(hidden_size, hidden_size))\n",
    "            layers.append(nn.ReLU())\n",
    "\n",
    "        layers.append(nn.Linear(hidden_size, output_size))  # 输出层\n",
    "        self.model = nn.Sequential(*layers)\n",
    "\n",
    "    def forward(self, x):\n",
    "        return self.model(x)\n",
    "\n",
    "\n",
    "# 训练模型\n",
    "def train_model(model, train_loader, criterion, optimizer, epochs=10):\n",
    "    model.train()\n",
    "    train_losses = []\n",
    "    train_accuracies = []\n",
    "\n",
    "    for epoch in range(epochs):\n",
    "        epoch_loss = 0.0\n",
    "        correct = 0\n",
    "        total = 0\n",
    "\n",
    "        for inputs, labels in train_loader:\n",
    "            # 展平每个输入图像，确保每个输入是 (batch_size, 784)\n",
    "            inputs = inputs.view(inputs.size(0), -1)  # 展平输入数据\n",
    "            \n",
    "            # 前向传播\n",
    "            outputs = model(inputs)\n",
    "            loss = criterion(outputs, labels)\n",
    "\n",
    "            # 后向传播\n",
    "            optimizer.zero_grad()\n",
    "            loss.backward()\n",
    "            optimizer.step()\n",
    "\n",
    "            epoch_loss += loss.item()\n",
    "            _, predicted = torch.max(outputs.data, 1)\n",
    "            total += labels.size(0)\n",
    "            correct += (predicted == labels).sum().item()\n",
    "\n",
    "        epoch_accuracy = 100 * correct / total\n",
    "        train_losses.append(epoch_loss / len(train_loader))\n",
    "        train_accuracies.append(epoch_accuracy)\n",
    "\n",
    "        print(f\"Epoch {epoch+1}/{epochs}, Loss: {epoch_loss/len(train_loader):.4f}, Accuracy: {epoch_accuracy:.2f}%\")\n",
    "\n",
    "    return train_losses, train_accuracies\n",
    "\n",
    "\n",
    "# Model evaluation\n",
    "def evaluate_model(model, test_loader):\n",
    "    \"\"\"Evaluate `model` on `test_loader`; print and return the accuracy (0.0-1.0).\n",
    "\n",
    "    Fix: the computed accuracy was printed but discarded; returning it lets\n",
    "    callers log or compare runs programmatically (backward-compatible, since\n",
    "    existing callers simply ignore the return value).\n",
    "    \"\"\"\n",
    "    model.eval()\n",
    "    all_labels = []\n",
    "    all_preds = []\n",
    "\n",
    "    # Disable autograd during inference to save memory and compute.\n",
    "    with torch.no_grad():\n",
    "        for inputs, labels in test_loader:\n",
    "            inputs = inputs.view(inputs.size(0), -1)  # flatten images to vectors\n",
    "            outputs = model(inputs)\n",
    "            _, predicted = torch.max(outputs, 1)\n",
    "            all_labels.extend(labels.numpy())\n",
    "            all_preds.extend(predicted.numpy())\n",
    "\n",
    "    accuracy = accuracy_score(all_labels, all_preds)\n",
    "    print(f\"Test Accuracy: {accuracy * 100:.2f}%\")\n",
    "    return accuracy\n",
    "\n",
    "\n",
    "# Plot learning curves\n",
    "def plot_learning_curve(train_losses, train_accuracies):\n",
    "    \"\"\"Plot per-epoch training loss and accuracy side by side.\n",
    "\n",
    "    Args:\n",
    "        train_losses: list of mean training losses, one per epoch.\n",
    "        train_accuracies: list of training accuracies (%), one per epoch.\n",
    "\n",
    "    Fix: both plots set `label=` but never called `plt.legend()`, so the\n",
    "    labels were invisible; a legend is now drawn on each subplot.\n",
    "    \"\"\"\n",
    "    epochs = range(1, len(train_losses) + 1)\n",
    "    plt.figure(figsize=(12, 5))\n",
    "\n",
    "    # Loss curve\n",
    "    plt.subplot(1, 2, 1)\n",
    "    plt.plot(epochs, train_losses, label='Training Loss')\n",
    "    plt.xlabel('Epochs')\n",
    "    plt.ylabel('Loss')\n",
    "    plt.title('Training Loss')\n",
    "    plt.legend()\n",
    "\n",
    "    # Accuracy curve\n",
    "    plt.subplot(1, 2, 2)\n",
    "    plt.plot(epochs, train_accuracies, label='Training Accuracy')\n",
    "    plt.xlabel('Epochs')\n",
    "    plt.ylabel('Accuracy (%)')\n",
    "    plt.title('Training Accuracy')\n",
    "    plt.legend()\n",
    "\n",
    "    plt.tight_layout()\n",
    "    plt.show()\n",
    "\n",
    "\n",
    "# Main program\n",
    "if __name__ == \"__main__\":\n",
    "    # Load the MNIST dataset\n",
    "    mnist_train_loader, mnist_test_loader = load_mnist()\n",
    "\n",
    "    # Wi-Fi dataset lives in a CSV next to the notebook; load_wifi_data\n",
    "    # generates a sample file there if it is missing.\n",
    "    # Fix: was a hardcoded absolute Windows path ending in .xls, which was\n",
    "    # non-portable and inconsistent with the CSV read/write in load_wifi_data.\n",
    "    wifi_train_loader = load_wifi_data(\"wifi_data.csv\")  # NOTE(review): loaded but not used below yet\n",
    "\n",
    "    # Model hyperparameters\n",
    "    input_size = 28 * 28  # flattened 28x28 MNIST image\n",
    "    hidden_size = 128     # hidden-layer width\n",
    "    output_size = 10      # ten digit classes\n",
    "    fnn_model = FNN(input_size, hidden_size, output_size)\n",
    "\n",
    "    # Loss function and optimizer\n",
    "    criterion = nn.CrossEntropyLoss()\n",
    "    optimizer = optim.Adam(fnn_model.parameters(), lr=0.001)\n",
    "\n",
    "    # Train the model\n",
    "    train_losses, train_accuracies = train_model(fnn_model, mnist_train_loader, criterion, optimizer, epochs=10)\n",
    "\n",
    "    # Evaluate on the held-out test set\n",
    "    evaluate_model(fnn_model, mnist_test_loader)\n",
    "\n",
    "    # Plot learning curves\n",
    "    plot_learning_curve(train_losses, train_accuracies)\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
