{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b3d210bb-cb03-475d-aa92-da5d77265d2e",
   "metadata": {},
   "outputs": [],
   "source": [
    "class ConvNet(nn.Module):\n",
    "    def __init__(self):\n",
    "        super(ConvNet, self).__init__()\n",
    "        self.conv1 = nn.Sequential(\n",
    "            nn.Conv1d(1, 16, kernel_size=21, padding=0),\n",
    "            nn.BatchNorm1d(16),\n",
    "            nn.ReLU()\n",
    "        )\n",
    "        self.conv2 = nn.Sequential(\n",
    "            nn.Conv1d(16, 32, kernel_size=19, padding=0),\n",
    "            nn.BatchNorm1d(32),\n",
    "            nn.ReLU()\n",
    "        )\n",
    "        self.conv3 = nn.Sequential(\n",
    "            nn.Conv1d(32, 64, kernel_size=17, padding=0),\n",
    "            nn.BatchNorm1d(64),\n",
    "            nn.ReLU()\n",
    "        )\n",
    "        self.fc = nn.Linear(38080, 1)  # 8960 ,17920\n",
    "        self.drop = nn.Dropout(0.2)\n",
    "\n",
    "    def forward(self, out):\n",
    "        out = self.conv1(out)\n",
    "        out = self.conv2(out)\n",
    "        out = self.conv3(out)\n",
    "        out = out.view(out.size(0), -1)\n",
    "        # print(out.size(1))\n",
    "        out = self.fc(out)\n",
    "        return out\n",
    "\n",
    "\n",
    "class AlexNet(nn.Module):\n",
    "    def __init__(self, num_classes=1, reduction=16):\n",
    "        super(AlexNet, self).__init__()\n",
    "        self.features = nn.Sequential(\n",
    "            # conv1\n",
    "            nn.Conv1d(1, 16, kernel_size=3, stride=1, padding=1),\n",
    "            nn.BatchNorm1d(num_features=16),\n",
    "            nn.ReLU(inplace=True),\n",
    "            # nn.LeakyReLU(inplace=True),\n",
    "            nn.MaxPool1d(kernel_size=2, stride=2),\n",
    "            # conv2\n",
    "            nn.Conv1d(16, 32, kernel_size=3, stride=1, padding=1),\n",
    "            nn.BatchNorm1d(num_features=32),\n",
    "            nn.ReLU(inplace=True),\n",
    "            # nn.LeakyReLU(inplace=True),\n",
    "            nn.MaxPool1d(kernel_size=2, stride=2),\n",
    "            # conv3\n",
    "            nn.Conv1d(32, 64, kernel_size=3, stride=1, padding=1),\n",
    "            nn.ReLU(inplace=True),\n",
    "            # nn.LeakyReLU(inplace=True),\n",
    "            nn.MaxPool1d(kernel_size=2, stride=2),\n",
    "            # conv4\n",
    "            nn.Conv1d(64, 128, kernel_size=3, stride=1, padding=1),\n",
    "            nn.BatchNorm1d(num_features=128),\n",
    "            nn.ReLU(inplace=True),\n",
    "            # nn.LeakyReLU(inplace=True),\n",
    "            nn.MaxPool1d(kernel_size=2, stride=2),\n",
    "            # conv5\n",
    "            nn.Conv1d(128, 192, kernel_size=3, stride=1, padding=1),\n",
    "            nn.BatchNorm1d(num_features=192),\n",
    "            nn.ReLU(inplace=True),\n",
    "            nn.MaxPool1d(kernel_size=2, stride=2),\n",
    "            # SELayer(256, reduction),\n",
    "            # nn.LeakyReLU(inplace=True),\n",
    "        )\n",
    "        self.reg = nn.Sequential(\n",
    "            nn.Linear(3840, 1000),  # 根据自己数据集修改\n",
    "            nn.ReLU(inplace=True),\n",
    "            # nn.LeakyReLU(inplace=True),\n",
    "            nn.Linear(1000, 500),\n",
    "            nn.ReLU(inplace=True),\n",
    "            # nn.LeakyReLU(inplace=True),\n",
    "            nn.Dropout(0.5),\n",
    "            nn.Linear(500, num_classes),\n",
    "        )\n",
    "\n",
    "    def forward(self, x):\n",
    "        out = self.features(x)\n",
    "        out = out.flatten(start_dim=1)\n",
    "        out = self.reg(out)\n",
    "        return out\n",
    "\n",
    "\n",
    "class Inception(nn.Module):\n",
    "    def __init__(self, in_c, c1, c2, c3, out_C):\n",
    "        super(Inception, self).__init__()\n",
    "        self.p1 = nn.Sequential(\n",
    "            nn.Conv1d(in_c, c1, kernel_size=1, padding=0),\n",
    "            nn.Conv1d(c1, c1, kernel_size=3, padding=1)\n",
    "        )\n",
    "        self.p2 = nn.Sequential(\n",
    "            nn.Conv1d(in_c, c2, kernel_size=1, padding=0),\n",
    "            nn.Conv1d(c2, c2, kernel_size=5, padding=2)\n",
    "\n",
    "        )\n",
    "        self.p3 = nn.Sequential(\n",
    "            nn.MaxPool1d(kernel_size=3, stride=1, padding=1),\n",
    "            nn.Conv1d(in_c, c3, kernel_size=3, padding=1),\n",
    "        )\n",
    "        self.conv_linear = nn.Conv1d((c1 + c2 + c3), out_C, 1, 1, 0, bias=True)\n",
    "        self.short_cut = nn.Sequential()\n",
    "        if in_c != out_C:\n",
    "            self.short_cut = nn.Sequential(\n",
    "                nn.Conv1d(in_c, out_C, 1, 1, 0, bias=False),\n",
    "\n",
    "            )\n",
    "\n",
    "    def forward(self, x):\n",
    "        p1 = self.p1(x)\n",
    "        p2 = self.p2(x)\n",
    "        p3 = self.p3(x)\n",
    "        out = torch.cat((p1, p2, p3), dim=1)\n",
    "        out += self.short_cut(x)\n",
    "        return out\n",
    "\n",
    "\n",
    "class DeepSpectra(nn.Module):\n",
    "    \"\"\"DeepSpectra-style regressor: a strided conv front end, one Inception\n",
    "    block, and a fully connected head.\n",
    "\n",
    "    The head expects 20640 flattened features (96 channels x length 215,\n",
    "    i.e. an input length of 647-649 given the stride-3 front conv).\n",
    "\n",
    "    NOTE(review): ``self.dropout`` is constructed but never applied in\n",
    "    ``forward`` -- confirm whether it should be used or removed.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self):\n",
    "        super(DeepSpectra, self).__init__()\n",
    "        self.conv1 = nn.Sequential(\n",
    "            nn.Conv1d(1, 16, kernel_size=5, stride=3, padding=0)\n",
    "        )\n",
    "        self.Inception = Inception(16, 32, 32, 32, 96)\n",
    "        self.fc = nn.Sequential(\n",
    "            nn.Linear(20640, 5000),\n",
    "            nn.Dropout(0.5),\n",
    "            nn.Linear(5000, 1)\n",
    "        )\n",
    "        self.dropout = nn.Dropout(0.1)\n",
    "\n",
    "    def forward(self, x):\n",
    "        out = self.conv1(x)\n",
    "        out = self.Inception(out)\n",
    "        out = out.view(out.size(0), -1)\n",
    "        return self.fc(out)\n",
    "\n",
    "\n",
    "class SpectraCNN(nn.Module):\n",
    "    \"\"\"\n",
    "    实例化模型\n",
    "    model = SpectraCNN()\n",
    "\n",
    "    损失函数和优化器\n",
    "    criterion = nn.MSELoss()\n",
    "    optimizer = torch.optim.Adam(model.parameters(), lr=0.001, weight_decay=0.001)\n",
    "\n",
    "    训练模型\n",
    "    for epoch in range(EPOCH):\n",
    "        for data in train_loader:\n",
    "            inputs, labels = data\n",
    "            inputs = inputs.unsqueeze(1) # 增加一维，因为CNN输入需要4维\n",
    "            inputs, labels = inputs.to(device), labels.to(device)\n",
    "            optimizer.zero_grad()\n",
    "            outputs = model(inputs)\n",
    "            loss = criterion(outputs, labels)\n",
    "            loss.backward()\n",
    "            optimizer.step()\n",
    "    # 每个epoch结束后输出loss\n",
    "    print(\"Epoch: {}, Loss: {:.4f}\".format(epoch+1, loss.item()))\n",
    "\n",
    "    测试模型\n",
    "    with torch.no_grad():\n",
    "        correct = 0\n",
    "        total = 0\n",
    "        for data in test_loader:\n",
    "            inputs, labels = data\n",
    "            inputs = inputs.unsqueeze(1)\n",
    "            inputs, labels = inputs.to(device), labels.to(device)\n",
    "            outputs = model(inputs)\n",
    "            _, predicted = torch.max(outputs.data, 1)\n",
    "            total += labels.size(0)\n",
    "            correct += (predicted == labels).sum().item()\n",
    "        print(\"Accuracy: {:.2f}%\".format(100 * correct / total))\n",
    "    \"\"\"\n",
    "    def __init__(self):\n",
    "        super(SpectraCNN, self).__init__()\n",
    "\n",
    "        # Input layer\n",
    "        self.conv1 = nn.Conv1d(in_channels=1, out_channels=16, kernel_size=3, padding=1)\n",
    "        self.bn1 = nn.BatchNorm1d(16)\n",
    "        self.pool1 = nn.MaxPool1d(kernel_size=2)\n",
    "        self.dropout1 = nn.Dropout(p=0.25)\n",
    "\n",
    "        # Hidden layer 1\n",
    "        self.conv2 = nn.Conv1d(in_channels=16, out_channels=32, kernel_size=3, padding=1)\n",
    "        self.bn2 = nn.BatchNorm1d(32)\n",
    "        self.pool2 = nn.MaxPool1d(kernel_size=2)\n",
    "        self.dropout2 = nn.Dropout(p=0.25)\n",
    "\n",
    "        # Hidden layer 2\n",
    "        self.conv3 = nn.Conv1d(in_channels=32, out_channels=64, kernel_size=3, padding=1)\n",
    "        self.bn3 = nn.BatchNorm1d(64)\n",
    "        self.pool3 = nn.MaxPool1d(kernel_size=2)\n",
    "        self.dropout3 = nn.Dropout(p=0.25)\n",
    "\n",
    "        # Output layer\n",
    "        self.fc = nn.Linear(in_features=5184, out_features=1)\n",
    "\n",
    "    def forward(self, x):\n",
    "        x = self.conv1(x)\n",
    "        x = self.bn1(x)\n",
    "        x = self.pool1(x)\n",
    "        x = self.dropout1(x)\n",
    "        x = self.conv2(x)\n",
    "        x = self.bn2(x)\n",
    "        x = self.pool2(x)\n",
    "        x = self.dropout2(x)\n",
    "\n",
    "        x = self.conv3(x)\n",
    "        x = self.bn3(x)\n",
    "        x = self.pool3(x)\n",
    "        x = self.dropout3(x)\n",
    "\n",
    "        x = x.view(x.size(0), -1)\n",
    "        x = self.fc(x)\n",
    "\n",
    "        return x\n",
    "\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.10"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
