{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "618be034-f18f-421c-ae6b-f070edaf0602",
   "metadata": {},
   "outputs": [],
   "source": [
     "class Resnet(torch.nn.Module):\n",
     "    # WIP 1-D ResNet variant with a configurable number of extra bottleneck\n",
     "    # blocks (c_num - 1 of them) collected in self.layers.\n",
     "    # NOTE(review): no forward() is defined in this cell, and Bottlrneck is\n",
     "    # called with an extra acti argument that the Bottlrneck defined later in\n",
     "    # this notebook does not accept -- confirm which definition is intended.\n",
     "    def __init__(self,acti,c_num):\n",
     "        # acti: activation-related argument forwarded to every Bottlrneck\n",
     "        # (its semantics are not visible in this notebook -- TODO confirm).\n",
     "        super(Resnet, self).__init__()\n",
     "        # Stem: Conv1d(1->16, k=7, stride 2) + MaxPool1d(3, stride 2), then\n",
     "        # three non-downsampling bottlenecks ending at 64 channels.\n",
     "        self.features = torch.nn.Sequential(\n",
     "            torch.nn.Conv1d(1,16,kernel_size=7,stride=2,padding=3),\n",
     "            torch.nn.MaxPool1d(3,2,1),\n",
     "            Bottlrneck(16,16,64,False,acti),\n",
     "            Bottlrneck(64,16,64,False,acti),\n",
     "            Bottlrneck(64,16,64,False,acti),\n",
     "        )\n",
     "        self.layers=nn.ModuleList([])\n",
     "        input_channel=64\n",
     "        output_channel=128\n",
     "        med_channel=64\n",
     "        for i in range(1,c_num):\n",
     "            # Blocks 1, 4 and 7 downsample (stride 2); the bitwise | on bool\n",
     "            # comparisons acts as a logical or here.\n",
     "            self.layers.append(Bottlrneck(input_channel,med_channel,output_channel,(1==i)|(4==i)|(7==i),acti))\n",
     "            if (1==i)|(4==i)|(7==i):\n",
     "                # After a downsampling block the following block consumes\n",
     "                # twice as many channels.\n",
     "                input_channel=input_channel*2\n",
     "            elif (3==i)|(6==i):\n",
     "                # Just before the next downsampling block, widen the middle\n",
     "                # and output channel counts.\n",
     "                med_channel=med_channel*2\n",
     "                output_channel=output_channel*2\n",
     "        self.layers.append(torch.nn.AdaptiveAvgPool1d(1))\n",
     "        # Head: pooled 2048-dim vector -> scalar. NOTE(review): 2048 assumes\n",
     "        # c_num is large enough to reach 2048 output channels -- confirm.\n",
     "        self.classifer = torch.nn.Sequential(\n",
     "            torch.nn.Linear(2048,1)\n",
     "        )\n",
     "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "9d9ad9e9-5c84-4586-8efd-a14f4435d0e7",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "1\n",
      "a\n",
      "2\n",
      "a\n",
      "3\n"
     ]
    }
   ],
   "source": [
    "c_num=3\n",
    "for i in range(1,c_num+1):\n",
    "    print(i)\n",
    "    if (i!=c_num):\n",
    "        print(\"a\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4e8f1b35-916f-4459-b9ee-6d2af5d27290",
   "metadata": {},
   "outputs": [],
   "source": [
     "class Resnet(torch.nn.Module):\n",
     "    # WIP 1-D ResNet-50-style variant: fixed 256-channel first stage plus\n",
     "    # c_num - 1 programmatically built Bottlrneck stages in self.layers.\n",
     "    # NOTE(review): no forward() is defined in this cell, and Bottlrneck is\n",
     "    # called with an extra acti argument that the Bottlrneck defined later in\n",
     "    # this notebook does not accept -- confirm which definition is intended.\n",
     "    def __init__(self,acti,c_num):\n",
     "        # acti: activation-related argument forwarded to every Bottlrneck\n",
     "        # (its semantics are not visible in this notebook -- TODO confirm).\n",
     "        super(Resnet, self).__init__()\n",
     "        # Stem: Conv1d(1->64, k=7, stride 2) + MaxPool1d(3, stride 2), then\n",
     "        # three non-downsampling bottlenecks ending at 256 channels.\n",
     "        self.features = torch.nn.Sequential(\n",
     "            torch.nn.Conv1d(1,64,kernel_size=7,stride=2,padding=3),\n",
     "            torch.nn.MaxPool1d(3,2,1),\n",
     "            Bottlrneck(64,64,256,False,acti),\n",
     "            Bottlrneck(256,64,256,False,acti),\n",
     "            Bottlrneck(256,64,256,False,acti),\n",
     "        )\n",
     "        self.layers=nn.ModuleList([])\n",
     "        input_channel=256\n",
     "        output_channel=512\n",
     "        med_channel=128\n",
     "        for i in range(1,c_num):\n",
     "            # Blocks 1, 4 and 7 downsample (stride 2); the bitwise | on bool\n",
     "            # comparisons acts as a logical or here.\n",
     "            self.layers.append(Bottlrneck(input_channel,med_channel,output_channel,(1==i)|(4==i)|(7==i),acti))\n",
     "            if (1==i)|(4==i)|(7==i):\n",
     "                # After a downsampling block the following block consumes\n",
     "                # twice as many channels.\n",
     "                input_channel=input_channel*2\n",
     "            elif (3==i)|(6==i):\n",
     "                # Just before the next downsampling block, widen the middle\n",
     "                # and output channel counts.\n",
     "                med_channel=med_channel*2\n",
     "                output_channel=output_channel*2\n",
     "        self.layers.append(torch.nn.AdaptiveAvgPool1d(1))\n",
     "        # Head: pooled 2048-dim vector -> scalar. NOTE(review): 2048 assumes\n",
     "        # c_num is large enough to reach 2048 output channels -- confirm.\n",
     "        self.classifer = torch.nn.Sequential(\n",
     "            torch.nn.Linear(2048,1)\n",
     "        )"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "04027f94-9209-4b1c-8a11-341e456e8d52",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "from collections.abc import Iterable\n",
    "\n",
    "class Bottlrneck(torch.nn.Module):\n",
    "    def __init__(self,In_channel,Med_channel,Out_channel,downsample=False):\n",
    "        super(Bottlrneck, self).__init__()\n",
    "        self.stride = 1\n",
    "        if downsample == True:\n",
    "            self.stride = 2\n",
    "\n",
    "        self.layer = torch.nn.Sequential(\n",
    "            torch.nn.Conv1d(In_channel, Med_channel, 1, self.stride),\n",
    "            torch.nn.BatchNorm1d(Med_channel),\n",
    "            torch.nn.ReLU(),\n",
    "            torch.nn.Conv1d(Med_channel, Med_channel, 3, padding=1),\n",
    "            torch.nn.BatchNorm1d(Med_channel),\n",
    "            torch.nn.ReLU(),\n",
    "            torch.nn.Conv1d(Med_channel, Out_channel, 1),\n",
    "            torch.nn.BatchNorm1d(Out_channel),\n",
    "            torch.nn.ReLU(),\n",
    "        )\n",
    "\n",
    "        if In_channel != Out_channel:\n",
    "            self.res_layer = torch.nn.Conv1d(In_channel, Out_channel,1,self.stride)\n",
    "        else:\n",
    "            self.res_layer = None\n",
    "\n",
    "    def forward(self,x):\n",
    "        if self.res_layer is not None:\n",
    "            residual = self.res_layer(x)\n",
    "        else:\n",
    "            residual = x\n",
    "        return self.layer(x)+residual\n",
    "\n",
    "\n",
    "class Resnet(torch.nn.Module):\n",
    "    def __init__(self,in_channels=1,classes=1):\n",
    "        super(Resnet, self).__init__()\n",
    "        self.features = torch.nn.Sequential(\n",
    "            torch.nn.Conv1d(in_channels,64,kernel_size=7,stride=2,padding=3),\n",
    "            torch.nn.MaxPool1d(3,2,1),\n",
    "\n",
    "            Bottlrneck(64,64,256,False),\n",
    "            Bottlrneck(256,64,256,False),\n",
    "            Bottlrneck(256,64,256,False),\n",
    "            #\n",
    "            \n",
    "            Bottlrneck(256,128,512, True),\n",
    "            Bottlrneck(512,128,512, False),\n",
    "            Bottlrneck(512,128,512, False),\n",
    "            #\n",
    "            Bottlrneck(512,256,1024, True),\n",
    "            Bottlrneck(1024,256,1024, False),\n",
    "            Bottlrneck(1024,256,1024, False),\n",
    "            #\n",
    "            Bottlrneck(1024,512,2048, True),\n",
    "            Bottlrneck(2048,512,2048, False),\n",
    "            Bottlrneck(2048,512,2048, False),\n",
    "\n",
    "            torch.nn.AdaptiveAvgPool1d(1)\n",
    "        )\n",
    "        self.classifer = torch.nn.Sequential(\n",
    "            torch.nn.Linear(2048,classes)\n",
    "        )\n",
    "\n",
    "    def forward(self,x):\n",
    "        x = self.features(x)\n",
    "        x = x.view(-1,2048)\n",
    "        x = self.classifer(x)\n",
    "        return x\n",
    "\n",
    "#纵向结构\n",
    "class ConvNet(nn.Module):\n",
    "    def __init__(self):\n",
    "        super(ConvNet,self).__init__()\n",
    "        self.conv1 = nn.Sequential(\n",
    "            nn.Conv1d(1, 16, kernel_size=21, padding=0),\n",
    "            nn.BatchNorm1d(16),\n",
    "            nn.ReLU()#探索激活函数的性能\n",
    "        )\n",
    "        self.conv2 = nn.Sequential(\n",
    "            nn.Conv1d(16, 32, kernel_size=19, padding=0),\n",
    "            nn.BatchNorm1d(32),\n",
    "            nn.ReLU()\n",
    "        )\n",
    "        self.conv3 = nn.Sequential(\n",
    "            nn.Conv1d(32, 64, kernel_size=17, padding=0),\n",
    "            nn.BatchNorm1d(64),\n",
    "            nn.ReLU()\n",
    "        )\n",
    "        self.fc = nn. Linear(38080,1) #8960 ,17920\n",
    "        self.drop = nn.Dropout(0.2)\n",
    "\n",
    "    def forward(self,out):\n",
    "        out = self.conv1(out)\n",
    "        out = self.conv2(out)\n",
    "        out = self.conv3(out)\n",
    "        out = out.view(out.size(0),-1)\n",
    "        # print(out.size(1))\n",
    "        out = self.fc(out)\n",
    "        return out\n",
    "\n",
    "\n",
    "class AlexNet(nn.Module):\n",
    "    def __init__(self, num_classes=1, reduction=16):\n",
    "        super(AlexNet, self).__init__()\n",
    "        self.features = nn.Sequential(\n",
    "            # conv1\n",
    "            nn.Conv1d(1, 16, kernel_size=3, stride=1, padding=1),\n",
    "            nn.BatchNorm1d(num_features=16),\n",
    "            nn.ReLU(inplace=True),\n",
    "            # nn.LeakyReLU(inplace=True),\n",
    "            nn.MaxPool1d(kernel_size=2, stride=2),\n",
    "            # conv2\n",
    "            nn.Conv1d(16, 32, kernel_size=3, stride=1, padding=1),\n",
    "            nn.BatchNorm1d(num_features=32),\n",
    "            nn.ReLU(inplace=True),\n",
    "            # nn.LeakyReLU(inplace=True),\n",
    "            nn.MaxPool1d(kernel_size=2, stride=2),\n",
    "            # conv3\n",
    "            nn.Conv1d(32, 64, kernel_size=3, stride=1, padding=1),\n",
    "            nn.ReLU(inplace=True),\n",
    "            nn.LeakyReLU(inplace=True),\n",
    "            nn.MaxPool1d(kernel_size=2, stride=2),\n",
    "            # conv4\n",
    "            nn.Conv1d(64, 128, kernel_size=3, stride=1, padding=1),\n",
    "            nn.BatchNorm1d(num_features=128),\n",
    "            nn.ReLU(inplace=True),\n",
    "            nn.LeakyReLU(inplace=True),\n",
    "            nn.MaxPool1d(kernel_size=2, stride=2),\n",
    "            # conv5\n",
    "            nn.Conv1d(128, 256, kernel_size=3, stride=1, padding=1),\n",
    "            nn.BatchNorm1d(num_features=256),\n",
    "            nn.ReLU(inplace=True),\n",
    "            nn.MaxPool1d(kernel_size=2, stride=2),\n",
    "            # SELayer(256, reduction),\n",
    "            # nn.LeakyReLU(inplace=True),\n",
    "            #nn.Conv1d(256, 512, kernel_size=3, stride=1, padding=1),\n",
    "            #nn.BatchNorm1d(num_features=512),\n",
    "            #nn.ReLU(inplace=True),\n",
    "            #nn.MaxPool1d(kernel_size=2, stride=2),\n",
    "            \n",
    "            \n",
    "        )\n",
    "        self.reg = nn.Sequential(\n",
    "            nn.Linear(33280,1000),#根据自己数据集修改\n",
    "            nn.ReLU(inplace=True),\n",
    "            # nn.LeakyReLU(inplace=True),\n",
    "            nn.Linear(1000, 500),\n",
    "            nn.ReLU(inplace=True),\n",
    "            # nn.LeakyReLU(inplace=True),\n",
    "            nn.Dropout(0.5),\n",
    "            nn.Linear(500, num_classes),\n",
    "        )\n",
    "\n",
    "    def forward(self, x):\n",
    "        out = self.features(x)\n",
    "        out = out.flatten(start_dim=1)\n",
    "        out = self.reg(out)\n",
    "        return out\n",
    "\n",
    "class Inception(nn.Module):\n",
    "    def __init__(self,in_c,c1,c2,c3,out_C):\n",
    "        super(Inception,self).__init__()\n",
    "        self.p1 = nn.Sequential(\n",
    "            nn.Conv1d(in_c, c1,kernel_size=1,padding=0),\n",
    "            nn.Conv1d(c1, c1, kernel_size=3, padding=1)\n",
    "        )\n",
    "        self.p2 = nn.Sequential(\n",
    "            nn.Conv1d(in_c, c2,kernel_size=1,padding=0),\n",
    "            nn.Conv1d(c2, c2, kernel_size=5, padding=2)\n",
    "\n",
    "        )\n",
    "        self.p3 = nn.Sequential(\n",
    "            nn.MaxPool1d(kernel_size=3,stride=1,padding=1),\n",
    "            nn.Conv1d(in_c, c3,kernel_size=3,padding=1),\n",
    "        )\n",
    "        self.conv_linear = nn.Conv1d((c1+c2+c3), out_C, 1, 1, 0, bias=True)\n",
    "        self.short_cut = nn.Sequential()\n",
    "        if in_c != out_C:\n",
    "            self.short_cut = nn.Sequential(\n",
    "                nn.Conv1d(in_c, out_C, 1, 1, 0, bias=False),\n",
    "\n",
    "            )\n",
    "    def forward(self, x):\n",
    "        p1 = self.p1(x)\n",
    "        p2 = self.p2(x)\n",
    "        p3 = self.p3(x)\n",
    "        out =  torch.cat((p1,p2,p3),dim=1)\n",
    "        out += self.short_cut(x)\n",
    "        return out\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "class DeepSpectra(nn.Module):\n",
    "    def __init__(self):\n",
    "        super(DeepSpectra, self).__init__()\n",
    "        self.conv1 = nn.Sequential(\n",
    "            nn.Conv1d(1, 16, kernel_size=5, stride=3, padding=0)\n",
    "        )\n",
    "        self.Inception = Inception(16, 32, 32, 32, 96)\n",
    "        self.fc = nn.Sequential(\n",
    "            nn.Linear(20640, 5000),\n",
    "            nn.Dropout(0.5),\n",
    "            nn.Linear(5000, 1)\n",
    "        )\n",
    "        self.dropout = nn.Dropout(0.1)\n",
    "\n",
    "    def forward(self, x):\n",
    "        x = self.conv1(x)\n",
    "        x = self.Inception(x)\n",
    "        x = x.view(x.size(0), -1)\n",
    "        x = self.fc(x)\n",
    "\n",
    "        return x\n",
    "\n",
    "\n",
    "class DenseLayer(torch.nn.Module):\n",
    "    def __init__(self,in_channels,middle_channels=128,out_channels=32):\n",
    "        super(DenseLayer, self).__init__()\n",
    "        self.layer = torch.nn.Sequential(\n",
    "            torch.nn.BatchNorm1d(in_channels),\n",
    "            torch.nn.ReLU(inplace=True),\n",
    "            torch.nn.Conv1d(in_channels,middle_channels,1),\n",
    "            torch.nn.BatchNorm1d(middle_channels),\n",
    "            torch.nn.ReLU(inplace=True),\n",
    "            torch.nn.Conv1d(middle_channels,out_channels,3,padding=1)\n",
    "        )\n",
    "    def forward(self,x):\n",
    "        return torch.cat([x,self.layer(x)],dim=1)\n",
    "\n",
    "\n",
    "class DenseBlock(torch.nn.Sequential):\n",
    "    def __init__(self,layer_num,growth_rate,in_channels,middele_channels=128):\n",
    "        super(DenseBlock, self).__init__()\n",
    "        for i in range(layer_num):\n",
    "            layer = DenseLayer(in_channels+i*growth_rate,middele_channels,growth_rate)\n",
    "            self.add_module('denselayer%d'%(i),layer)\n",
    "\n",
    "class Transition(torch.nn.Sequential):\n",
    "    def __init__(self,channels):\n",
    "        super(Transition, self).__init__()\n",
    "        self.add_module('norm',torch.nn.BatchNorm1d(channels))\n",
    "        self.add_module('relu',torch.nn.ReLU(inplace=True))\n",
    "        self.add_module('conv',torch.nn.Conv1d(channels,channels//2,3,padding=1))\n",
    "        self.add_module('Avgpool',torch.nn.AvgPool1d(2))\n",
    "\n",
    "\n",
     "class DenseNet(torch.nn.Module):\n",
     "    # 1-D DenseNet-121-style regressor (layer_num=(6,12,24,16), growth 32).\n",
     "    # self.feature_channel_num is updated incrementally while the blocks\n",
     "    # are built: each DenseBlock adds layer_num[k]*growth_rate channels and\n",
     "    # each Transition halves them (hence the //2 on later inputs).\n",
     "    def __init__(self,layer_num=(6,12,24,16),growth_rate=32,init_features=64,in_channels=1,middele_channels=128,classes=1):\n",
     "        super(DenseNet, self).__init__()\n",
     "        self.feature_channel_num=init_features\n",
     "        # Stem: strided 7-wide conv + BN + ReLU + strided max-pool\n",
     "        # (overall length reduction by a factor of 4).\n",
     "        self.conv=torch.nn.Conv1d(in_channels,self.feature_channel_num,7,2,3)\n",
     "        self.norm=torch.nn.BatchNorm1d(self.feature_channel_num)\n",
     "        self.relu=torch.nn.ReLU()\n",
     "        self.maxpool=torch.nn.MaxPool1d(3,2,1)\n",
     "\n",
     "        self.DenseBlock1=DenseBlock(layer_num[0],growth_rate,self.feature_channel_num,middele_channels)\n",
     "        self.feature_channel_num=self.feature_channel_num+layer_num[0]*growth_rate\n",
     "        self.Transition1=Transition(self.feature_channel_num)\n",
     "\n",
     "        # Transition1 halved the channels, so block 2 consumes //2 of them.\n",
     "        self.DenseBlock2=DenseBlock(layer_num[1],growth_rate,self.feature_channel_num//2,middele_channels)\n",
     "        self.feature_channel_num=self.feature_channel_num//2+layer_num[1]*growth_rate\n",
     "        self.Transition2 = Transition(self.feature_channel_num)\n",
     "\n",
     "        self.DenseBlock3 = DenseBlock(layer_num[2],growth_rate,self.feature_channel_num//2,middele_channels)\n",
     "        self.feature_channel_num=self.feature_channel_num//2+layer_num[2]*growth_rate\n",
     "        self.Transition3 = Transition(self.feature_channel_num)\n",
     "\n",
     "        # No Transition after the final dense block.\n",
     "        self.DenseBlock4 = DenseBlock(layer_num[3],growth_rate,self.feature_channel_num//2,middele_channels)\n",
     "        self.feature_channel_num=self.feature_channel_num//2+layer_num[3]*growth_rate\n",
     "\n",
     "        self.avgpool=torch.nn.AdaptiveAvgPool1d(1)\n",
     "\n",
     "        # Two-layer head with dropout; the final feature width is\n",
     "        # feature_channel_num (1024 for the default configuration).\n",
     "        self.classifer = torch.nn.Sequential(\n",
     "            torch.nn.Linear(self.feature_channel_num, self.feature_channel_num//2),\n",
     "            torch.nn.ReLU(),\n",
     "            torch.nn.Dropout(0.5),\n",
     "            torch.nn.Linear(self.feature_channel_num//2, classes),\n",
     "\n",
     "        )\n",
     "\n",
     "\n",
     "    def forward(self,x):\n",
     "        # x: (N, in_channels, L) -> (N, classes).\n",
     "        x = self.conv(x)\n",
     "        x = self.norm(x)\n",
     "        x = self.relu(x)\n",
     "        x = self.maxpool(x)\n",
     "\n",
     "        x = self.DenseBlock1(x)\n",
     "        x = self.Transition1(x)\n",
     "\n",
     "        x = self.DenseBlock2(x)\n",
     "        x = self.Transition2(x)\n",
     "\n",
     "        x = self.DenseBlock3(x)\n",
     "        x = self.Transition3(x)\n",
     "\n",
     "        x = self.DenseBlock4(x)\n",
     "        x = self.avgpool(x)\n",
     "        x = x.view(-1,self.feature_channel_num)\n",
     "        x = self.classifer(x)\n",
     "\n",
     "        return x\n",
    "\n",
    "    \n",
    "# class DenseNet(torch.nn.Module):\n",
    "#     def __init__(self,layer_num=(6,12,24,16),growth_rate=32,init_features=64,in_channels=1,middele_channels=128,classes=1):\n",
    "#         super(DenseNet, self).__init__()\n",
    "#         self.feature_channel_num=init_features\n",
    "#         self.features = torch.nn.Sequential(\n",
    "#             torch.nn.Conv1d(in_channels,self.feature_channel_num,7,2,3),\n",
    "#             torch.nn.BatchNorm1d(self.feature_channel_num),\n",
    "#             torch.nn.ReLU(),\n",
    "#             torch.nn.MaxPool1d(3,2,1),\n",
    "#         )\n",
    "#         self.DenseBlock1=DenseBlock(layer_num[0],growth_rate,self.feature_channel_num,middele_channels)\n",
    "#         self.feature_channel_num=self.feature_channel_num+layer_num[0]*growth_rate\n",
    "#         self.Transition1=Transition(self.feature_channel_num)\n",
    "#         self.layers=nn.ModuleList([])\n",
    "#         for i in range(1,4):\n",
    "#             self.layers.append(DenseBlock(layer_num[i],growth_rate,self.feature_channel_num//2,middele_channels))\n",
    "#             self.feature_channel_num=self.feature_channel_num//2+layer_num[i]*growth_rate\n",
    "#             if (i==1)|(i==2):\n",
    "#                 self.layers.append(Transition(self.feature_channel_num))\n",
    "#         self.layers.append(torch.nn.AdaptiveAvgPool1d(1))\n",
    "#         self.classifer = torch.nn.Sequential(\n",
    "#             torch.nn.Linear(self.feature_channel_num, self.feature_channel_num//2),\n",
    "#             torch.nn.ReLU(),\n",
    "#             torch.nn.Dropout(0.5),\n",
    "#             torch.nn.Linear(self.feature_channel_num//2, classes),\n",
    "\n",
    "#         )\n",
    "\n",
    "\n",
    "#     def forward(self,x):\n",
    "#         x = self.features(x)\n",
    "\n",
    "#         x = self.DenseBlock1(x)\n",
    "#         x = self.Transition1(x)\n",
    "#         out = x\n",
    "#         for layer in self.layers:\n",
    "#             out = layer(out)    \n",
    "#         out = x.view(-1,self.feature_channel_num)\n",
    "#         out = self.classifer(out)\n",
    "\n",
    "#         return out"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.10"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
