{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "846431df",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "import numpy as np\n",
    "import random\n",
    "import time\n",
    "import matplotlib.pyplot as plt\n",
    "from math import sin,pi"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "a60a7f32",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Dataset parameters\n",
    "time_window_size = 10    # width of each quantization window (same time units as the PRI values)\n",
    "origin_toa_length = 2048  # number of pulses generated per raw TOA sequence\n",
    "binary_tia_length = 1000  # number of time windows in the binarized sequence (network input size)\n",
    "scale = 10  # std-dev of the Gaussian timing jitter added to each TOA\n",
    "miss_ratio = 0.1  # fraction of windows randomly cleared (simulated missing pulses)\n",
    "error_ratio = 0.1  # fraction of windows randomly set (simulated spurious pulses)\n",
    "pri_type = 4  # number of PRI modulation types (constant / sliding / dwell-and-switch / wobulated)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "86b65512",
   "metadata": {},
   "outputs": [],
   "source": [
    "#generate TOA by a PRI list\n",
    "def TOA_Generator(PRI_list, length): #（PRI的数据，TOA列表长度）\n",
    "    TOA_list = [] #最终输出的TOA列表\n",
    "    TOA = 0 #上一项的TOA时间\n",
    "    TOA_count = 0 #TOA总数量，用于判断是否停止生成\n",
    "    while True:\n",
    "        for pri in PRI_list:\n",
    "            if TOA_count >= length:\n",
    "                return TOA_list\n",
    "            else:\n",
    "                TOA_list.append(TOA)\n",
    "                TOA += pri\n",
    "                TOA_count += 1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "e6adfe8a",
   "metadata": {},
   "outputs": [],
   "source": [
    "#为TOA序列添加高斯误差\n",
    "def Add_gauss_error_inTOA(TOA_list, scale): #scale越大，误差越大\n",
    "    length = len(TOA_list)\n",
    "    error_list = np.random.normal(0, scale, length)\n",
    "    for i,TOA in enumerate(TOA_list):\n",
    "        TOA_list[i] = TOA+round(error_list[i],0)\n",
    "    return TOA_list"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "cf241379",
   "metadata": {},
   "outputs": [],
   "source": [
    "#将TOA转化为二进制编码\n",
    "def TOA_to_Binary(TOA_list,time_window_size,length): #time_window_size为每个数组所代表的窗口大小，length是这个TOA_list总共包含多少个时间窗口\n",
    "    TOA_list_index = 0 #当前添加到这一项了\n",
    "    Binary_list = [0]*length\n",
    "    for TOA in TOA_list:\n",
    "        index = int(TOA//time_window_size)\n",
    "        if(index < length):\n",
    "            Binary_list[index-1] = 1\n",
    "        else:\n",
    "            break\n",
    "    return Binary_list"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "81959c52",
   "metadata": {},
   "outputs": [],
   "source": [
    "#添加随机丢失和错误脉冲\n",
    "def TOA_left_add(Binary_list, left_rate, add_rate): #取值范围0-1\n",
    "    length = len(Binary_list)\n",
    "    for i in range(int(length*left_rate)):\n",
    "        index = random.randint(0,length-1)\n",
    "        Binary_list[index]=0\n",
    "        \n",
    "    for i in range(int(length*add_rate)):\n",
    "        index = random.randint(0,length-1)\n",
    "        Binary_list[index]=1\n",
    "    \n",
    "    return Binary_list"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "1877a69b",
   "metadata": {},
   "outputs": [],
   "source": [
    "constant = [200]\n",
    "sliding = [100, 150, 200, 250, 300]\n",
    "ds = [110]*6 + [190]*6 + [320]*6\n",
    "wobulated = []\n",
    "#add in wobulated\n",
    "for i in range(30):\n",
    "    wobulated.append(int(150*sin(i*pi/15))+200)\n",
    "    \n",
    "pri_list = sliding\n",
    "pri_list_list = [constant,sliding,ds,wobulated]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "d6d7b006",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Origin TOA: [0, 100, 250, 450, 700, 1000, 1100, 1250, 1450, 1700, 2000, 2100, 2250, 2450, 2700, 3000, 3100, 3250, 3450, 3700, 4000, 4100, 4250, 4450, 4700, 5000, 5100, 5250, 5450, 5700, 6000, 6100, 6250, 6450, 6700, 7000, 7100, 7250, 7450, 7700, 8000, 8100, 8250, 8450, 8700, 9000, 9100, 9250, 9450, 9700]\n",
      "TOA with gauss error: [12.0, 100.0, 247.0, 446.0, 697.0, 994.0, 1105.0, 1240.0, 1459.0, 1710.0, 2003.0, 2090.0, 2276.0, 2459.0, 2700.0, 2987.0, 3107.0, 3236.0, 3461.0, 3702.0, 4008.0, 4094.0, 4229.0, 4447.0, 4704.0, 5001.0, 5109.0, 5231.0, 5436.0, 5717.0, 5996.0, 6108.0, 6238.0, 6464.0, 6705.0, 7002.0, 7095.0, 7240.0, 7458.0, 7709.0, 8011.0, 8104.0, 8240.0, 8461.0, 8712.0, 9031.0, 9096.0, 9237.0, 9454.0, 9699.0]\n",
      "Binary list: [1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0]\n",
      "Error Binary list: [1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0]\n"
     ]
    }
   ],
   "source": [
    "# Smoke-test of the dataset-generation API\n",
    "TOA = TOA_Generator(pri_list,origin_toa_length) # first build a clean, error-free TOA sequence\n",
    "print(\"Origin TOA:\",TOA[:50])\n",
    "TOA = Add_gauss_error_inTOA(TOA,scale) # then add Gaussian timing jitter to it\n",
    "print(\"TOA with gauss error:\",TOA[:50])\n",
    "Binary_list = TOA_to_Binary(TOA, time_window_size, binary_tia_length) # quantize the TOA list into a binary window code\n",
    "print(\"Binary list:\",Binary_list[:50])\n",
    "Binary_list = TOA_left_add(Binary_list, miss_ratio, error_ratio) # inject missing and spurious pulses\n",
    "print(\"Error Binary list:\",Binary_list[:50])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "7e210fe4",
   "metadata": {},
   "outputs": [],
   "source": [
    "class DNN(nn.Module):\n",
    "    \"\"\"Fully connected denoising network.\n",
    "\n",
    "    A symmetric stack of linear layers sized\n",
    "    [binary_tia_length, 128, 64, 32, 32, 64, 128, binary_tia_length]\n",
    "    followed by a final projection to pri_type outputs.  ReLU follows every\n",
    "    layer except the last one, which stays linear so the outputs are unbounded.\n",
    "    Attribute names layer1..layer8 are kept so saved state_dicts stay loadable.\n",
    "    \"\"\"\n",
    "    def __init__(self):\n",
    "        super().__init__()\n",
    "        # Neuron count per layer: input size first, output size last.\n",
    "        sizes = [binary_tia_length,128,64,32,32,64,128,binary_tia_length,pri_type]\n",
    "        # Each nn.Linear holds a (sizes[k+1] x sizes[k]) weight matrix plus a bias.\n",
    "        self.layer1 = nn.Linear(sizes[0],sizes[1])\n",
    "        self.layer2 = nn.Linear(sizes[1],sizes[2])\n",
    "        self.layer3 = nn.Linear(sizes[2],sizes[3])\n",
    "        self.layer4 = nn.Linear(sizes[3],sizes[4])\n",
    "        self.layer5 = nn.Linear(sizes[4],sizes[5])\n",
    "        self.layer6 = nn.Linear(sizes[5],sizes[6])\n",
    "        self.layer7 = nn.Linear(sizes[6],sizes[7])\n",
    "        self.layer8 = nn.Linear(sizes[7],sizes[8])\n",
    "        self.relu = nn.ReLU()  # shared non-linearity between the linear layers\n",
    "\n",
    "        print(\"Parameters:\",self.parameters(),self.__dict__.items())\n",
    "\n",
    "    def forward(self,d):  # d: input batch of shape (N, binary_tia_length)\n",
    "        \"\"\"Apply layer1..layer7 each followed by ReLU, then the linear layer8.\"\"\"\n",
    "        net = d\n",
    "        for hidden in (self.layer1, self.layer2, self.layer3, self.layer4,\n",
    "                       self.layer5, self.layer6, self.layer7):\n",
    "            net = self.relu(hidden(net))\n",
    "        return self.layer8(net)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "id": "d9c20ccc",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Parameters: <generator object Module.parameters at 0x7fb869901ac0> dict_items([('training', True), ('_parameters', OrderedDict()), ('_buffers', OrderedDict()), ('_non_persistent_buffers_set', set()), ('_backward_pre_hooks', OrderedDict()), ('_backward_hooks', OrderedDict()), ('_is_full_backward_hook', None), ('_forward_hooks', OrderedDict()), ('_forward_hooks_with_kwargs', OrderedDict()), ('_forward_pre_hooks', OrderedDict()), ('_forward_pre_hooks_with_kwargs', OrderedDict()), ('_state_dict_hooks', OrderedDict()), ('_state_dict_pre_hooks', OrderedDict()), ('_load_state_dict_pre_hooks', OrderedDict()), ('_load_state_dict_post_hooks', OrderedDict()), ('_modules', OrderedDict([('layer1', Linear(in_features=1000, out_features=128, bias=True)), ('layer2', Linear(in_features=128, out_features=64, bias=True)), ('layer3', Linear(in_features=64, out_features=32, bias=True)), ('layer4', Linear(in_features=32, out_features=32, bias=True)), ('layer5', Linear(in_features=32, out_features=64, bias=True)), ('layer6', Linear(in_features=64, out_features=128, bias=True)), ('layer7', Linear(in_features=128, out_features=1000, bias=True)), ('layer8', Linear(in_features=1000, out_features=4, bias=True)), ('relu', ReLU())]))])\n"
     ]
    }
   ],
   "source": [
    "device = torch.device(\"cpu\") # run on CPU; switch to a CUDA device for much faster training\n",
    "epochs = 50                # number of training iterations; one epoch = one freshly generated batch passed through the network\n",
    "batch_size = 64\n",
    "learningrate = 1e-3           # optimizer step size: larger -> more aggressive updates, smaller -> more stable learning\n",
    "net = DNN().to(device=device) # instantiate the network\n",
    "optimizer = torch.optim.Adam(net.parameters(), lr=learningrate)# Adam: an adaptive variant of stochastic gradient descent; takes the network parameters and the learning rate\n",
    "crossloss  = nn.CrossEntropyLoss()      # loss between network output and target; the larger it is, the further training is from converging\n",
    "MinTrainLoss = 1e10          \n",
    "train_loss =[]               # per-epoch loss history, kept for plotting"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "f8a9f7c2",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build one training batch of (noisy input, clean label) pairs\n",
    "def build_data(batch_size,pri_list,TOA_length,scale,time_window_size,window_length,left_rate, add_rate):\n",
    "    \"\"\"Return (train_data, label) float32 tensors of shape (batch_size, window_length).\n",
    "\n",
    "    train_data: binarized TOA train with Gaussian jitter plus missing/spurious pulses.\n",
    "    label:      binarized TOA train of the same PRI pattern with no errors at all.\n",
    "    \"\"\"\n",
    "    train_data = []\n",
    "    label = []\n",
    "    for _ in range(batch_size):\n",
    "        noisy_TOA = Add_gauss_error_inTOA(TOA_Generator(pri_list,TOA_length), scale)\n",
    "        corrupted = TOA_left_add(TOA_to_Binary(noisy_TOA, time_window_size, window_length), left_rate, add_rate)\n",
    "        train_data.append(corrupted)\n",
    "        clean = TOA_to_Binary(TOA_Generator(pri_list,TOA_length), time_window_size, window_length)\n",
    "        label.append(clean)\n",
    "\n",
    "    train_data = torch.Tensor(train_data).to(device=device,dtype = torch.float32).reshape(-1,window_length)\n",
    "    label = torch.Tensor(label).to(device=device,dtype = torch.float32).reshape(-1,window_length)\n",
    "\n",
    "    return train_data, label"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c3334214",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build a batch that mixes all PRI modulation types (one random type per sample)\n",
    "def build_data_mix(batch_size,pri_list_list,TOA_length,scale,time_window_size,window_length,left_rate, add_rate):\n",
    "    \"\"\"Like build_data, but each sample draws its PRI pattern uniformly from pri_list_list.\n",
    "\n",
    "    Generalized: supports any number of PRI types (the original hard-coded exactly 4).\n",
    "    Returns (train_data, label) float32 tensors of shape (batch_size, window_length).\n",
    "    \"\"\"\n",
    "    train_data = []\n",
    "    label = []\n",
    "    for i in range(batch_size):\n",
    "        # Pick one PRI pattern at random for this sample.\n",
    "        pri_list = pri_list_list[random.randrange(len(pri_list_list))]\n",
    "\n",
    "        TOA = TOA_Generator(pri_list,TOA_length)\n",
    "        TOA = Add_gauss_error_inTOA(TOA,scale)\n",
    "        Binary_list = TOA_to_Binary(TOA, time_window_size, window_length)\n",
    "        Binary_list = TOA_left_add(Binary_list, left_rate, add_rate)\n",
    "        train_data.append(Binary_list)\n",
    "        origin_Binary_list = TOA_to_Binary(TOA_Generator(pri_list,TOA_length),time_window_size, window_length)\n",
    "        label.append(origin_Binary_list)\n",
    "\n",
    "    train_data = torch.Tensor(train_data).to(device=device,dtype = torch.float32).reshape(-1,window_length)\n",
    "    label = torch.Tensor(label).to(device=device,dtype = torch.float32).reshape(-1,window_length)\n",
    "\n",
    "    return train_data, label"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "22f73adf",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch:[   10/   50] time:3.87s current_loss:534.21417\n",
      "epoch:[   20/   50] time:3.83s current_loss:374.98013\n",
      "epoch:[   30/   50] time:3.70s current_loss:333.50952\n",
      "epoch:[   40/   50] time:3.69s current_loss:326.43546\n",
      "epoch:[   50/   50] time:3.68s current_loss:324.87640\n",
      "训练总用时: 0.31min\n"
     ]
    }
   ],
   "source": [
    "# Training loop\n",
    "# NOTE(review): the network's final layer emits pri_type outputs while pt_y_train has\n",
    "# binary_tia_length entries per sample -- CrossEntropyLoss requires matching shapes;\n",
    "# confirm the intended target before re-running.\n",
    "start = time.time()\n",
    "start0=time.time()\n",
    "for epoch in range(1,epochs+1):\n",
    "    net.train()    # train mode: parameters are updated (net.eval() is the counterpart for evaluation only)\n",
    "    pt_x_train, pt_y_train = build_data(batch_size,pri_list,origin_toa_length,scale,time_window_size,binary_tia_length,miss_ratio,error_ratio) # fresh batch generated on the fly every epoch\n",
    "    pt_y_pred = net(pt_x_train) # forward pass\n",
    "    loss = crossloss(pt_y_pred,pt_y_train)  # difference between prediction and label\n",
    "    optimizer.zero_grad()      # clear gradients from the previous step so they do not accumulate\n",
    "    loss.backward()  # backpropagation\n",
    "    optimizer.step() # apply the parameter update\n",
    "    if epoch % 10 == 0:# report progress every 10 epochs\n",
    "        end = time.time()\n",
    "        print(\"epoch:[%5d/%5d] time:%.2fs current_loss:%.5f\"\n",
    "          %(epoch,epochs,(end-start),loss.item()))\n",
    "        start = time.time()\n",
    "    train_loss.append(loss.item())\n",
    "    if train_loss[-1] < MinTrainLoss:\n",
    "        torch.save(net.state_dict(),\"model.pth\") # checkpoint every time the loss improves\n",
    "        MinTrainLoss = train_loss[-1]\n",
    "end0 = time.time()\n",
    "print(\"训练总用时: %.2fmin\"%((end0-start0)/60)) "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "8f44f240",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "input : tensor([[0., 0., 0.,  ..., 1., 0., 0.],\n",
      "        [0., 0., 0.,  ..., 0., 1., 1.],\n",
      "        [0., 0., 0.,  ..., 0., 0., 1.],\n",
      "        ...,\n",
      "        [1., 0., 0.,  ..., 0., 0., 0.],\n",
      "        [0., 0., 0.,  ..., 0., 0., 1.],\n",
      "        [0., 0., 0.,  ..., 0., 1., 0.]])\n",
      "origin: tensor([[0., 0., 0.,  ..., 0., 0., 1.],\n",
      "        [0., 0., 0.,  ..., 0., 0., 1.],\n",
      "        [0., 0., 0.,  ..., 0., 0., 1.],\n",
      "        ...,\n",
      "        [0., 0., 0.,  ..., 0., 0., 1.],\n",
      "        [0., 0., 0.,  ..., 0., 0., 1.],\n",
      "        [0., 0., 0.,  ..., 0., 0., 1.]])\n",
      "raw output: [[-6.6608343 -6.5635123 -8.490169  ... -7.0648994 -6.77846    6.282203 ]\n",
      " [-6.8631897 -6.7617593 -8.741393  ... -7.273746  -6.9802966  6.4698076]\n",
      " [-6.6340685 -6.537387  -8.455452  ... -7.0363607 -6.7497587  6.256849 ]\n",
      " ...\n",
      " [-6.542592  -6.447341  -8.343275  ... -6.9434586 -6.6594353  6.172812 ]\n",
      " [-6.6068234 -6.509977  -8.423591  ... -7.009762  -6.7235184  6.230848 ]\n",
      " [-6.8229437 -6.7224627 -8.692053  ... -7.2324677 -6.9401116  6.43164  ]]\n",
      "output: tensor([[0., 0., 0.,  ..., 0., 0., 1.],\n",
      "        [0., 0., 0.,  ..., 0., 0., 1.],\n",
      "        [0., 0., 0.,  ..., 0., 0., 1.],\n",
      "        ...,\n",
      "        [0., 0., 0.,  ..., 0., 0., 1.],\n",
      "        [0., 0., 0.,  ..., 0., 0., 1.],\n",
      "        [0., 0., 0.,  ..., 0., 0., 1.]])\n"
     ]
    }
   ],
   "source": [
    "# Evaluate the trained model on a test batch\n",
    "test_error_list = wobulated\n",
    "pt_x_test,pt_y_test = build_data(batch_size,test_error_list,origin_toa_length,scale,time_window_size,binary_tia_length,miss_ratio,error_ratio) # test batch built from the wobulated PRI pattern\n",
    "model_x_test,model_y_test = build_data(batch_size,pri_list,origin_toa_length,scale,time_window_size,binary_tia_length,miss_ratio,error_ratio) # reference batch built from the training PRI pattern\n",
    "\n",
    "print(\"input :\",pt_x_test) # network input: binary signal with every error type injected\n",
    "print(\"origin:\",pt_y_test) # clean, error-free binary signal\n",
    "Dnn = DNN().to(device)\n",
    "Dnn.load_state_dict(torch.load(\"model.pth\",map_location=device)) # load the best checkpoint saved during training\n",
    "Dnn.eval() # evaluation only -- no gradients, no parameter updates\n",
    "result_y_test = Dnn(pt_x_test) # run inference\n",
    "np_y_test = result_y_test.detach().cpu().numpy() # to numpy for thresholding and visualization\n",
    "print(\"raw output:\",np_y_test)\n",
    "# Threshold the float outputs to binary: <= 0 -> 0, > 0 -> 1.\n",
    "# Fix: each row is thresholded by its OWN values (the original enumerated row 0 for every row).\n",
    "for row in range(len(np_y_test)):\n",
    "    for index,num in enumerate(np_y_test[row]):\n",
    "        if(num<=0):\n",
    "            np_y_test[row][index] = 0\n",
    "        else:\n",
    "            np_y_test[row][index] = 1\n",
    "output = torch.Tensor(np_y_test) # final binarized prediction\n",
    "print(\"output:\",output)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "9cbd0b43",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "64000\n",
      "64000\n",
      "accurate: 100.0 %\n"
     ]
    }
   ],
   "source": [
    "# Count how many entries of the binarized prediction match the reference\n",
    "# NOTE(review): this compares against model_y_test (labels from the training PRI\n",
    "# pattern), not pt_y_test (labels of the batch actually fed to the model) -- confirm intended.\n",
    "same = 0\n",
    "for row in range(len(pt_x_test)):\n",
    "    for i in range(len(pt_x_test[row])):\n",
    "        if output[row][i] == model_y_test[row][i]:\n",
    "            same += 1\n",
    "        \n",
    "print(len(pt_x_test[0])*len(pt_x_test))  # total number of compared entries\n",
    "print(same)  # number of matching entries\n",
    "        \n",
    "print(\"accurate:\",same/(len(pt_x_test[0])*len(pt_x_test))*100,\"%\") # share of predicted entries equal to the reference"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "c9e9c1fa",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "net_state_dict类型： <class 'collections.OrderedDict'>\n",
      "net_state_dict管理的参数:  odict_keys(['layer1.weight', 'layer1.bias', 'layer2.weight', 'layer2.bias', 'layer3.weight', 'layer3.bias', 'layer4.weight', 'layer4.bias', 'layer5.weight', 'layer5.bias', 'layer6.weight', 'layer6.bias', 'layer7.weight', 'layer7.bias'])\n",
      "参数名:  layer1.weight \t大小: torch.Size([128, 1500]) \t类型:  torch.float32\n",
      "参数名:  layer1.bias \t大小: torch.Size([128]) \t类型:  torch.float32\n",
      "参数名:  layer2.weight \t大小: torch.Size([64, 128]) \t类型:  torch.float32\n",
      "参数名:  layer2.bias \t大小: torch.Size([64]) \t类型:  torch.float32\n",
      "参数名:  layer3.weight \t大小: torch.Size([32, 64]) \t类型:  torch.float32\n",
      "参数名:  layer3.bias \t大小: torch.Size([32]) \t类型:  torch.float32\n",
      "参数名:  layer4.weight \t大小: torch.Size([32, 32]) \t类型:  torch.float32\n",
      "参数名:  layer4.bias \t大小: torch.Size([32]) \t类型:  torch.float32\n",
      "参数名:  layer5.weight \t大小: torch.Size([64, 32]) \t类型:  torch.float32\n",
      "参数名:  layer5.bias \t大小: torch.Size([64]) \t类型:  torch.float32\n",
      "参数名:  layer6.weight \t大小: torch.Size([128, 64]) \t类型:  torch.float32\n",
      "参数名:  layer6.bias \t大小: torch.Size([128]) \t类型:  torch.float32\n",
      "参数名:  layer7.weight \t大小: torch.Size([1500, 128]) \t类型:  torch.float32\n",
      "参数名:  layer7.bias \t大小: torch.Size([1500]) \t类型:  torch.float32\n"
     ]
    }
   ],
   "source": [
    "# Inspect the current network parameters: state_dict keys, shapes and dtypes\n",
    "net_state_dict = Dnn.state_dict()\n",
    "\n",
    "print('net_state_dict类型：', type(net_state_dict))\n",
    "print('net_state_dict管理的参数: ', net_state_dict.keys())\n",
    "for key, value in net_state_dict.items():\n",
    "    print('参数名: ', key, '\\t大小:',  value.shape, '\\t类型: ',  value.dtype)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "id": "dfe89977",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "参数名:  layer1.weight \t大小: torch.Size([128, 1500]) \t类型:  torch.float32\n",
      "参数名:  layer1.bias \t大小: torch.Size([128]) \t类型:  torch.float32\n",
      "参数名:  layer2.weight \t大小: torch.Size([64, 128]) \t类型:  torch.float32\n",
      "参数名:  layer2.bias \t大小: torch.Size([64]) \t类型:  torch.float32\n",
      "参数名:  layer3.weight \t大小: torch.Size([32, 64]) \t类型:  torch.float32\n",
      "参数名:  layer3.bias \t大小: torch.Size([32]) \t类型:  torch.float32\n",
      "参数名:  layer4.weight \t大小: torch.Size([32, 32]) \t类型:  torch.float32\n",
      "参数名:  layer4.bias \t大小: torch.Size([32]) \t类型:  torch.float32\n",
      "参数名:  layer5.weight \t大小: torch.Size([64, 32]) \t类型:  torch.float32\n",
      "参数名:  layer5.bias \t大小: torch.Size([64]) \t类型:  torch.float32\n",
      "参数名:  layer6.weight \t大小: torch.Size([128, 64]) \t类型:  torch.float32\n",
      "参数名:  layer6.bias \t大小: torch.Size([128]) \t类型:  torch.float32\n",
      "参数名:  layer7.weight \t大小: torch.Size([1500, 128]) \t类型:  torch.float32\n",
      "参数名:  layer7.bias \t大小: torch.Size([1500]) \t类型:  torch.float32\n"
     ]
    }
   ],
   "source": [
    "# Dump every parameter tensor of the trained model to a text file\n",
    "with open(\"model.txt\",\"w\") as fd:  # context manager: file is closed even if writing fails\n",
    "    net_state_dict = Dnn.state_dict()\n",
    "    for key, value in net_state_dict.items():\n",
    "        print('参数名: ', key, '\\t大小:',  value.shape, '\\t类型: ',  value.dtype)\n",
    "        fd.write('参数名: ' + str(key) + '\\t大小:' + str(value.shape) + '\\t类型: ' + str(value.dtype) + \"\\n\")\n",
    "        # 2-D weights: one text row per tensor row; 1-D biases: all values on one line.\n",
    "        # (The original told these apart with a bare `except:` around the inner loop.)\n",
    "        if value.dim() > 1:\n",
    "            for row in value:\n",
    "                for column in row:\n",
    "                    fd.write(str(column)+\" \")\n",
    "                fd.write(\"\\n\")\n",
    "        else:\n",
    "            for row in value:\n",
    "                fd.write(str(row)+\" \")\n",
    "        fd.write(\"\\n\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "c36a324f",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([[-0.0066,  0.0324,  0.1752,  ...,  0.0353,  0.0174, -0.1856],\n",
      "        [-0.0377,  0.0303,  0.0386,  ...,  0.0827,  0.0755, -0.0741],\n",
      "        [ 0.0135, -0.1344,  0.1721,  ...,  0.0998, -0.1638, -0.0993],\n",
      "        ...,\n",
      "        [ 0.1559,  0.0811,  0.0861,  ...,  0.0525,  0.0975,  0.1646],\n",
      "        [-0.1240, -0.0553,  0.1166,  ..., -0.0749,  0.1776,  0.0263],\n",
      "        [-0.1548, -0.0615,  0.1152,  ..., -0.0213, -0.1325,  0.0641]])\n"
     ]
    }
   ],
   "source": [
    "# Spot-check one weight tensor from the saved state_dict\n",
    "print(net_state_dict[\"layer4.weight\"])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "99e7ba08",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Create a dynamically quantized copy of the trained model\n",
    "model_int8 = torch.quantization.quantize_dynamic(\n",
    "    model=Dnn,  # the original float model\n",
    "    qconfig_spec={torch.nn.Linear},  # operator types to quantize dynamically\n",
    "    dtype=torch.qint8)  # target weight dtype (torch.float16 or torch.qint8)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "b4dbee5f",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "input : tensor([[1., 0., 0.,  ..., 0., 0., 0.],\n",
      "        [1., 1., 1.,  ..., 1., 0., 0.],\n",
      "        [1., 0., 0.,  ..., 0., 1., 0.],\n",
      "        ...,\n",
      "        [1., 1., 1.,  ..., 1., 0., 1.],\n",
      "        [1., 1., 1.,  ..., 1., 1., 1.],\n",
      "        [1., 0., 1.,  ..., 0., 0., 1.]])\n",
      "origin: tensor([[1., 1., 1.,  ..., 1., 0., 1.],\n",
      "        [1., 1., 1.,  ..., 1., 0., 1.],\n",
      "        [1., 1., 1.,  ..., 1., 0., 1.],\n",
      "        ...,\n",
      "        [1., 1., 1.,  ..., 1., 0., 1.],\n",
      "        [1., 1., 1.,  ..., 1., 0., 1.],\n",
      "        [1., 1., 1.,  ..., 1., 0., 1.]])\n",
      "raw output: [[ 2.698787   2.6998665  2.6652281 ...  2.677638  -5.7994046  2.677673 ]\n",
      " [ 2.7116587  2.7448318  2.7014406 ...  2.684789  -5.8110175  2.674584 ]\n",
      " [ 2.7129745  2.7278984  2.699324  ...  2.670201  -5.7946563  2.6775017]\n",
      " ...\n",
      " [ 2.6997595  2.7273834  2.6878822 ...  2.67043   -5.7727456  2.6579938]\n",
      " [ 2.6974142  2.7201753  2.6909716 ...  2.6689997 -5.7844734  2.690831 ]\n",
      " [ 2.6917505  2.7218344  2.6917725 ...  2.682043  -5.784645   2.6725245]]\n",
      "q output: tensor([[1., 1., 1.,  ..., 1., 0., 1.],\n",
      "        [1., 1., 1.,  ..., 1., 0., 1.],\n",
      "        [1., 1., 1.,  ..., 1., 0., 1.],\n",
      "        ...,\n",
      "        [1., 1., 1.,  ..., 1., 0., 1.],\n",
      "        [1., 1., 1.,  ..., 1., 0., 1.],\n",
      "        [1., 1., 1.,  ..., 1., 0., 1.]])\n"
     ]
    }
   ],
   "source": [
    "# Evaluate the quantized model\n",
    "q_test_pri_list = [100, 150, 200, 250, 300]  # NOTE(review): unused -- the call below passes test_error_list instead; confirm which PRI pattern was intended\n",
    "q_pt_x_test,q_pt_y_test = build_data(64,test_error_list,10000,30,200,1500,0.2,0.2) # build one test batch\n",
    "print(\"input :\",q_pt_x_test) # network input: binary signal with every error type injected\n",
    "print(\"origin:\",q_pt_y_test) # clean, error-free binary signal\n",
    "q_result_y_test = model_int8(q_pt_x_test) # run inference with the quantized model\n",
    "q_np_y_test = q_result_y_test.detach().cpu().numpy() # to numpy for thresholding and visualization\n",
    "print(\"raw output:\",q_np_y_test)\n",
    "# Threshold the float outputs to binary: <= 0 -> 0, > 0 -> 1.\n",
    "# Fix: each row is thresholded by its OWN values (the original enumerated row 0 for every row).\n",
    "for row in range(len(q_np_y_test)):\n",
    "    for index,num in enumerate(q_np_y_test[row]):\n",
    "        if(num<=0):\n",
    "            q_np_y_test[row][index] = 0\n",
    "        else:\n",
    "            q_np_y_test[row][index] = 1\n",
    "q_output = torch.Tensor(q_np_y_test) # final binarized prediction\n",
    "print(\"q output:\",q_output)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "44dc7633",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "96000\n",
      "96000\n",
      "accurate: 100.0 %\n"
     ]
    }
   ],
   "source": [
    "# Count how many entries of the quantized model's prediction match the clean labels\n",
    "same = 0\n",
    "for row in range(len(q_pt_x_test)):\n",
    "    for i in range(len(q_pt_x_test[0])):\n",
    "        if q_output[row][i] == q_pt_y_test[row][i]:\n",
    "            same += 1\n",
    "        \n",
    "print(len(q_pt_x_test[0])*len(q_pt_x_test))  # total number of compared entries\n",
    "print(same)  # number of matching entries\n",
    "        \n",
    "print(\"accurate:\",same/(len(q_pt_x_test[0])*len(q_pt_x_test))*100,\"%\") # share of predicted entries equal to the clean labels"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "id": "d5e2dabb",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "量化模型类型： <class 'collections.OrderedDict'>\n",
      "量化模型管理的参数:  odict_keys(['layer1.scale', 'layer1.zero_point', 'layer1._packed_params.dtype', 'layer1._packed_params._packed_params', 'layer2.scale', 'layer2.zero_point', 'layer2._packed_params.dtype', 'layer2._packed_params._packed_params', 'layer3.scale', 'layer3.zero_point', 'layer3._packed_params.dtype', 'layer3._packed_params._packed_params', 'layer4.scale', 'layer4.zero_point', 'layer4._packed_params.dtype', 'layer4._packed_params._packed_params', 'layer5.scale', 'layer5.zero_point', 'layer5._packed_params.dtype', 'layer5._packed_params._packed_params', 'layer6.scale', 'layer6.zero_point', 'layer6._packed_params.dtype', 'layer6._packed_params._packed_params', 'layer7.scale', 'layer7.zero_point', 'layer7._packed_params.dtype', 'layer7._packed_params._packed_params'])\n",
      "参数名:  layer1.scale \t大小: tensor(1.)\n",
      "参数名:  layer1.zero_point \t大小: tensor(0)\n",
      "参数名:  layer1._packed_params.dtype \t大小: torch.qint8\n",
      "参数名:  layer1._packed_params._packed_params \t大小: (tensor([[-0.0018, -0.0294, -0.0264,  ..., -0.0217, -0.0217, -0.0070],\n",
      "        [-0.0077, -0.0301, -0.0250,  ..., -0.0095, -0.0106, -0.0308],\n",
      "        [ 0.0073,  0.0033, -0.0095,  ...,  0.0044,  0.0140, -0.0253],\n",
      "        ...,\n",
      "        [ 0.0125, -0.0037, -0.0029,  ...,  0.0217,  0.0162,  0.0246],\n",
      "        [-0.0162,  0.0092, -0.0121,  ..., -0.0195, -0.0029,  0.0158],\n",
      "        [-0.0059, -0.0018, -0.0037,  ...,  0.0411, -0.0066, -0.0022]],\n",
      "       size=(128, 1500), dtype=torch.qint8,\n",
      "       quantization_scheme=torch.per_tensor_affine, scale=0.000367237749742344,\n",
      "       zero_point=0), Parameter containing:\n",
      "tensor([ 0.0170, -0.0225, -0.0334,  0.0214, -0.0130, -0.0087, -0.0001, -0.0156,\n",
      "        -0.0059,  0.0056, -0.0131,  0.0094,  0.0381,  0.0217,  0.0166, -0.0159,\n",
      "         0.0089, -0.0068, -0.0088,  0.0077,  0.0144,  0.0386,  0.0289,  0.0029,\n",
      "        -0.0064, -0.0173, -0.0271,  0.0167,  0.0344,  0.0030,  0.0408,  0.0156,\n",
      "         0.0199,  0.0083,  0.0252, -0.0016, -0.0237, -0.0013, -0.0295,  0.0117,\n",
      "         0.0109, -0.0211, -0.0271, -0.0051, -0.0158,  0.0333,  0.0421, -0.0194,\n",
      "        -0.0043,  0.0113,  0.0190,  0.0267, -0.0092, -0.0019, -0.0227,  0.0144,\n",
      "        -0.0098,  0.0173,  0.0002,  0.0330,  0.0077, -0.0014,  0.0151, -0.0105,\n",
      "         0.0328,  0.0168,  0.0221, -0.0121, -0.0166, -0.0010, -0.0294, -0.0158,\n",
      "        -0.0030, -0.0188, -0.0267, -0.0234, -0.0075,  0.0133,  0.0374,  0.0029,\n",
      "        -0.0304,  0.0351, -0.0002,  0.0272, -0.0279,  0.0023, -0.0170, -0.0112,\n",
      "         0.0117, -0.0008, -0.0074,  0.0158,  0.0249, -0.0211,  0.0003,  0.0218,\n",
      "         0.0035,  0.0215,  0.0110, -0.0308,  0.0072, -0.0222, -0.0106, -0.0044,\n",
      "        -0.0179,  0.0068,  0.0337,  0.0119,  0.0238, -0.0286,  0.0009, -0.0025,\n",
      "        -0.0004, -0.0251, -0.0051,  0.0355, -0.0013, -0.0214,  0.0011,  0.0117,\n",
      "         0.0193, -0.0286, -0.0145,  0.0119,  0.0339,  0.0369,  0.0001,  0.0173],\n",
      "       requires_grad=True))\n",
      "参数名:  layer2.scale \t大小: tensor(1.)\n",
      "参数名:  layer2.zero_point \t大小: tensor(0)\n",
      "参数名:  layer2._packed_params.dtype \t大小: torch.qint8\n",
      "参数名:  layer2._packed_params._packed_params \t大小: (tensor([[ 0.0257,  0.0488,  0.0872,  ...,  0.0727,  0.0137,  0.0684],\n",
      "        [-0.0325, -0.0522, -0.0650,  ..., -0.0248,  0.0128,  0.0205],\n",
      "        [ 0.0197,  0.0530,  0.0393,  ..., -0.0813, -0.0753,  0.0411],\n",
      "        ...,\n",
      "        [-0.0009,  0.0393,  0.0068,  ..., -0.0470, -0.0778, -0.0017],\n",
      "        [ 0.0342,  0.0094, -0.0205,  ...,  0.0898,  0.0325,  0.0359],\n",
      "        [ 0.0299, -0.0898, -0.0163,  ..., -0.0496, -0.0154,  0.0565]],\n",
      "       size=(64, 128), dtype=torch.qint8,\n",
      "       quantization_scheme=torch.per_tensor_affine, scale=0.0008553412044420838,\n",
      "       zero_point=0), Parameter containing:\n",
      "tensor([ 0.0856,  0.0078,  0.0417, -0.0610,  0.0051,  0.0930,  0.0417, -0.0116,\n",
      "        -0.0111,  0.0452, -0.0309, -0.0664,  0.0271, -0.0079, -0.0533,  0.0726,\n",
      "         0.0702, -0.0355,  0.0227,  0.0428, -0.0216,  0.0552,  0.0753,  0.0295,\n",
      "        -0.0797,  0.0196, -0.0157,  0.0774,  0.0382, -0.0383,  0.0163, -0.0211,\n",
      "         0.0618,  0.0100,  0.0949,  0.0664,  0.0474,  0.0606, -0.0015, -0.0118,\n",
      "         0.0114,  0.0091, -0.0292,  0.0261,  0.0212,  0.0644, -0.0700, -0.0545,\n",
      "         0.0676,  0.0155, -0.0266,  0.0794, -0.0108, -0.0272,  0.0554,  0.0959,\n",
      "        -0.0094, -0.0875,  0.0982, -0.0082, -0.0352, -0.0245,  0.0974, -0.0210],\n",
      "       requires_grad=True))\n",
      "参数名:  layer3.scale \t大小: tensor(1.)\n",
      "参数名:  layer3.zero_point \t大小: tensor(0)\n",
      "参数名:  layer3._packed_params.dtype \t大小: torch.qint8\n",
      "参数名:  layer3._packed_params._packed_params \t大小: (tensor([[ 0.0689, -0.0678,  0.0912,  ...,  0.0678, -0.0678,  0.0456],\n",
      "        [-0.0878, -0.0934,  0.0645,  ...,  0.0111,  0.0156, -0.1045],\n",
      "        [ 0.0411, -0.0011,  0.0534,  ..., -0.0778, -0.0934,  0.1323],\n",
      "        ...,\n",
      "        [ 0.0778, -0.0211,  0.0156,  ...,  0.0511,  0.0267,  0.0100],\n",
      "        [ 0.0712,  0.1134, -0.1123,  ...,  0.0178, -0.0534, -0.0434],\n",
      "        [ 0.1090,  0.0100, -0.0489,  ..., -0.0278,  0.0611,  0.0289]],\n",
      "       size=(32, 64), dtype=torch.qint8,\n",
      "       quantization_scheme=torch.per_tensor_affine, scale=0.0011118127731606364,\n",
      "       zero_point=0), Parameter containing:\n",
      "tensor([ 0.1111,  0.0627, -0.0444, -0.1116,  0.0358, -0.0598, -0.1307, -0.0351,\n",
      "        -0.0036, -0.0988,  0.0040, -0.0254,  0.1233, -0.0064,  0.0166, -0.0902,\n",
      "         0.0639, -0.0847,  0.0512, -0.0323, -0.0503, -0.0289,  0.0342, -0.0041,\n",
      "         0.1362,  0.0236,  0.1219, -0.1094, -0.0203,  0.1003,  0.1033, -0.0254],\n",
      "       requires_grad=True))\n",
      "参数名:  layer4.scale \t大小: tensor(1.)\n",
      "参数名:  layer4.zero_point \t大小: tensor(0)\n",
      "参数名:  layer4._packed_params.dtype \t大小: torch.qint8\n",
      "参数名:  layer4._packed_params._packed_params \t大小: (tensor([[-0.0061,  0.0321,  0.1756,  ...,  0.0351,  0.0168, -0.1862],\n",
      "        [-0.0382,  0.0305,  0.0382,  ...,  0.0824,  0.0748, -0.0748],\n",
      "        [ 0.0137, -0.1343,  0.1725,  ...,  0.0992, -0.1633, -0.0992],\n",
      "        ...,\n",
      "        [ 0.1557,  0.0809,  0.0855,  ...,  0.0519,  0.0977,  0.1649],\n",
      "        [-0.1237, -0.0550,  0.1160,  ..., -0.0748,  0.1771,  0.0260],\n",
      "        [-0.1542, -0.0611,  0.1145,  ..., -0.0214, -0.1328,  0.0641]],\n",
      "       size=(32, 32), dtype=torch.qint8,\n",
      "       quantization_scheme=torch.per_tensor_affine, scale=0.0015265566762536764,\n",
      "       zero_point=0), Parameter containing:\n",
      "tensor([ 0.1339,  0.0290, -0.1622, -0.0284,  0.0741, -0.0163, -0.1432,  0.1814,\n",
      "        -0.1282,  0.1782, -0.0404,  0.1461, -0.1334, -0.0105,  0.1166, -0.1090,\n",
      "         0.0132,  0.1054,  0.1051, -0.1786, -0.0237,  0.0551,  0.1021,  0.0580,\n",
      "         0.1611,  0.0715,  0.1264, -0.1745,  0.0196, -0.1565,  0.0648, -0.0915],\n",
      "       requires_grad=True))\n",
      "参数名:  layer5.scale \t大小: tensor(1.)\n",
      "参数名:  layer5.zero_point \t大小: tensor(0)\n",
      "参数名:  layer5._packed_params.dtype \t大小: torch.qint8\n",
      "参数名:  layer5._packed_params._packed_params \t大小: (tensor([[ 0.0232,  0.0386,  0.1159,  ..., -0.1067, -0.1793, -0.0649],\n",
      "        [ 0.1438,  0.1237,  0.0974,  ...,  0.0417,  0.1159,  0.0325],\n",
      "        [ 0.0325,  0.0603, -0.1561,  ...,  0.0510, -0.0819, -0.1592],\n",
      "        ...,\n",
      "        [-0.1638,  0.0433, -0.0587,  ..., -0.0912, -0.1623,  0.0185],\n",
      "        [ 0.1577, -0.0124,  0.1267,  ..., -0.1314,  0.0665,  0.1530],\n",
      "        [ 0.1376,  0.0603,  0.0325,  ..., -0.1638,  0.0773, -0.0386]],\n",
      "       size=(64, 32), dtype=torch.qint8,\n",
      "       quantization_scheme=torch.per_tensor_affine, scale=0.0015457207337021828,\n",
      "       zero_point=0), Parameter containing:\n",
      "tensor([-0.1114,  0.0120, -0.0054,  0.1418, -0.0187, -0.0805,  0.1648, -0.1186,\n",
      "         0.0989, -0.0142,  0.1246, -0.1372, -0.0513, -0.0416,  0.0971, -0.0957,\n",
      "        -0.1338,  0.0223,  0.0198, -0.0023,  0.1154,  0.1560,  0.0725,  0.1390,\n",
      "        -0.0303, -0.0993, -0.0977,  0.1848,  0.0560,  0.0356,  0.0265, -0.0728,\n",
      "        -0.1595,  0.1208,  0.0709,  0.0407,  0.1001, -0.1246, -0.1165,  0.1390,\n",
      "        -0.1315,  0.1244, -0.0840,  0.0358, -0.1393, -0.0630, -0.1196, -0.1734,\n",
      "        -0.0596, -0.0908,  0.0639,  0.0603, -0.1462,  0.0240, -0.1332,  0.1684,\n",
      "        -0.1882,  0.1407, -0.1921, -0.0036, -0.0749,  0.0363,  0.1097,  0.0544],\n",
      "       requires_grad=True))\n",
      "参数名:  layer6.scale \t大小: tensor(1.)\n",
      "参数名:  layer6.zero_point \t大小: tensor(0)\n",
      "参数名:  layer6._packed_params.dtype \t大小: torch.qint8\n",
      "参数名:  layer6._packed_params._packed_params \t大小: (tensor([[-0.1056,  0.0458,  0.0293,  ..., -0.0692,  0.0035,  0.0317],\n",
      "        [ 0.0657, -0.0012, -0.0364,  ...,  0.0692,  0.0774, -0.0188],\n",
      "        [ 0.1021,  0.0751,  0.0446,  ...,  0.1009, -0.0293, -0.0857],\n",
      "        ...,\n",
      "        [ 0.0199,  0.1103, -0.0716,  ..., -0.0986,  0.0810, -0.0317],\n",
      "        [-0.0352,  0.0528,  0.1173,  ...,  0.1385, -0.0927,  0.0997],\n",
      "        [-0.0552,  0.0927,  0.0153,  ...,  0.0598, -0.1326, -0.1127]],\n",
      "       size=(128, 64), dtype=torch.qint8,\n",
      "       quantization_scheme=torch.per_tensor_affine, scale=0.001173466327600181,\n",
      "       zero_point=0), Parameter containing:\n",
      "tensor([-0.0444, -0.1378, -0.1092,  0.1176, -0.1133, -0.1245,  0.0988,  0.1115,\n",
      "         0.0934, -0.0320, -0.0734,  0.0980,  0.1085, -0.0381, -0.1296, -0.0003,\n",
      "        -0.0029, -0.1117, -0.0647,  0.0546, -0.1108,  0.1216, -0.0227,  0.0671,\n",
      "         0.1104,  0.0196,  0.1291, -0.0706,  0.1206, -0.0518,  0.0017, -0.0394,\n",
      "         0.0285, -0.0588, -0.0695, -0.1043, -0.0555, -0.0190,  0.1150, -0.1138,\n",
      "         0.0387, -0.0706,  0.0389,  0.0449, -0.0553, -0.0514,  0.0808, -0.1301,\n",
      "        -0.1205,  0.0378,  0.1043,  0.1086,  0.0489, -0.1034,  0.0633,  0.0283,\n",
      "        -0.0081, -0.1119,  0.0202,  0.0529, -0.0196, -0.1008,  0.0620,  0.0027,\n",
      "        -0.0243, -0.1135, -0.0559,  0.0186,  0.0874, -0.1152,  0.0379,  0.0379,\n",
      "        -0.0910,  0.0028,  0.0565,  0.1012, -0.0423, -0.0234, -0.1145,  0.0362,\n",
      "         0.0267,  0.1287, -0.0343,  0.0801, -0.0864, -0.1223, -0.0606,  0.0208,\n",
      "        -0.1180,  0.0685,  0.1022,  0.0595, -0.0029, -0.0210, -0.0147, -0.0873,\n",
      "         0.1261,  0.0076, -0.1296, -0.1229,  0.0910,  0.0533, -0.0453, -0.1443,\n",
      "        -0.0707, -0.1100,  0.0675,  0.1076, -0.0495,  0.1039, -0.0893, -0.0052,\n",
      "        -0.0622, -0.1166,  0.0662,  0.1323,  0.0180,  0.1256,  0.0939, -0.0087,\n",
      "         0.0998,  0.0695, -0.1057,  0.0169, -0.0776,  0.0531, -0.0996, -0.1224],\n",
      "       requires_grad=True))\n",
      "参数名:  layer7.scale \t大小: tensor(1.)\n",
      "参数名:  layer7.zero_point \t大小: tensor(0)\n",
      "参数名:  layer7._packed_params.dtype \t大小: torch.qint8\n",
      "参数名:  layer7._packed_params._packed_params \t大小: (tensor([[ 0.0249, -0.0657, -0.0985,  ..., -0.0836, -0.0139,  0.0219],\n",
      "        [ 0.0408,  0.0358, -0.0527,  ..., -0.0836,  0.0199, -0.0597],\n",
      "        [ 0.0657, -0.0866,  0.0388,  ...,  0.0219,  0.0279,  0.0090],\n",
      "        ...,\n",
      "        [-0.0090,  0.0149, -0.0746,  ..., -0.0965,  0.0219, -0.0547],\n",
      "        [-0.0706,  0.0806,  0.0358,  ...,  0.1124,  0.0816,  0.0517],\n",
      "        [-0.0298,  0.0040, -0.0398,  ..., -0.0557,  0.0458,  0.0209]],\n",
      "       size=(1500, 128), dtype=torch.qint8,\n",
      "       quantization_scheme=torch.per_tensor_affine, scale=0.0009948292281478643,\n",
      "       zero_point=0), Parameter containing:\n",
      "tensor([-0.0030,  0.0738,  0.0896,  ..., -0.0433, -0.0450, -0.0372],\n",
      "       requires_grad=True))\n"
     ]
    }
   ],
   "source": [
    "# Inspect the parameters stored in the quantized model's state dict\n",
    "qstate = model_int8.state_dict()\n",
    "\n",
    "print('量化模型类型：', type(qstate))\n",
    "print('量化模型管理的参数: ', qstate.keys())\n",
    "# Dump every entry; for quantized Linear layers the entries include\n",
    "# scale / zero_point tensors, the packed-params dtype, and the packed\n",
    "# (weight, bias) tuple — hence the very large recorded output below.\n",
    "for name in qstate:\n",
    "    print('参数名: ', name, '\\t大小:', qstate[name])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "99c52ab7",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
