{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "846431df",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "import numpy as np\n",
    "import random\n",
    "import time\n",
    "import matplotlib.pyplot as plt\n",
    "from math import sin,pi"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "a60a7f32",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Dataset parameters\n",
    "time_window_size = 10      # width (time units) of one quantization window\n",
    "origin_toa_length = 2048   # number of TOA pulses generated per sample\n",
    "binary_tia_length = 1000   # length of the binarized window vector (network input size)\n",
    "scale = 10                 # std-dev of the Gaussian timing jitter\n",
    "miss_ratio = 0.1           # fraction of windows randomly cleared (missing pulses)\n",
    "error_ratio = 0.1          # fraction of windows randomly set (spurious pulses)\n",
    "pri_type = 4               # number of PRI modulation classes"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "86b65512",
   "metadata": {},
   "outputs": [],
   "source": [
    "#generate TOA by a PRI list\n",
    "def TOA_Generator(PRI_list, length): #（PRI的数据，TOA列表长度）\n",
    "    TOA_list = [] #最终输出的TOA列表\n",
    "    TOA = 0 #上一项的TOA时间\n",
    "    TOA_count = 0 #TOA总数量，用于判断是否停止生成\n",
    "    \n",
    "    pri_len = len(PRI_list)\n",
    "    pri_offset = random.randint(0,pri_len-1)\n",
    "    while True:\n",
    "        if TOA_count >= length:\n",
    "            return TOA_list\n",
    "        \n",
    "        TOA_list.append(TOA)\n",
    "        TOA += PRI_list[pri_offset%pri_len]\n",
    "        pri_offset += 1\n",
    "        TOA_count += 1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "e6adfe8a",
   "metadata": {},
   "outputs": [],
   "source": [
    "#为TOA序列添加高斯误差\n",
    "def Add_gauss_error_inTOA(TOA_list, scale): #scale越大，误差越大\n",
    "    length = len(TOA_list)\n",
    "    error_list = np.random.normal(0, scale, length)\n",
    "    for i,TOA in enumerate(TOA_list):\n",
    "        TOA_list[i] = TOA+round(error_list[i],0)\n",
    "    return TOA_list"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "cf241379",
   "metadata": {},
   "outputs": [],
   "source": [
    "#将TOA转化为二进制编码\n",
    "def TOA_to_Binary(TOA_list,time_window_size,length): #time_window_size为每个数组所代表的窗口大小，length是这个TOA_list总共包含多少个时间窗口\n",
    "    Binary_list = [0]*length\n",
    "    for TOA in TOA_list:\n",
    "        index = int(TOA//time_window_size)\n",
    "        if(index < length):\n",
    "            Binary_list[index] = 1\n",
    "        else:\n",
    "            break\n",
    "    return Binary_list"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "81959c52",
   "metadata": {},
   "outputs": [],
   "source": [
    "#添加随机丢失和错误脉冲\n",
    "def TOA_left_add(Binary_list, left_rate, add_rate): #取值范围0-1\n",
    "    length = len(Binary_list)\n",
    "    for i in range(int(length*left_rate)):\n",
    "        index = random.randint(0,length-1)\n",
    "        Binary_list[index]=0\n",
    "        \n",
    "    for i in range(int(length*add_rate)):\n",
    "        index = random.randint(0,length-1)\n",
    "        Binary_list[index]=1\n",
    "    \n",
    "    return Binary_list"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "1877a69b",
   "metadata": {},
   "outputs": [],
   "source": [
    "# PRI patterns for the four modulation classes.\n",
    "constant = [200]                      # constant PRI\n",
    "sliding = [100, 150, 200, 250, 300]   # sliding (staircase) PRI\n",
    "ds = [110]*6 + [190]*6 + [320]*6      # stepped PRI: three 6-pulse dwell levels\n",
    "wobulated = []\n",
    "# Sinusoidal (wobulated) PRI: one full period every 30 pulses, 200 +/- 150.\n",
    "for i in range(30):\n",
    "    wobulated.append(int(150*sin(i*pi/15))+200)\n",
    "    \n",
    "pri_list = sliding                    # default pattern used by the single-class cells\n",
    "pri_list_list = [constant,sliding,ds,wobulated]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "d6d7b006",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Origin TOA: [0, 200, 450, 750, 850, 1000, 1200, 1450, 1750, 1850, 2000, 2200, 2450, 2750, 2850, 3000, 3200, 3450, 3750, 3850, 4000, 4200, 4450, 4750, 4850, 5000, 5200, 5450, 5750, 5850, 6000, 6200, 6450, 6750, 6850, 7000, 7200, 7450, 7750, 7850, 8000, 8200, 8450, 8750, 8850, 9000, 9200, 9450, 9750, 9850]\n",
      "TOA with gauss error: [-3.0, 204.0, 442.0, 759.0, 830.0, 996.0, 1195.0, 1453.0, 1755.0, 1857.0, 1988.0, 2201.0, 2447.0, 2749.0, 2852.0, 2987.0, 3188.0, 3442.0, 3769.0, 3844.0, 4005.0, 4196.0, 4454.0, 4745.0, 4851.0, 4993.0, 5205.0, 5453.0, 5752.0, 5852.0, 6005.0, 6206.0, 6447.0, 6730.0, 6859.0, 6997.0, 7203.0, 7447.0, 7756.0, 7837.0, 8013.0, 8193.0, 8446.0, 8752.0, 8852.0, 9019.0, 9216.0, 9445.0, 9749.0, 9862.0]\n",
      "Binary list: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]\n",
      "Error Binary list: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0]\n"
     ]
    }
   ],
   "source": [
    "# Smoke test of the dataset-generation API\n",
    "TOA = TOA_Generator(pri_list,origin_toa_length) # clean TOA sequence, no errors\n",
    "print(\"Origin TOA:\",TOA[:50])\n",
    "TOA = Add_gauss_error_inTOA(TOA,scale) # add Gaussian timing jitter (in place)\n",
    "print(\"TOA with gauss error:\",TOA[:50])\n",
    "Binary_list = TOA_to_Binary(TOA, time_window_size, binary_tia_length) # quantize into a binary window vector\n",
    "print(\"Binary list:\",Binary_list[:50])\n",
    "Binary_list = TOA_left_add(Binary_list, miss_ratio, error_ratio) # inject missing and spurious pulses\n",
    "print(\"Error Binary list:\",Binary_list[:50])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "7e210fe4",
   "metadata": {},
   "outputs": [],
   "source": [
    "class DNN(nn.Module):\n",
    "    \"\"\"Fully connected classifier mapping a binary TIA window vector to\n",
    "    one score per PRI modulation class.\n",
    "\n",
    "    Layer widths are [binary_tia_length, 128, 64, 32, pri_type]; both\n",
    "    globals are read at construction time.  (Dead commented-out CNN and\n",
    "    manual-layer-list variants were removed.)\n",
    "    \"\"\"\n",
    "    def __init__(self):\n",
    "        super().__init__()\n",
    "        # Neurons per layer: input is the binarized time-window vector,\n",
    "        # output is one score per PRI class.\n",
    "        layers = [binary_tia_length, 128, 64, 32, pri_type]\n",
    "\n",
    "        self.fc = nn.Sequential(\n",
    "            nn.Linear(layers[0], layers[1]),\n",
    "            nn.ReLU(),\n",
    "            nn.Linear(layers[1], layers[2]),\n",
    "            nn.ReLU(),\n",
    "            nn.Linear(layers[2], layers[3]),\n",
    "            nn.ReLU(),\n",
    "            nn.Linear(layers[3], layers[4])\n",
    "        )\n",
    "\n",
    "    def forward(self, x):\n",
    "        \"\"\"Run the MLP and squash the outputs into (0, 1).\"\"\"\n",
    "        x = self.fc(x)\n",
    "        # NOTE(review): these sigmoid outputs are later fed to\n",
    "        # nn.CrossEntropyLoss, which expects raw logits; returning x\n",
    "        # directly would likely train better -- confirm no other cell\n",
    "        # relies on (0, 1)-bounded outputs before changing this.\n",
    "        return torch.sigmoid(x)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "d9c20ccc",
   "metadata": {},
   "outputs": [],
   "source": [
    "device = torch.device(\"cpu\") # compute device; prefer a GPU here for much faster training\n",
    "epochs = 500                # number of iterations; one epoch = one full pass of the training data\n",
    "batch_size = 256\n",
    "learningrate = 1e-3           # optimizer step size: larger = more aggressive updates, smaller = more stable learning\n",
    "net = DNN().to(device=device) # network initialization\n",
    "optimizer = torch.optim.Adam(net.parameters(), lr=learningrate)# Adam, an adaptive variant of stochastic gradient descent; takes the network parameters and the learning rate\n",
    "crossloss  = nn.CrossEntropyLoss()      # loss function (cross entropy): measures the gap between network output and label -- larger means more training needed\n",
    "MinTrainLoss = 1e10          \n",
    "train_loss =[]               # stores per-step training losses for plotting"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "f8a9f7c2",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build one batch of (noisy binary vector, clean binary vector) pairs\n",
    "# for denoising-style training.\n",
    "def build_data(batch_size,pri_list,TOA_length,scale,time_window_size,window_length,left_rate, add_rate):\n",
    "    \"\"\"Return (train_data, label), both float32 tensors of shape\n",
    "    (batch_size, window_length) on the global `device`.\n",
    "\n",
    "    Bug fix: the clean target used to come from a SECOND TOA_Generator\n",
    "    call, which draws a fresh random phase offset, so the label was not\n",
    "    the clean version of the noisy input.  The target is now computed\n",
    "    from the same pulse train before noise is applied\n",
    "    (Add_gauss_error_inTOA mutates its argument, so order matters).\n",
    "    \"\"\"\n",
    "    train_data = []\n",
    "    label = []\n",
    "    for i in range(batch_size):\n",
    "        TOA = TOA_Generator(pri_list,TOA_length)\n",
    "        label.append(TOA_to_Binary(TOA, time_window_size, window_length))  # clean target first\n",
    "        TOA = Add_gauss_error_inTOA(TOA,scale)  # Gaussian jitter (in place)\n",
    "        Binary_list = TOA_to_Binary(TOA, time_window_size, window_length)\n",
    "        Binary_list = TOA_left_add(Binary_list, left_rate, add_rate)  # missing + spurious pulses\n",
    "        train_data.append(Binary_list)\n",
    "        \n",
    "    train_data = torch.Tensor(train_data).to(device=device,dtype = torch.float32).reshape(-1,window_length)\n",
    "    label = torch.Tensor(label).to(device=device,dtype = torch.float32).reshape(-1,window_length)\n",
    "    \n",
    "    return train_data, label"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "c3334214",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build a classification batch mixing the four PRI modulation types.\n",
    "def build_data_mix(batch_size,pri_list_list,TOA_length,scale,time_window_size,window_length,left_rate, add_rate):\n",
    "    \"\"\"Return (train_data, label): noisy binary vectors of shape\n",
    "    (batch_size, window_length) and one-hot labels of shape\n",
    "    (batch_size, pri_type), both float32 on the global `device`.\"\"\"\n",
    "    train_data = []\n",
    "    label = []\n",
    "    for _ in range(batch_size):\n",
    "        # Pick one of the four PRI patterns and build its one-hot label.\n",
    "        index = random.randint(0,3)\n",
    "        pri_list = pri_list_list[index]\n",
    "        one_hot = [0]*pri_type\n",
    "        one_hot[index] = 1\n",
    "\n",
    "        # Clean TOA -> jitter -> binarize -> missing/spurious pulses.\n",
    "        TOA = TOA_Generator(pri_list,TOA_length)\n",
    "        TOA = Add_gauss_error_inTOA(TOA,scale)\n",
    "        Binary_list = TOA_to_Binary(TOA, time_window_size, window_length)\n",
    "        Binary_list = TOA_left_add(Binary_list, left_rate, add_rate)\n",
    "\n",
    "        train_data.append(Binary_list)\n",
    "        label.append(one_hot)\n",
    "\n",
    "    train_data = torch.Tensor(train_data).to(device=device,dtype = torch.float32).reshape(-1,window_length)\n",
    "    label = torch.Tensor(label).to(device=device,dtype = torch.float32).reshape(-1,pri_type)\n",
    "\n",
    "    return train_data, label"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "6ad7cc2a",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "/home/gary/Code/dataset/radar/cons10M.txt 9901249 102809220\n",
      "/home/gary/Code/dataset/radar/sliding10M.txt 9902270 108924970\n",
      "/home/gary/Code/dataset/radar/ds10M.txt 9902473 108927203\n",
      "/home/gary/Code/dataset/radar/wobu10M.txt 9900353 108903883\n",
      "[9901249, 9902270, 9902473, 9900353] [102809220, 108924970, 108927203, 108903883]\n"
     ]
    }
   ],
   "source": [
    "# Trace file paths (NOTE(review): absolute local paths -- adjust per machine)\n",
    "file_path_list = [\"/home/gary/Code/dataset/radar/cons10M.txt\",\n",
    "                  \"/home/gary/Code/dataset/radar/sliding10M.txt\",\n",
    "                  \"/home/gary/Code/dataset/radar/ds10M.txt\",\n",
    "                  \"/home/gary/Code/dataset/radar/wobu10M.txt\"]\n",
    "# file_path_list = [\"/mnt/f2fs/cons10M.txt\",\n",
    "#                   \"/mnt/f2fs/sliding10M.txt\",\n",
    "#                   \"/mnt/f2fs/ds10M.txt\",\n",
    "#                   \"/mnt/f2fs/wobu10M.txt\"]\n",
    "\n",
    "file_lines = []  # per-file line counts, filled by update_fileinfo\n",
    "file_sizes = []  # per-file byte sizes, filled by update_fileinfo\n",
    "\n",
    "# Read each file's line count and byte size\n",
    "import subprocess, os\n",
    "def wc_count(file_name):\n",
    "    out = subprocess.getoutput(\"wc -l %s\" % file_name)\n",
    "    return int(out.split()[0])\n",
    "\n",
    "def update_fileinfo(trace_list):\n",
    "    \"\"\"Refresh the global file_lines / file_sizes caches for trace_list.\n",
    "\n",
    "    Each entry of trace_list is a file path; its line count (wc_count)\n",
    "    and byte size (os.stat) are appended in order, so both caches stay\n",
    "    index-aligned with trace_list.\n",
    "    \"\"\"\n",
    "    global file_lines\n",
    "    global file_sizes\n",
    "    file_lines = []\n",
    "    file_sizes = []\n",
    "    for line in trace_list:  # `line` is actually a file path\n",
    "        rows = wc_count(line)\n",
    "        size = os.stat(line).st_size\n",
    "        print(line,rows,size)\n",
    "        file_lines.append(rows)\n",
    "        file_sizes.append(size)\n",
    "        \n",
    "update_fileinfo(file_path_list)\n",
    "print(file_lines,file_sizes)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "f593fb68",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[27971744, 27971794, 27971997, 27972190, 27972405, 27972581, 27972789, 27972998, 27973196, 27973388, 27973599, 27973795, 27974012, 27974194, 27974398, 27974586, 27974773, 27974990, 27975193, 27975397, 27975589, 27975780, 27975990, 27976161, 27976211, 27976405, 27976602, 27976809, 27976953, 27977003, 27977207, 27977400, 27977601, 27977805, 27977946, 27977996, 27978195, 27978400, 27978573, 27978802, 27979003, 27979206, 27979400, 27979620, 27979806, 27980002, 27980203, 27980398, 27980596, 27980755, 27980805, 27980953, 27981003, 27981182, 27981403, 27981601, 27981766, 27981816, 27982006, 27982145, 27982195, 27982404, 27982608, 27982810, 27983006, 27983151, 27983201, 27983386, 27983588, 27983790, 27984000, 27984182, 27984385, 27984543, 27984593, 27984811, 27985018, 27985187, 27985352, 27985402, 27985608, 27985784, 27986001, 27986198, 27986409, 27986603, 27986804, 27986999, 27987224, 27987399, 27987602, 27987798, 27988009, 27988192, 27988392, 27988600, 27988801, 27988984, 27989181, 27989353]\n",
      "[0, 50, 253, 446, 661, 837, 1045, 1254, 1452, 1644, 1855, 2051, 2268, 2450, 2654, 2842, 3029, 3246, 3449, 3653, 3845, 4036, 4246, 4417, 4467, 4661, 4858, 5065, 5209, 5259, 5463, 5656, 5857, 6061, 6202, 6252, 6451, 6656, 6829, 7058, 7259, 7462, 7656, 7876, 8062, 8258, 8459, 8654, 8852, 9011, 9061, 9209, 9259, 9438, 9659, 9857, 10022, 10072, 10262, 10401, 10451, 10660, 10864, 11066, 11262, 11407, 11457, 11642, 11844, 12046, 12256, 12438, 12641, 12799, 12849, 13067, 13274, 13443, 13608, 13658, 13864, 14040, 14257, 14454, 14665, 14859, 15060, 15255, 15480, 15655, 15858, 16054, 16265, 16448, 16648, 16856, 17057, 17240, 17437, 17609]\n"
     ]
    }
   ],
   "source": [
    "#根据一个偏移量获取后面line_count行的数据\n",
    "def get_lines_by_offset(fd,offset,line_count):\n",
    "    fd.seek(offset)\n",
    "    fd.readline() #这一行可能不全，不要了\n",
    "    \n",
    "    result = []\n",
    "    for i in range(line_count):\n",
    "        try:\n",
    "            result.append(int(fd.readline()))\n",
    "        except:\n",
    "            print(\"Error!line:\",fd.readline(),\" start offset:\",offset)\n",
    "            return\n",
    "        \n",
    "    return result\n",
    "    \n",
    "# Quick check against the first trace file (hard-coded byte offset).\n",
    "# NOTE(review): fd is left open after this cell.\n",
    "fd = open(file_path_list[0],\"r\")\n",
    "result = get_lines_by_offset(fd, 1322232, 100)\n",
    "print(result)\n",
    "# Re-base the TOAs so the window starts at t=0.\n",
    "result = list(map(lambda item:item - result[0], result))\n",
    "print(result)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "0061e1b0",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sample a classification batch from on-disk trace files.\n",
    "def build_data_fromfile(batch_size,TOA_length,time_window_size,window_length,files=file_path_list):\n",
    "    \"\"\"Return (train_data, label) float32 tensors of shapes\n",
    "    (batch_size, window_length) and (batch_size, len(files)).\n",
    "\n",
    "    Relies on the global file_sizes cache (see update_fileinfo) being\n",
    "    index-aligned with `files`.\n",
    "\n",
    "    Fixes: the label width and file index now follow `files` -- the old\n",
    "    code hard-coded file_path_list, breaking callers that pass another\n",
    "    list -- and the descriptors are closed even if a read fails.\n",
    "    \"\"\"\n",
    "    train_data = []\n",
    "    label = []\n",
    "    fd_list = [open(file,\"r\") for file in files] # open every trace up front\n",
    "    try:\n",
    "        for i in range(batch_size):\n",
    "            index = random.randint(0,len(files)-1) # which trace to sample from\n",
    "            fd = fd_list[index]\n",
    "            max_offset = file_sizes[index]\n",
    "            label_list = [0]*len(files) # one-hot label\n",
    "            label_list[index] = 1\n",
    "\n",
    "            # 20 chars is a safe upper bound per line, so a window of\n",
    "            # TOA_length lines always fits before end-of-file.\n",
    "            TOA = get_lines_by_offset(fd, random.randint(0,max_offset-TOA_length*20), TOA_length)\n",
    "            TOA = list(map(lambda item:item - TOA[0], TOA)) # re-base to t=0\n",
    "\n",
    "            Binary_list = TOA_to_Binary(TOA, time_window_size, window_length)\n",
    "            train_data.append(Binary_list)\n",
    "            label.append(label_list)\n",
    "    finally:\n",
    "        for fd in fd_list:\n",
    "            fd.close()\n",
    "\n",
    "    train_data = torch.Tensor(train_data).to(device=device,dtype = torch.float32).reshape(-1,window_length)\n",
    "    label = torch.Tensor(label).to(device=device,dtype = torch.float32).reshape(-1,len(files))\n",
    "    return train_data, label"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "24954b6b",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([[1., 0., 0., 1., 0., 0., 0., 0., 1., 0., 1., 1., 0., 0., 0., 0., 1., 0.,\n",
      "         0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0.,\n",
      "         1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 1., 0., 0.,\n",
      "         0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0.,\n",
      "         1., 0., 0., 0., 1., 0., 0., 1., 0., 0., 0., 0., 1., 0., 0., 0., 1., 0.,\n",
      "         0., 1., 0., 0., 0., 0., 1., 0., 0., 1.],\n",
      "        [1., 0., 0., 0., 1., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 1., 1., 0.,\n",
      "         0., 0., 0., 1., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 1., 1., 0., 0.,\n",
      "         1., 0., 0., 0., 0., 1., 0., 0., 0., 0., 1., 0., 0., 0., 1., 1., 0., 1.,\n",
      "         0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 0., 0., 1.,\n",
      "         0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 0., 1., 0., 0., 0.,\n",
      "         0., 1., 1., 0., 1., 0., 0., 1., 0., 0.]])\n",
      "tensor([[1., 0., 0., 0.],\n",
      "        [0., 1., 0., 0.]])\n"
     ]
    }
   ],
   "source": [
    "# Smoke test: two samples, 100 TOAs each, 50-unit windows, 100-bin vectors.\n",
    "train_data, label = build_data_fromfile(2,100,50,100)\n",
    "print(train_data)\n",
    "print(label)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "5428b4d2",
   "metadata": {},
   "outputs": [],
   "source": [
    "def build_data_specific_fileline(filename,start_toa,label_index,TOA_length,time_window_size,window_length):\n",
    "    \"\"\"Build one (input, one-hot label) pair from the TOAs in `filename`,\n",
    "    starting at the first TOA >= start_toa.\n",
    "\n",
    "    Returns float32 tensors shaped (1, window_length) and\n",
    "    (1, len(file_path_list)).  The file is expected to hold one integer\n",
    "    TOA per line; no noise is added here.\n",
    "    \"\"\"\n",
    "    batch_size = 1  # start point is deterministic, so only one sample\n",
    "    train_data = []\n",
    "    TOA = []\n",
    "    label = []\n",
    "    fd = open(filename,\"r\")\n",
    "        \n",
    "    for i in range(batch_size):\n",
    "        label_list = [0]*len(file_path_list) # build the one-hot label\n",
    "        label_list[label_index] = 1\n",
    "        \n",
    "        # Read the data: skip lines until start_toa, then collect TOA_length values\n",
    "        for line in fd:\n",
    "            if(int(line)<start_toa):\n",
    "                continue\n",
    "            else:\n",
    "                TOA.append(int(line))\n",
    "                if(len(TOA) >= TOA_length):\n",
    "                    break\n",
    "        \n",
    "        #print(TOA)\n",
    "        TOA = list(map(lambda item:item - TOA[0], TOA))  # re-base to t=0\n",
    "        #print(TOA)\n",
    "        \n",
    "        Binary_list = TOA_to_Binary(TOA, time_window_size, window_length)\n",
    "        train_data.append(Binary_list)\n",
    "        #origin_Binary_list = TOA_to_Binary(TOA_Generator(pri_list,TOA_length),time_window_size, window_length)\n",
    "        label.append(label_list)\n",
    "        \n",
    "    train_data = torch.Tensor(train_data).to(device=device,dtype = torch.float32).reshape(-1,window_length)\n",
    "    label = torch.Tensor(label).to(device=device,dtype = torch.float32).reshape(-1,len(file_path_list))\n",
    "    \n",
    "    fd.close()\n",
    "    \n",
    "    return train_data, label"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "id": "23f07080",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0.,\n",
      "        0., 0.]) ..... tensor([0., 0., 0., 1., 0., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0.,\n",
      "        0., 0.])\n"
     ]
    }
   ],
   "source": [
    "temp_toa,temp_label = build_data_specific_fileline(file_path_list[0],169927391,0,2048,50,1000)\n",
    "print(temp_toa[0][:20],\".....\",temp_toa[0][-20:])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "id": "628676bf",
   "metadata": {},
   "outputs": [],
   "source": [
    "#读取文件第一个TOA值\n",
    "def get_file_head(rootpath, file_number):\n",
    "    with open(rootpath+str(file_number),'r') as f:\n",
    "        head = int(f.readline())\n",
    "    return head\n",
    "    \n",
    "\n",
    "# Binary-search a directory of numbered trace files for the file that\n",
    "# contains `toa`, then read TOA_length lines from it.  The TOA is given\n",
    "# explicitly, so there is no batch dimension (one sample).\n",
    "def build_data_specific_toa(rootpath,toa,label_index,max_file_number,TOA_length,time_window_size,window_length):\n",
    "    \"\"\"Return (train_data, label) float32 tensors shaped\n",
    "    (1, window_length) and (1, len(file_path_list)).\n",
    "\n",
    "    Files under rootpath are named 0..max_file_number and assumed to be\n",
    "    sorted: file N's first TOA is below file N+1's (required by the\n",
    "    binary search below).\n",
    "    \"\"\"\n",
    "    rootpath = rootpath+'/'\n",
    "    train_data = []\n",
    "    TOA = []\n",
    "    # Build the one-hot label\n",
    "    label = []\n",
    "    label_list = [0]*len(file_path_list) # one class per trace type\n",
    "    label_list[label_index] = 1\n",
    "    label.append(label_list)\n",
    "    \n",
    "    # Locate the file whose TOA range contains the query toa\n",
    "    head = 0\n",
    "    tail = max_file_number\n",
    "    start_file = head\n",
    "        \n",
    "    # Binary search over the files' first TOA values\n",
    "    while(True):\n",
    "        if(head >= tail):\n",
    "            start_file = head;\n",
    "            break;\n",
    "        # First TOA of head's successor and of tail\n",
    "        head_next_start = get_file_head(rootpath,head+1);\n",
    "        tail_start = get_file_head(rootpath,tail);\n",
    "        \n",
    "        if(toa < head_next_start):\n",
    "            start_file = head;\n",
    "            break;\n",
    "\n",
    "        if(toa >= tail_start):\n",
    "            start_file = tail;\n",
    "            break;\n",
    "\n",
    "        middle = (head+tail)//2;\n",
    "        middle_start = get_file_head(rootpath,middle);\n",
    "        if(toa > middle_start):\n",
    "            head = middle;\n",
    "\n",
    "        elif(toa < middle_start):\n",
    "            tail = middle;\n",
    "    \n",
    "        else:\n",
    "            start_file = middle;\n",
    "            break;\n",
    "\n",
    "    # Read TOA_length lines from the located file\n",
    "    file_path = rootpath+str(start_file)\n",
    "    #print(\"read from\",file_path)\n",
    "    with open(file_path,'r') as fd:\n",
    "        for line in fd:\n",
    "            TOA.append(int(line))\n",
    "            if(len(TOA) >= TOA_length):\n",
    "                break\n",
    "\n",
    "    #print(TOA)\n",
    "    TOA = list(map(lambda item:item - TOA[0], TOA))  # re-base to t=0\n",
    "    #print(TOA)\n",
    "\n",
    "    Binary_list = TOA_to_Binary(TOA, time_window_size, window_length)\n",
    "    train_data.append(Binary_list)\n",
    "        \n",
    "    train_data = torch.Tensor(train_data).to(device=device,dtype = torch.float32).reshape(-1,window_length)\n",
    "    label = torch.Tensor(label).to(device=device,dtype = torch.float32).reshape(-1,len(file_path_list))\n",
    "    \n",
    "    # NOTE(review): fd was opened via `with`, so it is already closed;\n",
    "    # this extra close() is a harmless no-op.\n",
    "    fd.close()\n",
    "    \n",
    "    return train_data, label"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "id": "707506ea",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "write progress: 10 %\n",
      "write progress: 20 %\n",
      "write progress: 30 %\n",
      "write progress: 40 %\n",
      "write progress: 50 %\n",
      "write progress: 60 %\n",
      "write progress: 70 %\n",
      "write progress: 80 %\n",
      "write progress: 90 %\n",
      "write progress: 100 %\n"
     ]
    }
   ],
   "source": [
    "# Write a synthetic multi-file dataset of increasing integer TOAs.\n",
    "def build_simple_dataset(rootpath, signal_count):\n",
    "    \"\"\"Emit files rootpath/0, rootpath/1, ... with 3 lines per signal\n",
    "    (i, i+1, i+2), flushing one file every 2048 signals.\n",
    "\n",
    "    NOTE(review): a trailing buffer smaller than 2048 signals is never\n",
    "    written, and signal_count < 10 raises ZeroDivisionError in the\n",
    "    progress report -- acceptable for the large counts used here.\n",
    "    \"\"\"\n",
    "    buffer = []\n",
    "    for i in range(signal_count):\n",
    "        buffer.append(i)\n",
    "        buffer.append(i+1)\n",
    "        buffer.append(i+2)\n",
    "        \n",
    "        # Flush one file per 2048 signals\n",
    "        if((i+1)%2048 == 0):\n",
    "            filename = rootpath+'/'+str(i//2048)\n",
    "            with open(filename,'w') as fd:\n",
    "                for line in buffer:\n",
    "                    fd.write(str(line)+\"\\n\")\n",
    "            buffer.clear()\n",
    "                    \n",
    "        # Progress report every 10%\n",
    "        if((i+1)%(signal_count//10)==0):\n",
    "            print(\"write progress:\",(i+1)//(signal_count//10)*10,\"%\")\n",
    "                    \n",
    "build_simple_dataset(\"/mnt/f2fs\",64*1000*1000)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "id": "336735fb",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([1., 1., 1., 1., 1., 1., 1., 1., 1., 1.])\n",
      "tensor([[1., 0., 0., 0.]])\n"
     ]
    }
   ],
   "source": [
    "# Local copy of the dataset parameters, used only by this test\n",
    "time_window_size = 10\n",
    "origin_toa_length = 2048\n",
    "binary_tia_length = 1000\n",
    "scale = 10\n",
    "miss_ratio = 0.1\n",
    "error_ratio = 0.1\n",
    "pri_type = 4\n",
    "\n",
    "trace_length = 16*1000*1000\n",
    "max_file_name = trace_length//2048-1 #start from 0\n",
    "toa = random.randint(0,trace_length-origin_toa_length)\n",
    "\n",
    "# NOTE(review): repeats the same lookup 2000 times -- presumably a\n",
    "# file-read latency benchmark; confirm the intent.\n",
    "for i in range(2000):\n",
    "    train_data, label = build_data_specific_toa(rootpath=\"/mnt/f2fs\",\n",
    "                                                toa=toa,\n",
    "                                                label_index=0,\n",
    "                                                max_file_number=max_file_name, # highest file index (numbering starts at 0)\n",
    "                                                TOA_length=origin_toa_length,\n",
    "                                                time_window_size=time_window_size,\n",
    "                                                window_length=binary_tia_length)\n",
    "\n",
    "#train_data, label = build_data_rootpath(2,100,50,100,file_len=1000000,rootpath=\"/home/gary/Code/dataset/radar/diff_ratio\")\n",
    "print(train_data[0][:10])\n",
    "print(label)\n",
    "\n",
    "# Restore the dataset parameters\n",
    "time_window_size = 10\n",
    "origin_toa_length = 2048\n",
    "binary_tia_length = 1000\n",
    "scale = 10\n",
    "miss_ratio = 0.1\n",
    "error_ratio = 0.1\n",
    "pri_type = 4"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "id": "bde1dca7",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Look up traces with the required length/error settings in the trace\n",
    "# library; generate any missing trace on the spot via the external\n",
    "# generation script, then hand the completed file list to\n",
    "# build_data_fromfile.\n",
    "import os\n",
    "def build_data_rootpath(batch_size,TOA_length,time_window_size,window_length,file_len=1000000,rootpath=\"/home/gary/Code/dataset/radar/diff_ratio\",update_info=True):\n",
    "    \"\"\"Ensure one trace file per PRI type exists (cons/sliding/ds/wobu),\n",
    "    generating missing ones with signal_file_gen.py, then delegate batch\n",
    "    construction to build_data_fromfile.\n",
    "\n",
    "    Reads the globals scale / miss_ratio / error_ratio to build the\n",
    "    trace file names.\n",
    "\n",
    "    NOTE(review): shell commands are built by string concatenation and\n",
    "    run via os.system -- paths with spaces or shell metacharacters will\n",
    "    break (consider subprocess with an argument list); `rm` without -f\n",
    "    prints an error when toa_start.txt does not exist.\n",
    "    \"\"\"\n",
    "    local_file_name_list = []\n",
    "    # Reset toa_start.txt to 0.  This file records how far the previous\n",
    "    # trace generation got (os.system gives no usable return value).\n",
    "    if(update_info):\n",
    "        os.system(\"rm toa_start.txt\")\n",
    "        os.system(\"echo 0 > toa_start.txt\")\n",
    "    \n",
    "    # Build each dataset path from the parameters and collect them\n",
    "    type_list = [\"cons\", \"sliding\", \"ds\", \"wobu\"]\n",
    "    for t in type_list:\n",
    "        trace_path = rootpath + '/'\n",
    "        trace_path += t\n",
    "\n",
    "        trace_len = file_len\n",
    "        unit = ''\n",
    "        if(trace_len >= 1000*1000*1000):\n",
    "            unit = 'G'\n",
    "            trace_len = trace_len//(1000*1000*1000)\n",
    "        elif(trace_len >= 1000*1000):\n",
    "            unit = 'M'\n",
    "            trace_len = trace_len//(1000*1000)\n",
    "        elif(trace_len >= 1000):\n",
    "            unit = 'K'\n",
    "            trace_len = trace_len//1000\n",
    "\n",
    "        trace_path += str(trace_len) + unit + '_'\n",
    "        trace_path += str(scale).zfill(2) + str(int(miss_ratio*100)).zfill(2) + str(int(error_ratio*100)).zfill(2)\n",
    "        trace_path += '.txt'\n",
    "        \n",
    "        local_file_name_list.append(trace_path)\n",
    "        \n",
    "        # Generate the trace if it does not exist yet\n",
    "        if(not os.path.exists(trace_path)):\n",
    "            print(trace_path,'not exist, now generate')\n",
    "            trace_gen_command = \"python3 signal_file_gen.py \"\n",
    "            trace_gen_command += \"-rootpath \" + rootpath + ' '\n",
    "            trace_gen_command += \"-type \" + t + ' '\n",
    "            start_from = 0\n",
    "            with open(\"toa_start.txt\",'r') as f:\n",
    "                start_from = int(f.readline())\n",
    "            trace_gen_command += \"-start \" + str(start_from) + ' '\n",
    "            trace_gen_command += \"-len \" + str(file_len) + ' '\n",
    "            trace_gen_command += \"-gauss \" + str(scale) + ' '\n",
    "            trace_gen_command += \"-miss \" + str(int(miss_ratio*100)) + ' '\n",
    "            trace_gen_command += \"-noise \" + str(int(error_ratio*100)) + ' '\n",
    "            \n",
    "            print(\"Generate command:\",trace_gen_command)\n",
    "            os.system(trace_gen_command)\n",
    "    \n",
    "    print(\"File list:\",local_file_name_list)\n",
    "    if(update_info):\n",
    "        print(\"Update file list info\")\n",
    "        update_fileinfo(local_file_name_list)\n",
    "        print(\"file_lines:\",file_lines)\n",
    "        print(\"file_sizes:\",file_sizes)\n",
    "    \n",
    "    # Hand the fully generated trace list to build_data_fromfile\n",
    "    return build_data_fromfile(batch_size,TOA_length,time_window_size,window_length,local_file_name_list)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "id": "aec09d1e",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "File list: ['/home/gary/Code/dataset/radar/diff_ratio/cons1M_101010.txt', '/home/gary/Code/dataset/radar/diff_ratio/sliding1M_101010.txt', '/home/gary/Code/dataset/radar/diff_ratio/ds1M_101010.txt', '/home/gary/Code/dataset/radar/diff_ratio/wobu1M_101010.txt']\n",
      "Error!line:   start offset: 44872135\n"
     ]
    },
    {
     "ename": "TypeError",
     "evalue": "'NoneType' object is not iterable",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mTypeError\u001b[0m                                 Traceback (most recent call last)",
      "\u001b[0;32m/tmp/ipykernel_4856/3195574502.py\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m      8\u001b[0m \u001b[0mpri_type\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;36m4\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m      9\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 10\u001b[0;31m train_data, label = build_data_rootpath(2,\n\u001b[0m\u001b[1;32m     11\u001b[0m                                         \u001b[0morigin_toa_length\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     12\u001b[0m                                         \u001b[0mtime_window_size\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m/tmp/ipykernel_4856/1878331412.py\u001b[0m in \u001b[0;36mbuild_data_rootpath\u001b[0;34m(batch_size, TOA_length, time_window_size, window_length, file_len, rootpath, update_info)\u001b[0m\n\u001b[1;32m     59\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     60\u001b[0m     \u001b[0;31m#将已经全部生成完毕的trace列表传入build_data_fromfile\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 61\u001b[0;31m     \u001b[0;32mreturn\u001b[0m \u001b[0mbuild_data_fromfile\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mbatch_size\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mTOA_length\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mtime_window_size\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mwindow_length\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mlocal_file_name_list\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
      "\u001b[0;32m/tmp/ipykernel_4856/2268713229.py\u001b[0m in \u001b[0;36mbuild_data_fromfile\u001b[0;34m(batch_size, TOA_length, time_window_size, window_length, files)\u001b[0m\n\u001b[1;32m     15\u001b[0m         \u001b[0;31m#读取数据\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     16\u001b[0m         \u001b[0mTOA\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mget_lines_by_offset\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfd\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mrandom\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrandint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mmax_offset\u001b[0m\u001b[0;34m-\u001b[0m\u001b[0mTOA_length\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0;36m20\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mTOA_length\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;31m#这里的20是预估每一行的字符数，最大20应该足够了\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 17\u001b[0;31m         \u001b[0mTOA\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mlist\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmap\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;32mlambda\u001b[0m \u001b[0mitem\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0mitem\u001b[0m \u001b[0;34m-\u001b[0m \u001b[0mTOA\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mTOA\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m     18\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     19\u001b[0m         \u001b[0mBinary_list\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mTOA_to_Binary\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mTOA\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtime_window_size\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mwindow_length\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;31mTypeError\u001b[0m: 'NoneType' object is not iterable"
     ]
    }
   ],
   "source": [
    "#Copy of the dataset parameters, used only for this test\n",
    "time_window_size = 10\n",
    "origin_toa_length = 2048\n",
    "binary_tia_length = 1000\n",
    "scale = 10\n",
    "miss_ratio = 0.1\n",
    "error_ratio = 0.1\n",
    "pri_type = 4\n",
    "\n",
    "#NOTE(review): the recorded traceback shows get_lines_by_offset returning None\n",
    "#inside build_data_fromfile -- confirm the trace files under rootpath exist\n",
    "train_data, label = build_data_rootpath(2,\n",
    "                                        origin_toa_length,\n",
    "                                        time_window_size,\n",
    "                                        binary_tia_length,\n",
    "                                        file_len=1000000,\n",
    "                                        rootpath=\"/home/gary/Code/dataset/radar/diff_ratio\",\n",
    "                                        update_info=False)\n",
    "\n",
    "#train_data, label = build_data_rootpath(2,100,50,100,file_len=1000000,rootpath=\"/home/gary/Code/dataset/radar/diff_ratio\")\n",
    "print(train_data)\n",
    "print(label)\n",
    "\n",
    "#Restore the dataset parameters\n",
    "time_window_size = 10\n",
    "origin_toa_length = 2048\n",
    "binary_tia_length = 1000\n",
    "scale = 10\n",
    "miss_ratio = 0.1\n",
    "error_ratio = 0.1\n",
    "pri_type = 4"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 36,
   "id": "590ffa60",
   "metadata": {},
   "outputs": [],
   "source": [
    "#Generate data from a MySQL database\n",
    "from radarssd_sql import *\n",
    "\n",
    "pri_type_range = [1802175393,3604210499,5466294896,7267999178]\n",
    "def build_data_fromdb(batch_size,TOA_length,time_window_size,window_length,max_stored_toa=0):\n",
    "    \"\"\"Build one batch of binarized TOA training data by sampling rows from MySQL.\n",
    "\n",
    "    batch_size     -- number of samples in the batch\n",
    "    TOA_length     -- number of TOA pulses read per sample\n",
    "    time_window_size -- quantization window passed to TOA_to_Binary\n",
    "    window_length  -- length of each binary signal vector\n",
    "    max_stored_toa -- upper bound for the random start TOA; 0 means use the\n",
    "                      full range covered by pri_type_range (performance tests\n",
    "                      pass a smaller bound)\n",
    "    Returns (train_data, label) as float32 tensors on `device`.\n",
    "    \"\"\"\n",
    "    train_data = []\n",
    "    label = []\n",
    "    \n",
    "    #Connect to the database\n",
    "    conn = connect_db()\n",
    "    cursor = conn.cursor()\n",
    "        \n",
    "    for i in range(batch_size):\n",
    "        max_toa=0\n",
    "        if(max_stored_toa==0):\n",
    "            max_toa = pri_type_range[-1]-TOA_length\n",
    "        else:\n",
    "            max_toa = max_stored_toa-TOA_length #for performance test\n",
    "        toa = random.randint(0,max_toa)\n",
    "        \n",
    "        #Determine which PRI type this TOA belongs to\n",
    "        label_list = [0]*len(file_path_list) #set the label\n",
    "#         for pri_type in range(len(pri_type_range)):\n",
    "#             if(toa < pri_type_range[pri_type]):\n",
    "#                 label_list[pri_type] = 1\n",
    "#                 break\n",
    "        \n",
    "        #Read the data.\n",
    "        #BUGFIX: TOA must be reset for every sample. It was previously\n",
    "        #initialized once outside the loop, so every sample accumulated all\n",
    "        #TOAs of the preceding samples, and the rebase by TOA[0] (already 0\n",
    "        #after the first iteration) silently became a no-op.\n",
    "        TOA = []\n",
    "        cursor.execute(build_select_query(start_TOA=toa, limit=TOA_length))\n",
    "        datas = cursor.fetchall()\n",
    "        for toa_data in datas:\n",
    "            TOA.append(toa_data[0])\n",
    "        \n",
    "        #Rebase so the first TOA of the sample is 0\n",
    "        TOA = list(map(lambda item:item - TOA[0], TOA))\n",
    "\n",
    "        Binary_list = TOA_to_Binary(TOA, time_window_size, window_length)\n",
    "        train_data.append(Binary_list)\n",
    "        label.append(label_list)\n",
    "        \n",
    "    train_data = torch.Tensor(train_data).to(device=device,dtype = torch.float32).reshape(-1,window_length)\n",
    "    label = torch.Tensor(label).to(device=device,dtype = torch.float32).reshape(-1,len(file_path_list))\n",
    "    \n",
    "    return train_data, label"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 37,
   "id": "7cdd0d6f",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([[1., 0., 0.,  ..., 0., 0., 0.],\n",
      "        [1., 0., 0.,  ..., 0., 0., 0.]])\n",
      "tensor([[0., 0., 0., 0.],\n",
      "        [0., 0., 0., 0.]])\n"
     ]
    }
   ],
   "source": [
    "#Smoke test: build a 2-sample batch from MySQL and inspect data and labels\n",
    "a,b = build_data_fromdb(2,10,time_window_size,binary_tia_length,1000)\n",
    "print(a)\n",
    "print(b)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 39,
   "id": "78ab37f1",
   "metadata": {},
   "outputs": [],
   "source": [
    "#Generate data from MongoDB; only performance (not accuracy) is measured here, so the label is constant\n",
    "from radarssd_kv import *\n",
    "\n",
    "def build_data_fromkv(batch_size,TOA_length,time_window_size,window_length,total_signal):\n",
    "    \"\"\"Build one batch of binarized TOA training data from MongoDB.\n",
    "\n",
    "    batch_size   -- number of samples in the batch\n",
    "    TOA_length   -- number of TOA pulses read per sample\n",
    "    time_window_size -- quantization window passed to TOA_to_Binary\n",
    "    window_length -- length of each binary signal vector\n",
    "    total_signal -- total number of stored pulses (bounds the random start)\n",
    "    Returns (train_data, label) as float32 tensors on `device`; the label is\n",
    "    always class 0 because only throughput is benchmarked here.\n",
    "    \"\"\"\n",
    "    train_data = []\n",
    "    label = []\n",
    "    \n",
    "    #Connect to the key-value store\n",
    "    kv_fd = open_mongodb()\n",
    "        \n",
    "    for i in range(batch_size):\n",
    "        max_toa = total_signal-TOA_length\n",
    "        toa = random.randint(0,max_toa)\n",
    "        \n",
    "        #Define the label (fixed to class 0 for this benchmark)\n",
    "        label_list = [0]*len(file_path_list)\n",
    "        label_list[0] = 1\n",
    "        \n",
    "        #Read the data (the dead `TOA = []` initializer that was immediately\n",
    "        #overwritten here has been removed)\n",
    "        TOA = get_toa(kv_fd, toa, TOA_length)\n",
    "        \n",
    "        #Rebase so the first TOA of the sample is 0\n",
    "        TOA = list(map(lambda item:item - TOA[0], TOA))\n",
    "\n",
    "        Binary_list = TOA_to_Binary(TOA, time_window_size, window_length)\n",
    "        train_data.append(Binary_list)\n",
    "        label.append(label_list)\n",
    "        \n",
    "    start = time.time()\n",
    "    train_data = torch.Tensor(train_data).to(device=device,dtype = torch.float32).reshape(-1,window_length)\n",
    "    label = torch.Tensor(label).to(device=device,dtype = torch.float32).reshape(-1,len(file_path_list))\n",
    "    end = time.time()\n",
    "    #print(\"data transfer:%.2fus\"%((end-start)*1000000)) \n",
    "    \n",
    "    return train_data, label"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 50,
   "id": "520b82aa",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "data transfer:324.96us\n",
      "tensor([[1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n",
      "         1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n",
      "         1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n",
      "         1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n",
      "         1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n",
      "         1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]])\n",
      "tensor([[1., 0., 0., 0.]])\n"
     ]
    }
   ],
   "source": [
    "#Smoke test: build a 1-sample batch from MongoDB and inspect data and label\n",
    "a,b = build_data_fromkv(1,1000,time_window_size,binary_tia_length,1000)\n",
    "print(a)\n",
    "print(b)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "22f73adf",
   "metadata": {},
   "outputs": [],
   "source": [
    "#Neural-network training loop (net, epochs, optimizer, crossloss, train_loss,\n",
    "#MinTrainLoss are presumably defined in earlier cells -- confirm before Run All)\n",
    "start = time.time()\n",
    "start0=time.time()\n",
    "for epoch in range(1,epochs+1):\n",
    "    net.train()    #training mode: parameters are updated; net.eval() is used for validation, where the model is only evaluated and no parameters are updated.\n",
    "    #pt_x_train, pt_y_train = build_data_mix(batch_size,pri_list_list,origin_toa_length,scale,time_window_size,binary_tia_length,miss_ratio,error_ratio) #returns one batch; training data is generated on the fly\n",
    "    pt_x_train, pt_y_train = build_data_fromfile(batch_size,origin_toa_length,time_window_size,binary_tia_length) #returns one batch of freshly generated training data\n",
    "    \n",
    "    pt_y_pred = net(pt_x_train) #feed the tensor through the network to get predictions\n",
    "    loss = crossloss(pt_y_pred,pt_y_train)  #difference between predictions and labels\n",
    "    optimizer.zero_grad()      #gradients must be cleared before each backward pass, otherwise they accumulate across iterations.\n",
    "    loss.backward()  # backpropagation\n",
    "    optimizer.step() #optimizer takes the next step\n",
    "    if epoch % 10 == 0:#report the loss every 10 epochs\n",
    "        end = time.time()\n",
    "        print(\"epoch:[%5d/%5d] time:%.2fs current_loss:%.5f\"\n",
    "          %(epoch,epochs,(end-start),loss.item()))\n",
    "        start = time.time()\n",
    "    train_loss.append(loss.item())\n",
    "    if train_loss[-1] < MinTrainLoss:\n",
    "        torch.save(net.state_dict(),\"model.pth\") #save the model each time the loss reaches a new minimum\n",
    "        MinTrainLoss = train_loss[-1]\n",
    "end0 = time.time()\n",
    "print(\"训练总用时: %.2fmin\"%((end0-start0)/60)) "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 284,
   "id": "3b2795c7",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "write progress: 10 %\n",
      "write progress: 20 %\n",
      "write progress: 30 %\n",
      "write progress: 40 %\n",
      "write progress: 50 %\n",
      "write progress: 60 %\n",
      "write progress: 70 %\n",
      "write progress: 80 %\n",
      "write progress: 90 %\n",
      "write progress: 100 %\n"
     ]
    }
   ],
   "source": [
    "#Generate a dataset of the required size before running the data-scale vs. compute-speed test\n",
    "build_simple_dataset(\"/mnt/f2fs\",1000*1000*1000)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 63,
   "id": "8f44f240",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "total spend time: 779322.86 us, average: 15586.457 us\n",
      "total read time: 763290.41 us, average: 15265.808 us\n",
      "total compute time: 15924.22 us, average: 318.484 us\n"
     ]
    },
    {
     "ename": "IndexError",
     "evalue": "index 1 is out of bounds for dimension 0 with size 1",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mIndexError\u001b[0m                                Traceback (most recent call last)",
      "\u001b[0;32m/tmp/ipykernel_4663/989593579.py\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m    128\u001b[0m \u001b[0msame\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;36m0\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    129\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mrow\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtest_batch\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 130\u001b[0;31m     \u001b[0;32mif\u001b[0m \u001b[0moutput\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mrow\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mequal\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mpt_y_test\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mrow\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    131\u001b[0m         \u001b[0msame\u001b[0m \u001b[0;34m+=\u001b[0m \u001b[0;36m1\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    132\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;31mIndexError\u001b[0m: index 1 is out of bounds for dimension 0 with size 1"
     ]
    }
   ],
   "source": [
    "#Evaluate the model\n",
    "model_start = time.time()\n",
    "Dnn = DNN().to(device)\n",
    "Dnn.load_state_dict(torch.load(\"model_byfile.pth\",map_location=device))#load the trained PyTorch model\n",
    "Dnn.eval()#eval mode: evaluation only, no backpropagation\n",
    "\n",
    "#Copy of the dataset parameters, used only for this test\n",
    "time_window_size = 10\n",
    "origin_toa_length = 2048\n",
    "binary_tia_length = 1000\n",
    "scale = 10\n",
    "miss_ratio = 0.1\n",
    "error_ratio = 0.1\n",
    "pri_type = 4\n",
    "trace_file_len = 16*1000000//4 #size of the dataset in use\n",
    "\n",
    "test_batch = 50 #total number of runs; in \"single\" mode, the number of single-batch runs\n",
    "#read_method = \"batch\"\n",
    "read_method = \"single\"\n",
    "total_second = 0\n",
    "read_time = 0\n",
    "compute_time = 0\n",
    "\n",
    "if(read_method == \"batch\"):\n",
    "    print(\"Processing in Batch\")\n",
    "    read_start = time.time()\n",
    "\n",
    "    pt_x_test,pt_y_test = build_data_fromfile(test_batch,origin_toa_length,time_window_size,binary_tia_length) #build one batch of test data\n",
    "    #pt_x_test,pt_y_test = build_data_specific_fileline(file_path_list[0],159979601,0,2048,10,1000)\n",
    "    #pt_x_test,pt_y_test = build_data_fromdb(test_batch,origin_toa_length,time_window_size,binary_tia_length)\n",
    "    pt_x_test,pt_y_test = build_data_rootpath(  test_batch,\n",
    "                                                origin_toa_length,\n",
    "                                                time_window_size,\n",
    "                                                binary_tia_length,\n",
    "                                                file_len=trace_file_len,\n",
    "                                                #rootpath=\"/home/gary/Code/dataset/radar/diff_ratio\",\n",
    "                                                rootpath=\"/mnt/f2fs\",\n",
    "                                                update_info=True)\n",
    "    #print(\"input :\",pt_x_test[:5]) #input data: binary signal with all error types added\n",
    "    #print(\"origin:\",pt_y_test[:5]) #original error-free binary signal\n",
    "\n",
    "    compute_start = time.time()\n",
    "    result_y_test = Dnn(pt_x_test) #run model inference\n",
    "    end_time = time.time()\n",
    "\n",
    "\n",
    "    total_second = round(end_time-read_start,6)*1000000\n",
    "    read_time = round(compute_start-read_start,6)*1000000\n",
    "    compute_time = round(end_time-compute_start,6)*1000000\n",
    "    \n",
    "elif(read_method == \"single\"):\n",
    "    #print(\"Processing Individual\")\n",
    "    trace_length = 4*1000*1000\n",
    "    max_file_name = trace_length//2048-1 #start from 0\n",
    "    toa = random.randint(0,trace_length-origin_toa_length)\n",
    "    total_second = time.time()\n",
    "    for i in range(test_batch):\n",
    "        read_start = time.time()\n",
    "        #pt_x_test,pt_y_test = build_data_fromfile(1,origin_toa_length,time_window_size,binary_tia_length) #build one batch of test data\n",
    "        #build_data_specific_fileline(filename,start_toa,label_index,TOA_length,time_window_size,window_length)\n",
    "#         pt_x_test,pt_y_test = build_data_specific_fileline(file_path_list[0],159979601,0,origin_toa_length,time_window_size,binary_tia_length)\n",
    "#         pt_x_test,pt_y_test = build_data_rootpath(  1,\n",
    "#                                                     origin_toa_length,\n",
    "#                                                     time_window_size,\n",
    "#                                                     binary_tia_length,\n",
    "#                                                     file_len=trace_file_len,\n",
    "#                                                     #rootpath=\"/home/gary/Code/dataset/radar/diff_ratio\",\n",
    "#                                                     rootpath=\"/mnt/f2fs\",\n",
    "#                                                     update_info=False)\n",
    "        \n",
    "#         pt_x_test,pt_y_test = build_data_specific_toa(rootpath=\"/mnt/f2fs\",\n",
    "#                                                     toa=toa,\n",
    "#                                                     label_index=0,\n",
    "#                                                     max_file_number=max_file_name, #maximum number of files, starting from 0\n",
    "#                                                     TOA_length=origin_toa_length,\n",
    "#                                                     time_window_size=time_window_size,\n",
    "#                                                     window_length=binary_tia_length)\n",
    "\n",
    "#         pt_x_test,pt_y_test = build_data_fromkv(1,\n",
    "#                                                 origin_toa_length,\n",
    "#                                                 time_window_size,\n",
    "#                                                 binary_tia_length,\n",
    "#                                                 trace_length)\n",
    "\n",
    "        pt_x_test,pt_y_test = build_data_fromdb(1,origin_toa_length,time_window_size,binary_tia_length,trace_length)\n",
    "    \n",
    "        read_time += time.time()-read_start\n",
    "        \n",
    "        compute_start = time.time()\n",
    "        result_y_test = Dnn(pt_x_test) #run model inference\n",
    "        compute_time += time.time()-compute_start\n",
    "        \n",
    "    total_second = round(time.time()-total_second,8)*1000000\n",
    "    read_time = round(read_time,8)*1000000\n",
    "    compute_time = round(compute_time,8)*1000000\n",
    "    \n",
    "else:\n",
    "    print(\"read_method error\")\n",
    "\n",
    "print(\"total spend time:\",round(total_second,3),\"us, average:\",round(total_second/test_batch,3),\"us\")\n",
    "print(\"total read time:\",round(read_time,3),\"us, average:\",round(read_time/test_batch,3),\"us\")\n",
    "print(\"total compute time:\",round(compute_time,3),\"us, average:\",round(compute_time/test_batch,3),\"us\")\n",
    "\n",
    "np_y_test = result_y_test.detach().cpu().numpy()#convert the output tensor to numpy for post-processing\n",
    "#print(\"raw output:\",np_y_test[:5])\n",
    "\n",
    "def find_max_index(result):\n",
    "    #Return the index of the largest value in `result` (argmax)\n",
    "    index = 0\n",
    "    for i,num in enumerate(result):\n",
    "        if num > result[index]:\n",
    "            index = i\n",
    "    return index\n",
    "\n",
    "#Binarize the raw outputs: one-hot vector at the argmax of each row\n",
    "for row in range(len(np_y_test)):\n",
    "    max_index = find_max_index(np_y_test[row])\n",
    "    for index,num in enumerate(np_y_test[row]):\n",
    "        np_y_test[row][index] = 0\n",
    "    np_y_test[row][max_index] = 1\n",
    "#         if(num>0.6): \n",
    "#             np_y_test[row][index] = 1\n",
    "#         else:\n",
    "#             np_y_test[row][index] = 0\n",
    "\n",
    "output = torch.Tensor(np_y_test) #binarized model predictions\n",
    "#print(\"output:\",output[:5])\n",
    "\n",
    "#Count matching predictions.\n",
    "#BUGFIX: in \"single\" mode only the last 1-row batch survives the loop, so\n",
    "#indexing output[row] for row in range(test_batch) raised IndexError;\n",
    "#iterate over the rows actually present instead (identical in batch mode).\n",
    "same = 0\n",
    "rows = min(test_batch, len(output))\n",
    "for row in range(rows):\n",
    "    if output[row].equal(pt_y_test[row]):\n",
    "        same += 1\n",
    "        \n",
    "print(rows)\n",
    "print(same)\n",
    "        \n",
    "print(\"accurate:\",round(same*100/rows,3),\"%\") #percentage of predictions equal to the ground truth"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 55,
   "id": "9cbd0b43",
   "metadata": {},
   "outputs": [
    {
     "ename": "IndexError",
     "evalue": "index 1 is out of bounds for dimension 0 with size 1",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mIndexError\u001b[0m                                Traceback (most recent call last)",
      "\u001b[0;32m/tmp/ipykernel_4663/3954247745.py\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m      2\u001b[0m \u001b[0msame\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;36m0\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m      3\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mrow\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtest_batch\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 4\u001b[0;31m     \u001b[0;32mif\u001b[0m \u001b[0moutput\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mrow\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mequal\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mpt_y_test\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mrow\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m      5\u001b[0m         \u001b[0msame\u001b[0m \u001b[0;34m+=\u001b[0m \u001b[0;36m1\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m      6\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;31mIndexError\u001b[0m: index 1 is out of bounds for dimension 0 with size 1"
     ]
    }
   ],
   "source": [
    "#Count how many predictions match the ground truth.\n",
    "#BUGFIX: `output` may hold fewer rows than test_batch (after a \"single\"-mode\n",
    "#run only the last 1-row batch survives), so indexing output[row] for\n",
    "#row in range(test_batch) raised IndexError; iterate over the rows present.\n",
    "same = 0\n",
    "rows = min(test_batch, len(output))\n",
    "for row in range(rows):\n",
    "    if output[row].equal(pt_y_test[row]):\n",
    "        same += 1\n",
    "        \n",
    "print(rows)\n",
    "print(same)\n",
    "        \n",
    "print(\"accurate:\",same*100/rows,\"%\") #percentage of predictions equal to the ground truth"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "864ab616",
   "metadata": {},
   "outputs": [],
   "source": [
    "torch.sigmoid(torch.Tensor([-19.755487]))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1a7f6fa3",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Inspect the network's current parameters\n",
    "net_state_dict = Dnn.state_dict()\n",
    "\n",
    "print('net_state_dict类型：', type(net_state_dict))\n",
    "print('net_state_dict管理的参数: ', net_state_dict.keys())\n",
    "for key, value in net_state_dict.items():\n",
    "    print('参数名: ', key, '\\t大小:',  value.shape, '\\t类型: ',  value.dtype)\n",
    "    print(value)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "dfe89977",
   "metadata": {},
   "outputs": [],
   "source": [
    "#Dump the model parameters to a text file\n",
    "net_state_dict = Dnn.state_dict()\n",
    "with open(\"model_byfile.txt\",\"w\") as fd: #context manager closes the file even if a write fails\n",
    "    for key, value in net_state_dict.items():\n",
    "        print('参数名: ', key, '\\t大小:',  value.shape, '\\t类型: ',  value.dtype)\n",
    "        for row in value.numpy().tolist():\n",
    "            try:\n",
    "                #2-D parameter (weight matrix): write each value.\n",
    "                #NOTE(review): the newline is emitted after every column, not\n",
    "                #once per row -- confirm this is the intended file format\n",
    "                for column in row:\n",
    "                    fd.write(str(column)+\" \")\n",
    "                    fd.write(\"\\n\")\n",
    "            except TypeError: #1-D parameter (bias): `row` is a scalar, not iterable\n",
    "                fd.write(str(row)+\" \")\n",
    "                fd.write(\"\\n\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c36a324f",
   "metadata": {},
   "outputs": [],
   "source": [
    "print(net_state_dict[\"layer4.weight\"])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "99e7ba08",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Create a dynamically quantized copy of the model\n",
    "model_int8 = torch.quantization.quantize_dynamic(\n",
    "    model=Dnn,  # the original (float) model\n",
    "    qconfig_spec={torch.nn.Linear},  # the layer types to quantize dynamically\n",
    "    dtype=torch.qint8)  # quantize the weights to qint8"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b4dbee5f",
   "metadata": {},
   "outputs": [],
   "source": [
    "#Evaluate the quantized model\n",
    "q_test_pri_list = [100, 150, 200, 250, 300]\n",
    "#NOTE(review): build_data and test_error_list are not defined in this notebook -- confirm they exist before running\n",
    "q_pt_x_test,q_pt_y_test = build_data(64,test_error_list,10000,30,200,1500,0.2,0.2) #build one batch of test data\n",
    "print(\"input :\",q_pt_x_test) #input data: binary signal with all error types added\n",
    "print(\"origin:\",q_pt_y_test) #original error-free binary signal\n",
    "q_result_y_test = model_int8(q_pt_x_test) #run inference on the quantized model\n",
    "q_np_y_test = q_result_y_test.detach().cpu().numpy()#convert the output tensor to numpy for post-processing\n",
    "print(\"raw output:\",q_np_y_test)\n",
    "for row in range(len(q_np_y_test)):\n",
    "    #Outputs are floats; binarize with <=0 -> 0, >0 -> 1.\n",
    "    #BUGFIX: threshold each row by its own values (was q_np_y_test[0],\n",
    "    #which always read row 0's values for every row).\n",
    "    for index,num in enumerate(q_np_y_test[row]):\n",
    "        if(num<=0):\n",
    "            q_np_y_test[row][index] = 0\n",
    "        else:\n",
    "            q_np_y_test[row][index] = 1\n",
    "q_output = torch.Tensor(q_np_y_test) #binarized quantized-model predictions\n",
    "print(\"q output:\",q_output)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "44dc7633",
   "metadata": {},
   "outputs": [],
   "source": [
    "#Count how many elements match after quantization (element-wise comparison)\n",
    "same = 0\n",
    "for row in range(len(q_pt_x_test)):\n",
    "    for i in range(len(q_pt_x_test[0])):\n",
    "        if q_output[row][i] == q_pt_y_test[row][i]:\n",
    "            same += 1\n",
    "        \n",
    "print(len(q_pt_x_test[0])*len(q_pt_x_test))\n",
    "print(same)\n",
    "        \n",
    "print(\"accurate:\",same/(len(q_pt_x_test[0])*len(q_pt_x_test))*100,\"%\") #percentage of elements equal to the ground truth"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d5e2dabb",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 获取量化网络后的参数\n",
    "qstate = model_int8.state_dict()\n",
    "\n",
    "print('量化模型类型：', type(qstate))\n",
    "print('量化模型管理的参数: ', qstate.keys())\n",
    "for key, value in qstate.items():\n",
    "    print('参数名: ', key, '\\t大小:',  value)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "99c52ab7",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
