{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### 前期处理"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**导入库**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd  # pandas数据处理\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "import torch.optim as optim\n",
    "from torch.utils.data import TensorDataset\n",
    "#from torch.autograd import Variable\n",
    "from torch.utils.data import DataLoader\n",
    "from torch.optim.lr_scheduler import LambdaLR\n",
    "import matplotlib.pyplot as plt\n",
    "from sklearn.model_selection import KFold\n",
    "import os,subprocess\n",
    "from sklearn.model_selection import train_test_split\n",
    "import math\n",
    "import time\n",
    "from ast import literal_eval # 用于转换为python对象"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**CUDA配置**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0\n",
      "cuda\n"
     ]
    }
   ],
   "source": [
    "if torch.cuda.is_available():\n",
    "    result = subprocess.check_output(\"nvidia-smi -L | grep -oE '[0-9]+:' | tr -d ':'\", shell=True).decode(\"utf-8\").strip()\n",
    "    os.environ['CUDA_VISIBLE_DEVICES'] = result\n",
    "\n",
    "    print(os.environ['CUDA_VISIBLE_DEVICES'])\n",
    "my_device = \"cpu\"\n",
    "if torch.cuda.is_available():\n",
    "    my_device = \"cuda\"\n",
    "print(my_device)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### 数据处理"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**查看数据种类**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Index(['t_final', 'x_0', 'y_0', 'v_x', 'v_y', 'T_constraint',\n",
       "       'S_constraint_min', 'S_constraint_max', 'Z_constraint_min',\n",
       "       'Z_constraint_max', 'Point_constraint', 'UnderFly_constraint'],\n",
       "      dtype='object')"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df = pd.read_csv(\"constrain.csv\");\n",
    "df.keys()\n",
    "#df"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**制作参数输入数据集**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "df_theta = df.loc[:,df.columns.drop(['t_final','T_constraint',\n",
    "       'S_constraint_min', 'S_constraint_max', 'Z_constraint_min',\n",
    "       'Z_constraint_max', 'Point_constraint', 'UnderFly_constraint'])]\n",
    "\n",
    "#df_theta.values\n",
    "x = torch.tensor(df_theta.values, dtype=torch.float32,device=my_device)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**制作紧约束和终端时间数据集**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "df_strategy = df.loc[:,df.columns.drop(['x_0', 'y_0', 'v_x', 'v_y'])]\n",
    "df_strategy\n",
    "df_strategy['T_constraint']\n",
    "def convert_string_to_list(s):\n",
    "    # 将分号替换为逗号\n",
    "    s = s.replace(';', ',')\n",
    "    # 使用 ast.literal_eval 解析字符串为列表\n",
    "    return literal_eval(s)\n",
    "def convert_to_float(value):\n",
    "    if isinstance(value, list):\n",
    "        return [float(x) for x in value]\n",
    "    else:\n",
    "        return float(value)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "#df_strategy.keys()\n",
    "columns_to_convert = [ 'T_constraint',\n",
    "       'S_constraint_min', 'S_constraint_max', 'Z_constraint_min',\n",
    "       'Z_constraint_max', 'Point_constraint', 'UnderFly_constraint']\n",
    "\n",
    "df_strategy[columns_to_convert] = df_strategy[columns_to_convert].applymap(convert_string_to_list)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>t_final</th>\n",
       "      <th>T_constraint</th>\n",
       "      <th>S_constraint_min</th>\n",
       "      <th>S_constraint_max</th>\n",
       "      <th>Z_constraint_min</th>\n",
       "      <th>Z_constraint_max</th>\n",
       "      <th>Point_constraint</th>\n",
       "      <th>UnderFly_constraint</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>54.3048</td>\n",
       "      <td>[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, ...</td>\n",
       "      <td>[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...</td>\n",
       "      <td>[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...</td>\n",
       "      <td>[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...</td>\n",
       "      <td>[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...</td>\n",
       "      <td>[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, ...</td>\n",
       "      <td>[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>51.6335</td>\n",
       "      <td>[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, ...</td>\n",
       "      <td>[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...</td>\n",
       "      <td>[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...</td>\n",
       "      <td>[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...</td>\n",
       "      <td>[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...</td>\n",
       "      <td>[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, ...</td>\n",
       "      <td>[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>62.7271</td>\n",
       "      <td>[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, ...</td>\n",
       "      <td>[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...</td>\n",
       "      <td>[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...</td>\n",
       "      <td>[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...</td>\n",
       "      <td>[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...</td>\n",
       "      <td>[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, ...</td>\n",
       "      <td>[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>50.6284</td>\n",
       "      <td>[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, ...</td>\n",
       "      <td>[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...</td>\n",
       "      <td>[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...</td>\n",
       "      <td>[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...</td>\n",
       "      <td>[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...</td>\n",
       "      <td>[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, ...</td>\n",
       "      <td>[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>68.3439</td>\n",
       "      <td>[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, ...</td>\n",
       "      <td>[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...</td>\n",
       "      <td>[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...</td>\n",
       "      <td>[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...</td>\n",
       "      <td>[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...</td>\n",
       "      <td>[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...</td>\n",
       "      <td>[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>...</th>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>5082</th>\n",
       "      <td>62.8213</td>\n",
       "      <td>[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, ...</td>\n",
       "      <td>[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...</td>\n",
       "      <td>[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...</td>\n",
       "      <td>[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...</td>\n",
       "      <td>[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...</td>\n",
       "      <td>[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...</td>\n",
       "      <td>[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>5083</th>\n",
       "      <td>40.2404</td>\n",
       "      <td>[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, ...</td>\n",
       "      <td>[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...</td>\n",
       "      <td>[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...</td>\n",
       "      <td>[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...</td>\n",
       "      <td>[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...</td>\n",
       "      <td>[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...</td>\n",
       "      <td>[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>5084</th>\n",
       "      <td>63.4039</td>\n",
       "      <td>[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, ...</td>\n",
       "      <td>[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...</td>\n",
       "      <td>[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...</td>\n",
       "      <td>[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...</td>\n",
       "      <td>[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...</td>\n",
       "      <td>[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, ...</td>\n",
       "      <td>[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>5085</th>\n",
       "      <td>53.7598</td>\n",
       "      <td>[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, ...</td>\n",
       "      <td>[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...</td>\n",
       "      <td>[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...</td>\n",
       "      <td>[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...</td>\n",
       "      <td>[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...</td>\n",
       "      <td>[1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...</td>\n",
       "      <td>[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>5086</th>\n",
       "      <td>68.4604</td>\n",
       "      <td>[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, ...</td>\n",
       "      <td>[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...</td>\n",
       "      <td>[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...</td>\n",
       "      <td>[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...</td>\n",
       "      <td>[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...</td>\n",
       "      <td>[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, ...</td>\n",
       "      <td>[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>5087 rows × 8 columns</p>\n",
       "</div>"
      ],
      "text/plain": [
       "      t_final                                       T_constraint  \\\n",
       "0     54.3048  [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, ...   \n",
       "1     51.6335  [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, ...   \n",
       "2     62.7271  [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, ...   \n",
       "3     50.6284  [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, ...   \n",
       "4     68.3439  [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, ...   \n",
       "...       ...                                                ...   \n",
       "5082  62.8213  [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, ...   \n",
       "5083  40.2404  [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, ...   \n",
       "5084  63.4039  [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, ...   \n",
       "5085  53.7598  [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, ...   \n",
       "5086  68.4604  [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, ...   \n",
       "\n",
       "                                       S_constraint_min  \\\n",
       "0     [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...   \n",
       "1     [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...   \n",
       "2     [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...   \n",
       "3     [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...   \n",
       "4     [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...   \n",
       "...                                                 ...   \n",
       "5082  [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...   \n",
       "5083  [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...   \n",
       "5084  [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...   \n",
       "5085  [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...   \n",
       "5086  [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...   \n",
       "\n",
       "                                       S_constraint_max  \\\n",
       "0     [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...   \n",
       "1     [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...   \n",
       "2     [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...   \n",
       "3     [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...   \n",
       "4     [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...   \n",
       "...                                                 ...   \n",
       "5082  [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...   \n",
       "5083  [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...   \n",
       "5084  [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...   \n",
       "5085  [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...   \n",
       "5086  [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...   \n",
       "\n",
       "                                       Z_constraint_min  \\\n",
       "0     [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...   \n",
       "1     [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...   \n",
       "2     [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...   \n",
       "3     [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...   \n",
       "4     [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...   \n",
       "...                                                 ...   \n",
       "5082  [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...   \n",
       "5083  [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...   \n",
       "5084  [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...   \n",
       "5085  [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...   \n",
       "5086  [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...   \n",
       "\n",
       "                                       Z_constraint_max  \\\n",
       "0     [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...   \n",
       "1     [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...   \n",
       "2     [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...   \n",
       "3     [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...   \n",
       "4     [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...   \n",
       "...                                                 ...   \n",
       "5082  [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...   \n",
       "5083  [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...   \n",
       "5084  [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...   \n",
       "5085  [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...   \n",
       "5086  [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...   \n",
       "\n",
       "                                       Point_constraint  \\\n",
       "0     [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, ...   \n",
       "1     [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, ...   \n",
       "2     [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, ...   \n",
       "3     [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, ...   \n",
       "4     [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...   \n",
       "...                                                 ...   \n",
       "5082  [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...   \n",
       "5083  [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...   \n",
       "5084  [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, ...   \n",
       "5085  [1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...   \n",
       "5086  [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, ...   \n",
       "\n",
       "                                    UnderFly_constraint  \n",
       "0     [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...  \n",
       "1     [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...  \n",
       "2     [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...  \n",
       "3     [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...  \n",
       "4     [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...  \n",
       "...                                                 ...  \n",
       "5082  [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...  \n",
       "5083  [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...  \n",
       "5084  [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...  \n",
       "5085  [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...  \n",
       "5086  [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...  \n",
       "\n",
       "[5087 rows x 8 columns]"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Apply the function to the entire DataFrame\n",
    "df_strategy_float = df_strategy.applymap(convert_to_float)\n",
    "df_strategy_float"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**约束展开并制作输出数据集**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>0</th>\n",
       "      <th>1</th>\n",
       "      <th>2</th>\n",
       "      <th>3</th>\n",
       "      <th>4</th>\n",
       "      <th>5</th>\n",
       "      <th>6</th>\n",
       "      <th>7</th>\n",
       "      <th>8</th>\n",
       "      <th>9</th>\n",
       "      <th>...</th>\n",
       "      <th>341</th>\n",
       "      <th>342</th>\n",
       "      <th>343</th>\n",
       "      <th>344</th>\n",
       "      <th>345</th>\n",
       "      <th>346</th>\n",
       "      <th>347</th>\n",
       "      <th>348</th>\n",
       "      <th>349</th>\n",
       "      <th>350</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>54.3048</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>...</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>51.6335</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>...</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>62.7271</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>...</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>50.6284</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>...</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>68.3439</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>...</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>...</th>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>5082</th>\n",
       "      <td>62.8213</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>...</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>5083</th>\n",
       "      <td>40.2404</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>...</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>5084</th>\n",
       "      <td>63.4039</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>...</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>5085</th>\n",
       "      <td>53.7598</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>...</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>5086</th>\n",
       "      <td>68.4604</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>...</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>5087 rows × 351 columns</p>\n",
       "</div>"
      ],
      "text/plain": [
       "          0    1    2    3    4    5    6    7    8    9    ...  341  342  \\\n",
       "0     54.3048  1.0  1.0  1.0  1.0  1.0  1.0  1.0  1.0  1.0  ...  0.0  0.0   \n",
       "1     51.6335  1.0  1.0  1.0  1.0  1.0  1.0  1.0  1.0  1.0  ...  0.0  0.0   \n",
       "2     62.7271  1.0  1.0  1.0  1.0  1.0  1.0  1.0  1.0  1.0  ...  0.0  0.0   \n",
       "3     50.6284  1.0  1.0  1.0  1.0  1.0  1.0  1.0  1.0  1.0  ...  0.0  0.0   \n",
       "4     68.3439  1.0  1.0  1.0  1.0  1.0  1.0  1.0  1.0  1.0  ...  0.0  0.0   \n",
       "...       ...  ...  ...  ...  ...  ...  ...  ...  ...  ...  ...  ...  ...   \n",
       "5082  62.8213  1.0  1.0  1.0  1.0  1.0  1.0  1.0  1.0  1.0  ...  0.0  0.0   \n",
       "5083  40.2404  1.0  1.0  1.0  1.0  1.0  1.0  1.0  1.0  1.0  ...  0.0  0.0   \n",
       "5084  63.4039  1.0  1.0  1.0  1.0  1.0  1.0  1.0  1.0  1.0  ...  0.0  0.0   \n",
       "5085  53.7598  1.0  1.0  1.0  1.0  1.0  1.0  1.0  1.0  1.0  ...  0.0  0.0   \n",
       "5086  68.4604  1.0  1.0  1.0  1.0  1.0  1.0  1.0  1.0  1.0  ...  0.0  0.0   \n",
       "\n",
       "      343  344  345  346  347  348  349  350  \n",
       "0     0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  \n",
       "1     0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  \n",
       "2     0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  \n",
       "3     0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  \n",
       "4     0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  \n",
       "...   ...  ...  ...  ...  ...  ...  ...  ...  \n",
       "5082  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  \n",
       "5083  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  \n",
       "5084  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  \n",
       "5085  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  \n",
       "5086  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  \n",
       "\n",
       "[5087 rows x 351 columns]"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Function to concatenate lists in a row\n",
    "def concat_lists(row):\n",
    "    concatenated_row = []\n",
    "    for col in df_strategy_float.columns:\n",
    "        if isinstance(row[col], list):\n",
    "            concatenated_row.extend(row[col])\n",
    "        else:\n",
    "            concatenated_row.append(row[col])\n",
    "    return pd.Series(concatenated_row)\n",
    "\n",
    "# Apply the function to each row of the DataFrame\n",
    "df_concatenated = df_strategy_float.apply(concat_lists, axis=1)\n",
    "# Convert the concatenated DataFrame to a PyTorch tensor\n",
    "y = torch.tensor(df_concatenated.values, dtype=torch.float32,device=my_device)\n",
    "df_concatenated"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**数据集范围**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Range of x_0: [ 1501.0839 ,  2499.047 ]\n",
      "Range of y_0: [ 1200.6284 ,  1799.595 ]\n",
      "Range of v_x: [ -99.6259 ,  99.7854 ]\n",
      "Range of v_y: [ -133.8294 ,  -50.0468 ]\n",
      "Range of Final Time: [ 27.75 ,  156.2462 ]\n"
     ]
    }
   ],
   "source": [
    "print(\"Range of x_0: [\", min(df_theta[\"x_0\"]),\", \", max(df_theta[\"x_0\"]),\"]\")\n",
    "print(\"Range of y_0: [\", min(df_theta[\"y_0\"]),\", \", max(df_theta[\"y_0\"]),\"]\")\n",
    "print(\"Range of v_x: [\", min(df_theta[\"v_x\"]),\", \", max(df_theta[\"v_x\"]),\"]\")\n",
    "print(\"Range of v_y: [\", min(df_theta[\"v_y\"]),\", \", max(df_theta[\"v_y\"]),\"]\")\n",
    "print(\"Range of Final Time: [\", min(df_strategy[\"t_final\"]),\", \", max(df_strategy[\"t_final\"]),\"]\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**创建数据集**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Dataset size:  5087\n"
     ]
    }
   ],
   "source": [
    "# Build the targets: column 0 of y is the final time, columns 1+ are the\n",
    "# constraint values.\n",
    "n = len(x)\n",
    "print('Dataset size: ', n)\n",
    "y_time, y_constraints = y[:, 0], y[:, 1:]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([54.3048, 51.6335, 62.7271,  ..., 63.4039, 53.7598, 68.4604],\n",
       "       device='cuda:0')"
      ]
     },
     "execution_count": 11,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Quick look at the final-time targets (1-D tensor, one value per sample)\n",
    "y_time"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[1., 1., 1.,  ..., 0., 0., 0.],\n",
       "        [1., 1., 1.,  ..., 0., 0., 0.],\n",
       "        [1., 1., 1.,  ..., 0., 0., 0.],\n",
       "        ...,\n",
       "        [1., 1., 1.,  ..., 0., 0., 0.],\n",
       "        [1., 1., 1.,  ..., 0., 0., 0.],\n",
       "        [1., 1., 1.,  ..., 0., 0., 0.]], device='cuda:0')"
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Quick look at the constraint targets (values shown are 0/1 per output column)\n",
    "y_constraints"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**分割测试集和数据集，并对其进行z-score标准化**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Hold out 20% for testing; the identical random_state keeps the two splits\n",
    "# aligned row-for-row.\n",
    "x_constraints_train, x_constraints_test, y_constraints_train, y_constraints_test = train_test_split(x, y_constraints, test_size=0.2, random_state=42)\n",
    "x_time_train, x_time_test, y_time_train, y_time_test = train_test_split(x, y_time, test_size=0.2, random_state=42)\n",
    "\n",
    "def _zscore(train, test):\n",
    "    \"\"\"Standardize `train`/`test` with one scalar mean/std computed over the\n",
    "    whole training tensor (note: a single scalar, not per-feature).\n",
    "\n",
    "    Returns (train_std, test_std, mean, std) so the training statistics can\n",
    "    be reused at inference time.\n",
    "    \"\"\"\n",
    "    mu = torch.mean(train)\n",
    "    sigma = torch.std(train)\n",
    "    return (train - mu) / sigma, (test - mu) / sigma, mu, sigma\n",
    "\n",
    "standardized_x_constraints_train, standardized_x_constraints_test, mean_constraints, std_constraints = _zscore(x_constraints_train, x_constraints_test)\n",
    "standardized_x_time_train, standardized_x_time_test, mean_time, std_time = _zscore(x_time_train, x_time_test)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**生成数据集**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Pair features with labels; keep both raw and standardized versions.\n",
    "standardized_train_dataset_constraints = TensorDataset(standardized_x_constraints_train, y_constraints_train)\n",
    "standardized_test_dataset_constraints = TensorDataset(standardized_x_constraints_test, y_constraints_test)\n",
    "\n",
    "train_dataset_constraints = TensorDataset(x_constraints_train, y_constraints_train)\n",
    "test_dataset_constraints = TensorDataset(x_constraints_test, y_constraints_test)\n",
    "\n",
    "standardized_train_dataset_time = TensorDataset(standardized_x_time_train, y_time_train)\n",
    "standardized_test_dataset_time = TensorDataset(standardized_x_time_test, y_time_test)\n",
    "\n",
    "train_dataset_time = TensorDataset(x_time_train, y_time_train)\n",
    "# BUG FIX: the original rebound `train_dataset_time` to the test tensors on\n",
    "# the next line, clobbering the train dataset and never creating a test one.\n",
    "test_dataset_time = TensorDataset(x_time_test, y_time_test)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### 设置模型和训练"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "##### 通用Transformer神经网络结构"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
    "class TransformerModel(nn.Module):\n",
    "    \"\"\"Encoder-only Transformer regressor.\n",
    "\n",
    "    Pipeline: linear projection to `d_model` -> positional encoding ->\n",
    "    `num_layers` TransformerEncoder layers -> linear decode of the last\n",
    "    position to `output_size` values.\n",
    "    \"\"\"\n",
    "    def __init__(self, input_size, output_size, d_model, nhead, num_layers, dropout):\n",
    "        super(TransformerModel, self).__init__()\n",
    "\n",
    "        self.d_model = d_model\n",
    "        # Project raw features into the model dimension\n",
    "        self.encoder = nn.Linear(input_size, d_model)\n",
    "        self.pos_encoder = PositionalEncoding(d_model, dropout)\n",
    "        self.transformer_encoder = nn.TransformerEncoder(nn.TransformerEncoderLayer(d_model, nhead), num_layers)\n",
    "        self.decoder = nn.Linear(d_model, output_size)\n",
    "\n",
    "    def forward(self, src):\n",
    "        # src shape: (batch_size, input_size)\n",
    "        # NOTE(review): with a 2-D `src`, `self.encoder(src)` is 2-D, but\n",
    "        # PositionalEncoding adds a 3-D slice of its `pe` buffer, so the\n",
    "        # addition broadcasts up to a 3-D tensor before the encoder layers --\n",
    "        # confirm this is the intended shape handling.\n",
    "        #print(\"src shape:\",src.shape)\n",
    "        # Scale embeddings by sqrt(d_model), as in the original Transformer paper\n",
    "        x = self.encoder(src) * math.sqrt(self.d_model)\n",
    "        #print(\"encoder shape:\",x.shape)\n",
    "        x = self.pos_encoder(x)\n",
    "        #print(\"pos_encoder shape:\",x.shape)\n",
    "        x = self.transformer_encoder(x)\n",
    "        #print(\"transformer shape:\",x.shape)\n",
    "        # Decode only the final position of the encoder output\n",
    "        x = self.decoder(x[:, -1, :])\n",
    "        #print(\"decoder shape:\",x.shape)\n",
    "        return x\n",
    "        \n",
    "class PositionalEncoding(nn.Module):\n",
    "    \"\"\"Sinusoidal positional encoding with dropout.\n",
    "\n",
    "    Precomputes a (max_len, 1, d_model) buffer `pe`: sine on even feature\n",
    "    indices, cosine on odd ones.\n",
    "    \"\"\"\n",
    "    def __init__(self, d_model, dropout, max_len=5000):\n",
    "        super(PositionalEncoding, self).__init__()\n",
    "        self.dropout = nn.Dropout(p=dropout)\n",
    "        \n",
    "        pe = torch.zeros(max_len, d_model)\n",
    "        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)\n",
    "        div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))\n",
    "        # note: relies on broadcasting of (max_len, 1) * (d_model/2,)\n",
    "        pe[:, 0::2] = torch.sin(position * div_term)\n",
    "        pe[:, 1::2] = torch.cos(position * div_term)\n",
    "        # reshape to (max_len, 1, d_model) so it adds along a sequence-first dim 0\n",
    "        pe = pe.unsqueeze(0).transpose(0, 1)\n",
    "        # buffer: saved/moved with the module but not trained\n",
    "        self.register_buffer('pe', pe)\n",
    "\n",
    "    def forward(self, x):\n",
    "        # note: relies on broadcasting; adds the first x.size(0) positional\n",
    "        # vectors to x (assumes dim 0 of x is the sequence dimension -- TODO confirm)\n",
    "        #print(\"pe shape:\",self.pe[:x.size(0), :].shape)\n",
    "        #print(\"x shape\",x.shape)\n",
    "        x = x + self.pe[:x.size(0), :]\n",
    "        #print(\"x add shape\",x.shape)\n",
    "        return self.dropout(x)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "##### 创建TimeNN的神经网络"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Define hyperparameters\n",
    "N = 50\n",
    "input_size = 4\n",
    "output_size = 1\n",
    "d_model = 384\n",
    "nhead = 3\n",
    "num_layers = 4    # more layers might be needed for complex tasks\n",
    "dropout = 0.1     # a common dropout rate for Transformer models\n",
    "\n",
    "# Instantiate the model\n",
    "timeTransformer = TransformerModel(input_size=input_size,\n",
    "                               output_size=output_size,\n",
    "                               d_model=d_model,\n",
    "                               nhead=nhead, num_layers=num_layers, dropout=dropout)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**模型结构**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Print the module tree to verify the architecture\n",
    "print(timeTransformer)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**模型参数量**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Total element count across all parameter tensors of the model\n",
    "total_params = sum(p.numel() for p in timeTransformer.parameters())\n",
    "total_params"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**训练timeNN的神经网络**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Move the model to the selected device and pick the standardized time datasets.\n",
    "# NOTE(review): the generic names `transformer`/`train_dataset`/`test_dataset`\n",
    "# are rebound later for the constraints model, so cell execution order matters.\n",
    "transformer = timeTransformer.to(my_device)\n",
    "train_dataset = standardized_train_dataset_time\n",
    "test_dataset = standardized_test_dataset_time"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Create the dataloader\n",
    "batch_size = 10\n",
    "learning_rate = 0.001\n",
    "\n",
    "# Define the loss function and optimizer\n",
    "criterion = nn.MSELoss()\n",
    "optimizer = torch.optim.Adam(transformer.parameters(), lr=learning_rate)\n",
    "warmup_steps = 40000  # define the number of warmup steps\n",
    "\n",
    "\n",
    "# LR multiplier schedule: ramps linearly toward 10x over the warmup steps,\n",
    "# then drops to 0.001 and decays as (step - warmup) ** -0.5.\n",
    "# NOTE(review): the jump from ~10 at the end of warmup down to 0.001 is a\n",
    "# large discontinuity -- confirm this is the intended schedule.\n",
    "def lr_lambda(current_step):\n",
    "    if current_step < warmup_steps:\n",
    "        return current_step*10 / warmup_steps\n",
    "    elif current_step == warmup_steps:\n",
    "        return 0.001\n",
    "    return 0.001 * (current_step - warmup_steps) ** -0.5\n",
    "\n",
    "# `optimizer` is the optimizer for your model\n",
    "scheduler = LambdaLR(optimizer, lr_lambda)\n",
    "\n",
    "# Train the model\n",
    "num_epochs = 100\n",
    "num_splits = 8\n",
    "kf = KFold(n_splits=num_splits)\n",
    "\n",
    "#train_dataset = Subset(train_dataset, indices=range(1, 40000))  # Use the first 40000 samples\n",
    "\n",
    "\n",
    "# Loop over the k-fold splits\n",
    "# NOTE(review): model/optimizer/scheduler state is not reset between folds,\n",
    "# so every fold continues training the same network.\n",
    "for fold, (train_idx, valid_idx) in enumerate(kf.split(train_dataset)):\n",
    "    print(f\"Fold: {fold + 1}\")\n",
    "    train_data = torch.utils.data.Subset(train_dataset, train_idx)\n",
    "    valid_data = torch.utils.data.Subset(train_dataset, valid_idx)\n",
    "\n",
    "    train_dataloader = DataLoader(train_data, batch_size=batch_size, shuffle=True)\n",
    "    valid_dataloader = DataLoader(valid_data, batch_size=batch_size, shuffle=True)\n",
    "    loss_values = []\n",
    "    valid_loss_values = []\n",
    "\n",
    "    for epoch in range(num_epochs):\n",
    "        running_loss = 0.0\n",
    "        valid_running_loss = 0.0\n",
    "\n",
    "        # Train on the training set\n",
    "        transformer.train()\n",
    "        for i, batch in enumerate(train_dataloader):\n",
    "            inputs, labels = batch\n",
    "            optimizer.zero_grad()\n",
    "            outputs = transformer(inputs)\n",
    "            loss = criterion(outputs.squeeze(), labels.squeeze())\n",
    "            loss.backward()\n",
    "            optimizer.step()\n",
    "            \n",
    "            # Step the scheduler\n",
    "            scheduler.step()\n",
    "            \n",
    "            running_loss += loss.item()\n",
    "            \n",
    "            # running_loss is zeroed every iteration, so avg_loss is really the\n",
    "            # single-batch loss, recorded once per optimizer step\n",
    "            avg_loss = running_loss\n",
    "            loss_values.append(avg_loss)\n",
    "            running_loss = 0.0\n",
    "\n",
    "            if (i) % 100 == 0:\n",
    "                print('[epoch = %d, i = %5d] loss: %.3f' % (epoch + 1, i + 1, avg_loss))                \n",
    "\n",
    "        # Validate on the validation set\n",
    "        transformer.eval()\n",
    "        with torch.no_grad():\n",
    "            for i, batch in enumerate(valid_dataloader):\n",
    "                inputs, labels = batch\n",
    "                outputs = transformer(inputs)\n",
    "                valid_loss = criterion(outputs.squeeze(), labels.squeeze())\n",
    "                valid_running_loss += valid_loss.item()\n",
    "\n",
    "            # Mean validation loss over all validation batches for this epoch\n",
    "            avg_valid_loss = valid_running_loss / len(valid_dataloader)\n",
    "            valid_loss_values.append(avg_valid_loss)\n",
    "            print(f\"Validation Loss: {avg_valid_loss:.3f}\")\n",
    "\n",
    "    # Plot the loss values\n",
    "    plt.plot(loss_values, label=\"Training Loss\")\n",
    "    #plt.plot(valid_loss_values, label=\"Validation Loss\")\n",
    "    plt.xlabel('Steps')\n",
    "    plt.ylabel('Loss')\n",
    "    plt.title(f\"Fold {fold + 1} Loss\")\n",
    "    plt.legend()\n",
    "    plt.show()\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**保存TimeNN模型**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Persist the whole nn.Module (pickles the full object).\n",
    "# NOTE(review): saving the full model ties the checkpoint to this exact\n",
    "# class/module layout; `transformer.state_dict()` would be more portable.\n",
    "torch.save(transformer, 'Model/transformer_model_vf_0_time_normalized_large.pt')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "##### ConstrainNN创建"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Hyperparameters for the constraints-prediction Transformer\n",
    "N = 50\n",
    "input_size = 4\n",
    "output_size = 7*N  # 350 constraint outputs per sample\n",
    "d_model = 384\n",
    "nhead = 2          # must divide d_model (384 / 2 = 192)\n",
    "num_layers = 4     # more layers might be needed for complex tasks\n",
    "dropout = 0.1      # a common dropout rate for Transformer models\n",
    "\n",
    "# Instantiate the model\n",
    "constraintsTransformer = TransformerModel(\n",
    "    input_size=input_size,\n",
    "    output_size=output_size,\n",
    "    d_model=d_model,\n",
    "    nhead=nhead,\n",
    "    num_layers=num_layers,\n",
    "    dropout=dropout,\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**模型结构**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Print the module tree to verify the architecture\n",
    "print(constraintsTransformer)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**模型参数量**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Total element count across all parameter tensors of the model\n",
    "total_params = sum(p.numel() for p in constraintsTransformer.parameters())\n",
    "total_params"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**constrainNN 训练**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Move the constraints model to the device and switch the working datasets.\n",
    "# NOTE(review): this rebinds `transformer`/`train_dataset`/`test_dataset`,\n",
    "# which earlier pointed at the time model -- cell execution order matters.\n",
    "transformer = constraintsTransformer.to(my_device)\n",
    "train_dataset = standardized_train_dataset_constraints\n",
    "test_dataset = standardized_test_dataset_constraints"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Create the dataloader\n",
    "batch_size = 50\n",
    "learning_rate = 0.001\n",
    "\n",
    "# Define the loss function and optimizer\n",
    "criterion = nn.MSELoss()\n",
    "optimizer = torch.optim.Adam(transformer.parameters(), lr=learning_rate)\n",
    "\n",
    "# (removed: the original assigned warmup_steps = 4000 here, but this cell\n",
    "# creates no LR scheduler, so the assignment was dead code that also\n",
    "# silently clobbered the `warmup_steps` global read by the time model's\n",
    "# lr_lambda)\n",
    "\n",
    "# Train the model\n",
    "num_epochs = 50\n",
    "num_splits = 3\n",
    "kf = KFold(n_splits=num_splits)\n",
    "\n",
    "#train_dataset = Subset(train_dataset, indices=range(0, 10000))  # Use the first 10000 samples\n",
    "\n",
    "\n",
    "# Loop over the k-fold splits\n",
    "# NOTE(review): model/optimizer state is not reset between folds, so every\n",
    "# fold continues training the same network.\n",
    "for fold, (train_idx, valid_idx) in enumerate(kf.split(train_dataset)):\n",
    "    print(f\"Fold: {fold + 1}\")\n",
    "    train_data = torch.utils.data.Subset(train_dataset, train_idx)\n",
    "    print(f\"train_data length {len(train_data)}\")\n",
    "    valid_data = torch.utils.data.Subset(train_dataset, valid_idx)\n",
    "    # BUG FIX: this log line previously said \"train_data length\" for the\n",
    "    # validation subset\n",
    "    print(f\"valid_data length {len(valid_data)}\")\n",
    "\n",
    "    train_dataloader = DataLoader(train_data, batch_size=batch_size, shuffle=True)\n",
    "    valid_dataloader = DataLoader(valid_data, batch_size=batch_size, shuffle=True)\n",
    "    loss_values = []\n",
    "    valid_loss_values = []\n",
    "\n",
    "    for epoch in range(num_epochs):\n",
    "        running_loss = 0.0\n",
    "        valid_running_loss = 0.0\n",
    "\n",
    "        # Train on the training set\n",
    "        transformer.train()\n",
    "        for i, batch in enumerate(train_dataloader):\n",
    "            inputs, labels = batch\n",
    "            optimizer.zero_grad()\n",
    "            outputs = transformer(inputs)\n",
    "            loss = criterion(outputs.squeeze(), labels.squeeze())\n",
    "            loss.backward()\n",
    "            optimizer.step()\n",
    "            running_loss += loss.item()\n",
    "            \n",
    "            # running_loss is zeroed every iteration, so avg_loss is really the\n",
    "            # single-batch loss, recorded once per optimizer step\n",
    "            avg_loss = running_loss\n",
    "            loss_values.append(avg_loss)\n",
    "            running_loss = 0.0\n",
    "\n",
    "            if (i) % 20 == 0:\n",
    "                print('[epoch = %d, i = %5d] loss: %.3f' % (epoch + 1, i + 1, avg_loss))                \n",
    "\n",
    "        # Validate on the validation set\n",
    "        transformer.eval()\n",
    "        with torch.no_grad():\n",
    "            for i, batch in enumerate(valid_dataloader):\n",
    "                inputs, labels = batch\n",
    "                outputs = transformer(inputs)\n",
    "                valid_loss = criterion(outputs.squeeze(), labels.squeeze())\n",
    "                valid_running_loss += valid_loss.item()\n",
    "\n",
    "            # Mean validation loss over all validation batches for this epoch\n",
    "            avg_valid_loss = valid_running_loss / len(valid_dataloader)\n",
    "            valid_loss_values.append(avg_valid_loss)\n",
    "            print(f\"Validation Loss: {avg_valid_loss:.3f}\")\n",
    "\n",
    "    # Plot the loss values\n",
    "    plt.plot(loss_values, label=\"Training Loss\")\n",
    "    #plt.plot(valid_loss_values, label=\"Validation Loss\")\n",
    "    plt.xlabel('Steps')\n",
    "    plt.ylabel('Loss')\n",
    "    plt.title(f\"Fold {fold + 1} Loss\")\n",
    "    plt.legend()\n",
    "    plt.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**保存ConstrainNN模型**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Persist the whole nn.Module (pickles the full object).\n",
    "# NOTE(review): saving the full model ties the checkpoint to this exact\n",
    "# class/module layout; `transformer.state_dict()` would be more portable.\n",
    "torch.save(transformer, 'Model/transformer_model_vf_0_constrain_normalized_changebatchsize.pt')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### 评估模型训练效果"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "##### ConstrainNN模型评估训练效果"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Binary accuracy on test set: 93.07 %\n",
      "[96.85714285714286, 88.85714285714286, 96.28571428571429, 93.71428571428571, 89.42857142857143, 90.85714285714286, 86.0, 93.14285714285714, 86.0, 97.14285714285714, 90.28571428571429, 97.71428571428571, 93.71428571428571, 88.85714285714286, 96.0, 89.71428571428571, 97.14285714285714, 94.85714285714286, 96.28571428571429, 91.14285714285714, 93.14285714285714, 97.42857142857143, 95.42857142857143, 96.0, 93.14285714285714, 91.42857142857143, 92.0, 89.71428571428571, 90.85714285714286, 88.0, 89.14285714285714, 96.28571428571429, 93.14285714285714, 85.71428571428571, 89.14285714285714, 98.85714285714286, 95.14285714285714, 96.57142857142857, 97.42857142857143, 95.14285714285714, 97.14285714285714, 88.85714285714286, 93.42857142857143, 92.57142857142857, 97.71428571428571, 95.71428571428571, 96.0, 94.0, 91.14285714285714, 98.28571428571429, 89.71428571428571, 91.71428571428571, 89.42857142857143, 95.14285714285714, 92.57142857142857, 97.71428571428571, 89.14285714285714, 96.85714285714286, 97.14285714285714, 88.28571428571429, 96.28571428571429, 98.0, 92.28571428571429, 96.57142857142857, 96.0, 95.42857142857143, 97.42857142857143, 95.42857142857143, 93.42857142857143, 94.57142857142857, 90.85714285714286, 94.0, 95.71428571428571, 88.85714285714286, 97.14285714285714, 98.28571428571429, 93.71428571428571, 99.14285714285714, 94.0, 90.85714285714286, 87.42857142857143, 94.85714285714286, 89.42857142857143, 93.71428571428571, 92.28571428571429, 92.57142857142857, 96.28571428571429, 97.71428571428571, 98.0, 93.42857142857143, 93.42857142857143, 96.57142857142857, 93.14285714285714, 94.28571428571429, 98.57142857142857, 94.0, 96.28571428571429, 96.57142857142857, 96.0, 89.42857142857143, 95.42857142857143, 95.71428571428571, 93.42857142857143, 90.57142857142857, 96.28571428571429, 90.57142857142857, 96.57142857142857, 97.14285714285714, 92.57142857142857, 91.42857142857143, 98.57142857142857, 94.57142857142857, 97.42857142857143, 96.85714285714286, 98.28571428571429, 
98.0, 95.71428571428571, 86.0, 93.42857142857143, 91.42857142857143, 95.14285714285714, 93.14285714285714, 92.28571428571429, 96.85714285714286, 88.57142857142857, 96.85714285714286, 93.71428571428571, 92.85714285714286, 97.71428571428571, 87.71428571428571, 94.0, 94.28571428571429, 97.71428571428571, 97.14285714285714, 93.42857142857143, 93.71428571428571, 93.71428571428571, 92.85714285714286, 89.14285714285714, 96.57142857142857, 94.0, 89.71428571428571, 97.14285714285714, 95.14285714285714, 96.28571428571429, 91.14285714285714, 96.0, 94.28571428571429, 97.42857142857143, 91.42857142857143, 92.57142857142857, 94.57142857142857, 92.0, 89.71428571428571, 96.0, 97.71428571428571, 92.85714285714286, 88.57142857142857, 90.85714285714286, 93.71428571428571, 94.0, 90.85714285714286, 91.14285714285714, 90.28571428571429, 93.42857142857143, 91.42857142857143, 89.71428571428571, 96.28571428571429, 97.42857142857143, 93.14285714285714, 91.71428571428571, 90.57142857142857, 92.0, 85.71428571428571, 93.71428571428571, 96.57142857142857, 93.71428571428571, 96.0, 90.85714285714286, 97.14285714285714, 93.71428571428571, 89.14285714285714, 87.42857142857143, 98.57142857142857, 88.57142857142857, 88.57142857142857, 90.85714285714286, 94.85714285714286, 94.28571428571429, 96.57142857142857, 94.85714285714286, 87.42857142857143, 89.14285714285714, 89.71428571428571, 92.0, 92.28571428571429, 88.85714285714286, 96.85714285714286, 92.57142857142857, 91.14285714285714, 97.42857142857143, 92.85714285714286, 93.71428571428571, 95.71428571428571, 91.42857142857143, 93.71428571428571, 97.42857142857143, 97.42857142857143, 89.14285714285714, 88.57142857142857, 90.0, 89.14285714285714, 96.28571428571429, 91.14285714285714, 87.14285714285714, 94.0, 93.71428571428571, 94.85714285714286, 90.57142857142857, 93.71428571428571, 96.57142857142857, 92.85714285714286, 98.57142857142857, 89.42857142857143, 93.42857142857143, 93.71428571428571, 98.28571428571429, 91.71428571428571, 96.85714285714286, 
93.14285714285714, 93.42857142857143, 95.71428571428571, 92.85714285714286, 89.42857142857143, 93.14285714285714, 92.28571428571429, 90.85714285714286, 94.85714285714286, 91.14285714285714, 87.71428571428571, 93.71428571428571, 93.14285714285714, 92.28571428571429, 93.71428571428571, 91.14285714285714, 95.71428571428571, 93.14285714285714, 91.14285714285714, 88.28571428571429, 93.14285714285714, 93.71428571428571, 91.71428571428571, 92.85714285714286, 92.85714285714286, 93.42857142857143, 89.71428571428571, 93.14285714285714, 85.42857142857143, 98.0, 94.0, 90.85714285714286, 97.42857142857143, 96.28571428571429, 88.28571428571429, 90.85714285714286, 96.28571428571429, 93.71428571428571, 95.42857142857143, 93.42857142857143, 93.71428571428571, 96.57142857142857, 88.0, 89.42857142857143, 95.14285714285714, 93.42857142857143, 93.71428571428571, 89.14285714285714, 93.42857142857143, 90.85714285714286, 89.14285714285714, 91.14285714285714, 98.0, 98.0, 93.14285714285714, 96.0, 92.28571428571429, 98.0, 89.42857142857143, 97.14285714285714, 93.14285714285714, 90.85714285714286, 95.71428571428571, 93.42857142857143, 89.71428571428571, 89.14285714285714, 96.0, 88.28571428571429, 98.57142857142857, 88.0, 96.0, 93.42857142857143, 94.57142857142857, 91.14285714285714, 87.42857142857143, 90.28571428571429, 94.57142857142857, 90.28571428571429, 95.71428571428571, 93.14285714285714, 94.0, 96.57142857142857, 90.85714285714286, 93.71428571428571, 96.28571428571429, 93.42857142857143, 94.57142857142857, 90.57142857142857, 94.0, 93.71428571428571, 94.0, 94.28571428571429, 96.57142857142857, 92.85714285714286, 90.85714285714286, 87.42857142857143, 95.42857142857143, 97.71428571428571, 92.57142857142857, 96.28571428571429, 90.85714285714286, 96.85714285714286, 97.42857142857143, 97.14285714285714, 86.57142857142857, 94.57142857142857, 93.42857142857143, 88.57142857142857, 94.28571428571429, 94.0, 88.57142857142857, 94.85714285714286, 93.42857142857143, 94.28571428571429, 
97.42857142857143, 92.0, 96.57142857142857, 85.14285714285714, 92.57142857142857, 90.85714285714286, 93.71428571428571, 92.0, 96.57142857142857, 90.28571428571429, 91.71428571428571, 96.0, 93.71428571428571, 87.71428571428571, 93.14285714285714, 96.0, 92.57142857142857, 90.28571428571429, 94.57142857142857, 91.14285714285714, 97.14285714285714, 91.14285714285714, 98.28571428571429, 90.85714285714286, 97.14285714285714, 90.57142857142857, 88.85714285714286, 85.14285714285714, 88.0, 87.42857142857143, 94.28571428571429, 93.71428571428571, 88.85714285714286, 94.28571428571429, 88.28571428571429, 97.14285714285714, 98.85714285714286, 98.0, 91.42857142857143, 93.42857142857143, 96.85714285714286, 93.14285714285714, 88.0, 93.71428571428571, 96.57142857142857, 87.42857142857143, 90.85714285714286, 90.57142857142857, 89.42857142857143, 93.71428571428571, 93.71428571428571, 90.28571428571429, 97.42857142857143, 92.85714285714286, 95.71428571428571, 96.57142857142857, 88.57142857142857, 95.14285714285714, 90.0, 85.42857142857143, 93.71428571428571, 95.71428571428571, 89.71428571428571, 89.42857142857143, 95.71428571428571, 94.0, 93.42857142857143, 90.85714285714286, 89.71428571428571, 91.14285714285714, 90.57142857142857, 89.71428571428571, 89.42857142857143, 97.14285714285714, 96.0, 98.0, 96.57142857142857, 91.42857142857143, 94.85714285714286, 97.42857142857143, 92.28571428571429, 95.14285714285714, 92.85714285714286, 89.42857142857143, 90.85714285714286, 90.85714285714286, 96.0, 92.85714285714286, 89.71428571428571, 94.0, 93.71428571428571, 92.57142857142857, 93.42857142857143, 90.57142857142857, 91.42857142857143, 95.14285714285714, 87.14285714285714, 91.71428571428571, 88.0, 96.28571428571429, 90.57142857142857, 97.14285714285714, 92.28571428571429, 90.57142857142857, 94.85714285714286, 93.71428571428571, 93.14285714285714, 91.42857142857143, 94.57142857142857, 90.0, 93.42857142857143, 96.28571428571429, 87.71428571428571, 89.14285714285714, 93.42857142857143, 94.0, 
96.85714285714286, 96.85714285714286, 92.85714285714286, 96.28571428571429, 90.85714285714286, 95.71428571428571, 88.57142857142857, 88.28571428571429, 93.14285714285714, 95.71428571428571, 93.71428571428571, 89.42857142857143, 94.0, 94.0, 96.0, 95.42857142857143, 90.85714285714286, 92.85714285714286, 91.14285714285714, 93.42857142857143, 90.28571428571429, 88.0, 90.85714285714286, 91.14285714285714, 94.57142857142857, 93.42857142857143, 94.0, 94.28571428571429, 92.57142857142857, 91.14285714285714, 91.14285714285714, 96.28571428571429, 91.42857142857143, 93.71428571428571, 88.57142857142857, 88.85714285714286, 89.71428571428571, 92.0, 94.85714285714286, 95.71428571428571, 96.57142857142857, 95.14285714285714, 93.14285714285714, 93.71428571428571, 94.85714285714286, 88.28571428571429, 96.57142857142857, 93.71428571428571, 87.42857142857143, 94.28571428571429, 87.71428571428571, 90.85714285714286, 95.42857142857143, 95.14285714285714, 90.57142857142857, 92.28571428571429, 98.28571428571429, 93.14285714285714, 85.71428571428571, 89.14285714285714, 93.14285714285714, 93.71428571428571, 90.0, 96.0, 90.85714285714286, 93.14285714285714, 96.85714285714286, 89.42857142857143, 89.71428571428571, 97.42857142857143, 92.0, 98.0, 95.14285714285714, 89.14285714285714, 94.0, 90.85714285714286, 90.85714285714286, 93.14285714285714, 93.14285714285714, 93.14285714285714, 93.71428571428571, 91.42857142857143, 93.71428571428571, 93.71428571428571, 93.71428571428571, 89.71428571428571, 92.0, 96.85714285714286, 93.71428571428571, 92.28571428571429, 96.57142857142857, 94.57142857142857, 92.85714285714286, 93.14285714285714, 88.57142857142857, 90.28571428571429, 91.71428571428571, 98.0, 87.71428571428571, 89.14285714285714, 87.71428571428571, 94.0, 93.71428571428571, 97.14285714285714, 87.71428571428571, 87.71428571428571, 93.71428571428571, 95.14285714285714, 89.42857142857143, 95.42857142857143, 91.14285714285714, 91.42857142857143, 93.42857142857143, 87.14285714285714, 
96.57142857142857, 92.85714285714286, 97.14285714285714, 93.71428571428571, 90.85714285714286, 94.85714285714286, 93.14285714285714, 92.28571428571429, 93.71428571428571, 96.85714285714286, 93.14285714285714, 96.0, 96.0, 97.14285714285714, 93.42857142857143, 87.71428571428571, 92.85714285714286, 94.0, 89.14285714285714, 89.71428571428571, 97.42857142857143, 97.42857142857143, 94.85714285714286, 96.28571428571429, 90.85714285714286, 96.85714285714286, 91.42857142857143, 94.57142857142857, 93.71428571428571, 94.0, 90.28571428571429, 93.42857142857143, 96.0, 93.14285714285714, 95.14285714285714, 89.42857142857143, 87.42857142857143, 93.71428571428571, 98.57142857142857, 92.0, 98.28571428571429, 93.71428571428571, 90.85714285714286, 96.28571428571429, 97.14285714285714, 93.71428571428571, 97.14285714285714, 93.71428571428571, 98.0, 93.71428571428571, 98.28571428571429, 90.85714285714286, 96.28571428571429, 91.14285714285714, 98.28571428571429, 97.42857142857143, 92.0, 89.71428571428571, 94.57142857142857, 92.28571428571429, 96.0, 87.42857142857143, 89.14285714285714, 89.71428571428571, 93.71428571428571, 90.85714285714286, 89.42857142857143, 89.14285714285714, 91.14285714285714, 88.85714285714286, 92.57142857142857, 95.42857142857143, 96.57142857142857, 95.71428571428571, 88.57142857142857, 91.71428571428571, 89.42857142857143, 92.28571428571429, 93.14285714285714, 89.42857142857143, 91.14285714285714, 91.14285714285714, 88.85714285714286, 94.57142857142857, 92.28571428571429, 93.71428571428571, 93.14285714285714, 95.14285714285714, 92.57142857142857, 95.71428571428571, 92.85714285714286, 97.14285714285714, 97.71428571428571, 94.57142857142857, 88.28571428571429, 95.71428571428571, 89.14285714285714, 97.14285714285714, 88.85714285714286, 93.71428571428571, 94.28571428571429, 92.0, 90.85714285714286, 95.71428571428571, 93.42857142857143, 89.14285714285714, 90.85714285714286, 87.42857142857143, 96.85714285714286, 92.85714285714286, 95.42857142857143, 89.71428571428571, 
93.71428571428571, 94.28571428571429, 92.28571428571429, 96.57142857142857, 97.42857142857143, 89.42857142857143, 91.14285714285714, 93.71428571428571, 92.0, 95.14285714285714, 94.85714285714286, 93.14285714285714, 88.0, 93.71428571428571, 88.57142857142857, 94.28571428571429, 93.42857142857143, 92.85714285714286, 96.28571428571429, 93.71428571428571, 92.85714285714286, 94.57142857142857, 91.14285714285714, 89.71428571428571, 93.42857142857143, 89.71428571428571, 95.42857142857143, 92.85714285714286, 94.0, 96.85714285714286, 97.14285714285714, 92.0, 96.28571428571429, 87.71428571428571, 90.0, 89.14285714285714, 93.14285714285714, 88.28571428571429, 93.42857142857143, 90.85714285714286, 94.0, 89.71428571428571, 95.14285714285714, 95.14285714285714, 97.42857142857143, 96.28571428571429, 94.85714285714286, 89.42857142857143, 93.71428571428571, 92.0, 93.42857142857143, 89.14285714285714, 87.14285714285714, 92.85714285714286, 94.85714285714286, 91.71428571428571, 93.14285714285714, 96.85714285714286, 93.71428571428571, 89.42857142857143, 93.14285714285714, 90.57142857142857, 91.14285714285714, 88.85714285714286, 89.42857142857143, 92.28571428571429, 88.28571428571429, 90.28571428571429, 95.71428571428571, 87.71428571428571, 92.85714285714286, 90.57142857142857, 94.28571428571429, 91.14285714285714, 92.85714285714286, 96.57142857142857, 90.85714285714286, 96.0, 96.57142857142857, 88.57142857142857, 92.0, 87.71428571428571, 90.28571428571429, 88.28571428571429, 97.42857142857143, 88.0, 93.14285714285714, 97.42857142857143, 92.57142857142857, 92.85714285714286, 94.28571428571429, 93.71428571428571, 97.42857142857143, 89.42857142857143, 89.42857142857143, 88.85714285714286, 89.14285714285714, 91.42857142857143, 97.14285714285714, 90.28571428571429, 91.14285714285714, 96.28571428571429, 88.85714285714286, 88.85714285714286, 92.0, 93.42857142857143, 90.57142857142857, 88.0, 94.28571428571429, 97.71428571428571, 92.28571428571429, 94.57142857142857, 97.71428571428571, 
95.71428571428571, 89.71428571428571, 97.14285714285714, 87.71428571428571, 97.42857142857143, 92.85714285714286, 93.71428571428571, 88.0, 94.57142857142857, 93.42857142857143, 94.57142857142857, 90.85714285714286, 96.57142857142857, 92.28571428571429, 90.85714285714286, 92.57142857142857, 85.14285714285714, 97.14285714285714, 89.42857142857143, 94.57142857142857, 92.57142857142857, 95.71428571428571, 90.0, 98.28571428571429, 92.57142857142857, 89.42857142857143, 98.0, 93.14285714285714, 98.85714285714286, 90.57142857142857, 89.71428571428571, 95.71428571428571, 97.42857142857143, 95.71428571428571, 93.71428571428571, 93.71428571428571, 88.85714285714286, 93.14285714285714, 93.14285714285714, 98.0, 87.42857142857143, 95.42857142857143, 91.14285714285714, 91.71428571428571, 93.42857142857143, 96.0, 88.57142857142857, 89.42857142857143, 89.42857142857143, 92.85714285714286, 92.28571428571429, 91.14285714285714, 97.42857142857143, 93.71428571428571, 91.42857142857143, 92.85714285714286, 93.71428571428571, 96.85714285714286, 89.42857142857143, 93.71428571428571, 96.85714285714286, 93.42857142857143, 94.85714285714286, 93.14285714285714, 93.14285714285714, 96.0, 89.42857142857143, 94.0, 95.42857142857143, 93.14285714285714, 93.71428571428571, 95.42857142857143, 95.42857142857143, 90.85714285714286, 92.57142857142857, 94.0, 93.71428571428571, 93.71428571428571, 92.28571428571429, 98.57142857142857, 92.85714285714286, 96.28571428571429, 96.57142857142857, 96.0, 96.57142857142857, 93.14285714285714, 93.42857142857143, 89.14285714285714, 93.42857142857143, 93.42857142857143, 95.71428571428571, 98.28571428571429, 91.14285714285714, 93.42857142857143, 96.57142857142857, 96.28571428571429, 95.14285714285714, 96.85714285714286, 90.85714285714286, 90.0, 95.71428571428571, 93.71428571428571, 93.42857142857143, 95.71428571428571, 89.71428571428571, 91.42857142857143, 93.14285714285714, 92.85714285714286, 90.0, 89.42857142857143, 93.14285714285714, 94.85714285714286, 
89.71428571428571, 93.42857142857143, 92.0, 91.42857142857143, 88.0, 93.42857142857143, 93.42857142857143, 96.57142857142857, 91.14285714285714, 90.85714285714286, 93.42857142857143, 92.85714285714286, 94.57142857142857, 96.85714285714286, 92.85714285714286, 93.42857142857143, 91.14285714285714, 93.42857142857143, 86.57142857142857, 91.71428571428571, 89.42857142857143, 92.85714285714286, 93.71428571428571, 97.14285714285714, 98.28571428571429, 97.71428571428571, 94.85714285714286, 94.0, 98.28571428571429, 94.28571428571429, 94.0, 92.28571428571429, 88.57142857142857, 93.71428571428571, 98.28571428571429, 88.57142857142857, 91.71428571428571, 94.85714285714286, 88.57142857142857, 94.0, 90.85714285714286, 97.71428571428571, 94.0, 93.42857142857143, 92.85714285714286, 93.42857142857143, 93.71428571428571, 89.71428571428571, 90.85714285714286, 89.42857142857143, 96.28571428571429, 97.14285714285714, 89.71428571428571, 93.42857142857143, 90.28571428571429, 88.28571428571429, 89.14285714285714, 94.28571428571429, 96.57142857142857, 97.42857142857143, 93.42857142857143, 98.28571428571429, 91.42857142857143, 93.42857142857143, 91.14285714285714, 96.85714285714286, 93.14285714285714, 93.42857142857143, 93.42857142857143, 97.14285714285714, 92.57142857142857, 96.0, 92.0, 89.71428571428571, 95.71428571428571, 93.42857142857143, 98.28571428571429, 90.85714285714286, 91.14285714285714, 91.42857142857143, 93.42857142857143, 94.0, 94.28571428571429, 96.0, 88.85714285714286, 88.28571428571429, 96.85714285714286, 89.14285714285714, 93.71428571428571, 88.85714285714286, 92.57142857142857, 97.14285714285714, 96.28571428571429, 93.42857142857143, 88.85714285714286, 91.42857142857143, 93.71428571428571, 96.85714285714286, 88.85714285714286, 97.14285714285714, 96.85714285714286, 90.28571428571429, 89.42857142857143, 94.0, 97.42857142857143, 96.57142857142857, 93.71428571428571, 90.57142857142857]\n"
     ]
    }
   ],
   "source": [
    "# Evaluate the constraint-classification model on the held-out test set.\n",
    "# map_location ensures the checkpoint tensors load onto the active device\n",
    "# even if it was saved from a different one.\n",
    "ConstrainModel = torch.load('Model/transformer_model_vf_0_constrain_normalized_changebatchsize.pt', map_location=my_device)\n",
    "transformer = ConstrainModel.to(my_device)\n",
    "transformer.eval()  # disable dropout / use running BN stats during evaluation\n",
    "train_dataset = standardized_train_dataset_constraints\n",
    "test_dataset = standardized_test_dataset_constraints\n",
    "\n",
    "# Counters for aggregate metrics\n",
    "total = 0\n",
    "binary_correct = 0\n",
    "single_accuracy = []  # per-sample accuracy (%); meaningful because batch_size == 1\n",
    "\n",
    "# Evaluate one sample at a time so single_accuracy is truly per-sample\n",
    "batch_size = 1\n",
    "test_dataloader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)\n",
    "\n",
    "# No gradient bookkeeping needed for evaluation\n",
    "with torch.no_grad():\n",
    "    for inputs, labels in test_dataloader:\n",
    "        # .to() is a no-op when the tensors already live on my_device\n",
    "        inputs = inputs.to(my_device)\n",
    "        labels = labels.to(my_device)\n",
    "\n",
    "        # Forward pass\n",
    "        outputs = transformer(inputs)\n",
    "\n",
    "        # Round each output to {0, 1} and compare elementwise with the labels\n",
    "        correct = (torch.round(outputs) == labels).sum().item()\n",
    "        total += labels.numel()\n",
    "        binary_correct += correct\n",
    "        single_accuracy.append(100 * correct / labels.numel())\n",
    "\n",
    "# Calculate and report the final metric\n",
    "binary_accuracy = 100 * binary_correct / total\n",
    "print(f'Binary accuracy on test set: {binary_accuracy:.2f} %')\n",
    "print(single_accuracy)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**计算在float32情况下，推理所需时间**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Average elapsed time for each iteration: 0.6 ms\n"
     ]
    }
   ],
   "source": [
    "# Benchmark float32 inference latency over 100 forward passes.\n",
    "# NOTE(review): reuses `inputs` left over from the evaluation loop above —\n",
    "# running this cell on a fresh kernel will fail; confirm intended.\n",
    "elapsed_times = []\n",
    "\n",
    "with torch.no_grad():  # no autograd bookkeeping while timing inference\n",
    "    for _ in range(100):\n",
    "        if my_device == \"cuda\":\n",
    "            torch.cuda.synchronize()  # drain pending GPU work before starting the clock\n",
    "        start = time.time()\n",
    "        # Forward pass\n",
    "        outputs = transformer(inputs)\n",
    "        if my_device == \"cuda\":\n",
    "            torch.cuda.synchronize()  # CUDA kernels are async; wait for completion\n",
    "        elapsed_times.append(time.time() - start)\n",
    "\n",
    "average_elapsed_time = sum(elapsed_times) / len(elapsed_times)\n",
    "print(\"Average elapsed time for each iteration: {:.1f} ms\".format(average_elapsed_time*1e3))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "##### TimeNN模型评估训练效果"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Binary accuracy on test set: 39.19 %\n",
      "[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]\n",
      "Continuous MSE on test set: 334.4991\n"
     ]
    }
   ],
   "source": [
    "# Evaluate the time-regression model on the held-out test set.\n",
    "# map_location ensures the checkpoint tensors load onto the active device.\n",
    "Time_model = torch.load('Model/transformer_model_vf_0_time_normalized_large.pt', map_location=my_device)\n",
    "transformer = Time_model.to(my_device)\n",
    "transformer.eval()  # disable dropout / use running BN stats during evaluation\n",
    "train_dataset = standardized_train_dataset_time\n",
    "test_dataset = standardized_test_dataset_time\n",
    "\n",
    "# Counters for aggregate metrics\n",
    "total = 0\n",
    "binary_correct = 0\n",
    "mse_sum = 0\n",
    "single_accuracy = []  # per-sample accuracy (%); meaningful because batch_size == 1\n",
    "\n",
    "# A continuous prediction counts as correct when within this distance of the label.\n",
    "TOLERANCE = 10\n",
    "\n",
    "batch_size = 1\n",
    "test_dataloader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)\n",
    "\n",
    "# No gradient bookkeeping needed for evaluation\n",
    "with torch.no_grad():\n",
    "    for inputs, labels in test_dataloader:\n",
    "        # .to() is a no-op when the tensors already live on my_device\n",
    "        inputs = inputs.to(my_device)\n",
    "        labels = labels.to(my_device)\n",
    "\n",
    "        # Forward pass\n",
    "        outputs = transformer(inputs)\n",
    "\n",
    "        total += labels.numel()\n",
    "        mse_sum += ((outputs - labels) ** 2).sum().item()\n",
    "        # Use the same tolerance criterion for per-sample accuracy as for the\n",
    "        # aggregate count: exact round-and-compare on a continuous target\n",
    "        # previously produced an all-zero single_accuracy list.\n",
    "        within_tol = (torch.abs(outputs - labels) < TOLERANCE).sum().item()\n",
    "        binary_correct += within_tol\n",
    "        single_accuracy.append(100 * within_tol / labels.numel())\n",
    "\n",
    "# Calculate final metrics\n",
    "binary_accuracy = 100 * binary_correct / total\n",
    "continuous_mse = mse_sum / total  # mean squared error over all label elements\n",
    "\n",
    "# Print the metrics\n",
    "print(f'Binary accuracy on test set: {binary_accuracy:.2f} %')\n",
    "print(single_accuracy)\n",
    "print(f'Continuous MSE on test set: {continuous_mse:.4f}')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Average elapsed time for each iteration: 0.7 ms\n"
     ]
    }
   ],
   "source": [
    "# Benchmark float32 inference latency over 100 forward passes.\n",
    "# NOTE(review): reuses `inputs` left over from the evaluation loop above —\n",
    "# running this cell on a fresh kernel will fail; confirm intended.\n",
    "elapsed_times = []\n",
    "\n",
    "with torch.no_grad():  # no autograd bookkeeping while timing inference\n",
    "    for _ in range(100):\n",
    "        if my_device == \"cuda\":\n",
    "            torch.cuda.synchronize()  # drain pending GPU work before starting the clock\n",
    "        start = time.time()\n",
    "        # Forward pass\n",
    "        outputs = transformer(inputs)\n",
    "        if my_device == \"cuda\":\n",
    "            torch.cuda.synchronize()  # CUDA kernels are async; wait for completion\n",
    "        elapsed_times.append(time.time() - start)\n",
    "\n",
    "average_elapsed_time = sum(elapsed_times) / len(elapsed_times)\n",
    "print(\"Average elapsed time for each iteration: {:.1f} ms\".format(average_elapsed_time*1e3))"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "GPT_env",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.20"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
