{
 "metadata": {
  "kernelspec": {
   "name": "python3",
   "display_name": "Python 3",
   "language": "python"
  },
  "language_info": {
   "name": "python",
   "version": "3.10.12",
   "mimetype": "text/x-python",
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "pygments_lexer": "ipython3",
   "nbconvert_exporter": "python",
   "file_extension": ".py"
  },
  "colab": {
   "provenance": [],
   "gpuType": "T4"
  },
  "accelerator": "GPU",
  "kaggle": {
   "accelerator": "nvidiaTeslaT4",
   "dataSources": [
    {
     "sourceId": 10381814,
     "sourceType": "datasetVersion",
     "datasetId": 6431251
    }
   ],
   "dockerImageVersionId": 30823,
   "isInternetEnabled": true,
   "language": "python",
   "sourceType": "notebook",
   "isGpuEnabled": true
  }
 },
 "nbformat_minor": 5,
 "nbformat": 4,
 "cells": [
  {
   "id": "93004d75fd7f5631",
   "cell_type": "code",
   "source": "import torch\nfrom collections import OrderedDict\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.io\n#from pyDOE import lhs#拉丁超立方抽样\nfrom torch import nn\nimport time\nimport warnings\nwarnings.filterwarnings(\"ignore\", category=UserWarning)  # 只忽略UserWarning类型的警告",
   "metadata": {
    "id": "initial_id",
    "execution": {
     "iopub.status.busy": "2025-01-06T08:14:36.417170Z",
     "iopub.execute_input": "2025-01-06T08:14:36.417542Z",
     "iopub.status.idle": "2025-01-06T08:14:41.904527Z",
     "shell.execute_reply.started": "2025-01-06T08:14:36.417515Z",
     "shell.execute_reply": "2025-01-06T08:14:41.901025Z"
    },
    "trusted": true,
    "ExecuteTime": {
     "end_time": "2025-02-22T09:48:32.310396Z",
     "start_time": "2025-02-22T09:48:30.799784Z"
    }
   },
   "execution_count": 1,
   "outputs": []
  },
  {
   "id": "9984f15da0ff47ad",
   "cell_type": "code",
   "source": "if torch.cuda.is_available():\n    device = torch.device('cuda')\nelse:\n    device = torch.device('cpu')\ndef setup_seed(seed):\n    torch.manual_seed(seed)\n    torch.cuda.manual_seed_all(seed)\n    np.random.seed(seed)\n    #  random.seed(seed)\n    torch.backends.cudnn.deterministic = True\n\nsetup_seed(124)\ndtype = torch.double",
   "metadata": {
    "id": "22f06fa7a5c843d0",
    "execution": {
     "iopub.status.busy": "2025-01-06T08:14:41.905543Z",
     "iopub.execute_input": "2025-01-06T08:14:41.905990Z",
     "iopub.status.idle": "2025-01-06T08:14:42.016924Z",
     "shell.execute_reply.started": "2025-01-06T08:14:41.905958Z",
     "shell.execute_reply": "2025-01-06T08:14:42.016070Z"
    },
    "trusted": true,
    "ExecuteTime": {
     "end_time": "2025-02-22T09:48:32.325704Z",
     "start_time": "2025-02-22T09:48:32.311411Z"
    }
   },
   "execution_count": 2,
   "outputs": []
  },
  {
   "id": "537af5474ace18a7",
   "cell_type": "code",
   "source": "#求导\ndef gradients(outputs, inputs):\n    return torch.autograd.grad(outputs, inputs, grad_outputs=torch.ones_like(outputs), create_graph=True)\n#类型转换\ndef to_numpy(input):\n    if isinstance(input, torch.Tensor):\n        return input.detach().cpu().numpy()\n    elif isinstance(input, np.ndarray):\n        return input\n",
   "metadata": {
    "id": "b889c1638ad8e2ca",
    "execution": {
     "iopub.status.busy": "2025-01-06T08:14:42.018439Z",
     "iopub.execute_input": "2025-01-06T08:14:42.018771Z",
     "iopub.status.idle": "2025-01-06T08:14:42.034593Z",
     "shell.execute_reply.started": "2025-01-06T08:14:42.018743Z",
     "shell.execute_reply": "2025-01-06T08:14:42.033671Z"
    },
    "trusted": true,
    "ExecuteTime": {
     "end_time": "2025-02-22T09:48:32.341785Z",
     "start_time": "2025-02-22T09:48:32.326819Z"
    }
   },
   "execution_count": 3,
   "outputs": []
  },
  {
   "id": "fb73e0360769697",
   "cell_type": "code",
   "source": "#构建网络layers\ndef Dnnlayers(input_layers,output_layers,hidden_layers,neural):\n    layers = []\n    for i in range(hidden_layers + 2):\n        if i == 0:\n            layers.append(input_layers)\n        elif i == hidden_layers + 1:\n            layers.append(output_layers)\n        else:\n            layers.append(neural)\n    return layers",
   "metadata": {
    "id": "47c947d73de73894",
    "execution": {
     "iopub.status.busy": "2025-01-06T08:14:42.035127Z",
     "iopub.execute_input": "2025-01-06T08:14:42.035389Z",
     "iopub.status.idle": "2025-01-06T08:14:42.053208Z",
     "shell.execute_reply.started": "2025-01-06T08:14:42.035347Z",
     "shell.execute_reply": "2025-01-06T08:14:42.052317Z"
    },
    "trusted": true,
    "ExecuteTime": {
     "end_time": "2025-02-22T09:48:32.357673Z",
     "start_time": "2025-02-22T09:48:32.342837Z"
    }
   },
   "execution_count": 4,
   "outputs": []
  },
  {
   "id": "2174cd9ee161fb50",
   "cell_type": "code",
   "source": [
    "class DNN(nn.Module):\n",
    "    def __init__(self, layers):\n",
    "        #继承父类\n",
    "        super(DNN, self).__init__()\n",
    "\n",
    "        #depth of network\n",
    "        self.depth = len(layers) - 1\n",
    "        #print(self.depth)\n",
    "\n",
    "        #activation of network\n",
    "        self.activation = nn.Tanh()\n",
    "        self.positive_activation = nn.Softplus()\n",
    "        #create the neural network\n",
    "        layers_list = list()\n",
    "        for i in range(self.depth - 1):\n",
    "            layers_list.append(\n",
    "                ('layer_%d' % i, nn.Linear(layers[i], layers[i+1]))#create each network\n",
    "            )\n",
    "            layers_list.append(\n",
    "                ('activation_%d' % i, self.activation)  #create each activation of network\n",
    "            )\n",
    "        layers_list.append(\n",
    "            ('layer_%d' % (self.depth - 1), nn.Linear(layers[-2], layers[-1]))\n",
    "            #last network do not have activation\n",
    "        )\n",
    "        #创建一个有序字典，其中包含了从 layers_list 中获得的键值对，这在需要保持元素顺序的场景（如神经网络层的顺序）中非常有用。\n",
    "        layerDict = OrderedDict(layers_list)\n",
    "\n",
    "        #deploy layers\n",
    "        self.layers = nn.Sequential(layerDict)\n",
    "        #print(self.layers)\n",
    "\n",
    "    #forword network,output the result of network\n",
    "    def forward(self, x):\n",
    "        #out = torch.exp(self.layers(x))\n",
    "        out = self.layers(x)\n",
    "        out = self.positive_activation(out)  # 这里使用 Softplu#ln(e^x+1)\n",
    "        #out = self.positive_activation(out)\n",
    "        return out"
   ],
   "metadata": {
    "id": "b9cbfc5db47f07de",
    "execution": {
     "iopub.status.busy": "2025-01-06T08:14:45.051897Z",
     "iopub.execute_input": "2025-01-06T08:14:45.052219Z",
     "iopub.status.idle": "2025-01-06T08:14:45.059090Z",
     "shell.execute_reply.started": "2025-01-06T08:14:45.052195Z",
     "shell.execute_reply": "2025-01-06T08:14:45.057976Z"
    },
    "trusted": true,
    "ExecuteTime": {
     "end_time": "2025-02-22T09:48:32.373132Z",
     "start_time": "2025-02-22T09:48:32.359748Z"
    }
   },
   "execution_count": 5,
   "outputs": []
  },
  {
   "id": "e1cc56f6145a1fc8",
   "cell_type": "code",
   "source": [
    "class Net_2_for_p():\n",
    "    def __init__(self, layers):\n",
    "        self.dnn = DNN(layers ).double().to(device)\n",
    "    def loss_pde(self, rho_e, u, rho_x, u_x, e_x, u_t, e_t):\n",
    "        rho = rho_e[:, 0:1]\n",
    "        p = self.dnn.forward(rho_e)\n",
    "        dp_g = gradients(p, rho_e)[0]\n",
    "        p_rho, p_e = dp_g[:, :1], dp_g[:, 1:]\n",
    "        #\n",
    "        p_x = p_rho * rho_x + p_e * e_x\n",
    "\n",
    "        f = ((rho * u_t + rho * u * u_x + p_x)**2).mean() + ((rho * e_t + rho * u * e_x + p * u_x)**2).mean()\n",
    "        return f\n",
    "    def loss_data(self, rho_e, p_exact):\n",
    "        p = self.dnn.forward(rho_e)\n",
    "        loss_data = ((p - p_exact) ** 2).mean()\n",
    "        return loss_data"
   ],
   "metadata": {
    "id": "1c2ec9e0089cb4d6",
    "execution": {
     "iopub.status.busy": "2025-01-06T08:14:45.483395Z",
     "iopub.execute_input": "2025-01-06T08:14:45.483709Z",
     "iopub.status.idle": "2025-01-06T08:14:45.491155Z",
     "shell.execute_reply.started": "2025-01-06T08:14:45.483684Z",
     "shell.execute_reply": "2025-01-06T08:14:45.490159Z"
    },
    "trusted": true,
    "ExecuteTime": {
     "end_time": "2025-02-22T09:48:32.389087Z",
     "start_time": "2025-02-22T09:48:32.374437Z"
    }
   },
   "execution_count": 6,
   "outputs": []
  },
  {
   "id": "51aec9941bfa4287",
   "cell_type": "code",
   "source": [
    "#loss_ic_e(self, rho_p, e_exact):\n",
    "data = scipy.io.loadmat(r'train_eos1_case1_predict.mat')  # Import Solution data\n",
    "predict_rho1 = np.real(data['rho']).T  # Exact total rho(x,t)，即Exact_rho[:,0]是初值\n",
    "predict_e1 = np.real(data['e']).T\n",
    "predict_u1 = np.real(data['u']).T\n",
    "predict_rho_x1 = np.real(data['rho_x']).T\n",
    "predict_e_x1 = np.real(data['e_x']).T\n",
    "predict_u_x1 = np.real(data['u_x']).T\n",
    "predict_rho_t1 = np.real(data['rho_t']).T\n",
    "predict_u_t1 = np.real(data['u_t']).T\n",
    "predict_e_t1 = np.real(data['e_t']).T\n",
    "#loss_pde(self, rho_e, u, rho_x, u_x, e_x, u_t, e_t):\n",
    "num_f_train = 8000\n",
    "id_f = np.random.choice(len(predict_rho1) * len(predict_rho1[0]), num_f_train, replace=False)\n",
    "rho1 = predict_rho1.flatten()[id_f, None]\n",
    "e1 = predict_e1.flatten()[id_f, None]\n",
    "rho_up1 = to_numpy(rho1).max(0)\n",
    "rho_lb1 = to_numpy(rho1).min(0)\n",
    "e_up1 = to_numpy(e1).max(0)\n",
    "e_lb1 = to_numpy(e1).min(0)\n",
    "rho_e1 = np.hstack([rho1, e1])\n",
    "u1 = predict_u1.flatten()[id_f, None]\n",
    "rho_x1 = predict_rho_x1.flatten()[id_f, None]\n",
    "u_x1 = predict_u_x1.flatten()[id_f, None]\n",
    "e_x1 = predict_e_x1.flatten()[id_f, None]\n",
    "rho_t1 = predict_rho_t1.flatten()[id_f, None]\n",
    "u_t1 = predict_u_t1.flatten()[id_f, None]\n",
    "e_t1 = predict_e_t1.flatten()[id_f, None]\n",
    "#loss_data(self, rho_e, p_exact):\n",
    "data = scipy.io.loadmat(r'train_eos1_case1.mat')  # Import Solution data\n",
    "Exact_rho = np.real(data['rho']).T  # Exact total rho(x,t)，即Exact_rho[:,0]是初值\n",
    "print('train_eos1_case1.shape',Exact_rho.shape)\n",
    "Exact_p = np.real(data['p']).T\n",
    "Exact_e = Exact_p / Exact_rho / 0.4\n",
    "rho_e_data_train1 = np.hstack((Exact_rho[:, 0:1], Exact_e[:, 0:1]))\n",
    "rho_e_data_train1 = np.vstack([rho_e_data_train1, np.hstack((Exact_rho[:, -1:], Exact_e[:, -1:]))])\n",
    "p_exact1 = Exact_p[:, 0:1]\n",
    "p_exact1 = np.vstack([p_exact1, Exact_p[:, -1:]])\n",
    "\n",
    "data = scipy.io.loadmat(r'train_eos1_case2_predict.mat')  # Import Solution data\n",
    "predict_rho2 = np.real(data['rho']).T\n",
    "predict_e2 = np.real(data['e']).T\n",
    "predict_u2 = np.real(data['u']).T\n",
    "predict_rho_x2 = np.real(data['rho_x']).T\n",
    "predict_e_x2 = np.real(data['e_x']).T\n",
    "predict_u_x2 = np.real(data['u_x']).T\n",
    "predict_rho_t2 = np.real(data['rho_t']).T\n",
    "predict_e_t2 = np.real(data['e_t']).T\n",
    "predict_u_t2 = np.real(data['u_t']).T\n",
    "#loss_pde(self, rho_e, u, rho_x, u_x, e_x, u_t, e_t):\n",
    "num_f_train = 8000\n",
    "id_f = np.random.choice(len(predict_rho2) * len(predict_rho2[0]), num_f_train, replace=False)\n",
    "rho2 = predict_rho2.flatten()[id_f, None]\n",
    "e2 = predict_e2.flatten()[id_f, None]\n",
    "rho_up2 = to_numpy(rho2).max(0)\n",
    "rho_lb2 = to_numpy(rho2).min(0)\n",
    "e_up2 = to_numpy(e2).max(0)\n",
    "e_lb2 = to_numpy(e2).min(0)\n",
    "rho_e2 = np.hstack([rho2, e2])\n",
    "u2 = predict_u2.flatten()[id_f, None]\n",
    "rho_x2 = predict_rho_x2.flatten()[id_f, None]\n",
    "u_x2 = predict_u_x2.flatten()[id_f, None]\n",
    "e_x2 = predict_e_x2.flatten()[id_f, None]\n",
    "rho_t2 = predict_rho_t2.flatten()[id_f, None]\n",
    "u_t2 = predict_u_t2.flatten()[id_f, None]\n",
    "e_t2 = predict_e_t2.flatten()[id_f, None]\n",
    "#loss_data(self, rho_e, p_exact):\n",
    "data = scipy.io.loadmat(r'train_eos1_case2.mat')  # Import Solution data\n",
    "Exact_rho = np.real(data['rho']).T  # Exact total rho(x,t)，即Exact_rho[:,0]是初值\n",
    "print('train_eos1_case2.shape',Exact_rho.shape)\n",
    "Exact_p = np.real(data['p']).T\n",
    "Exact_e = Exact_p / Exact_rho / 0.4\n",
    "rho_e_data_train2 = np.hstack((Exact_rho[:, 0:1], Exact_e[:, 0:1]))\n",
    "rho_e_data_train2 = np.vstack([rho_e_data_train2, np.hstack((Exact_rho[:, -1:], Exact_e[:, -1:]))])\n",
    "p_exact2 = Exact_p[:, 0:1]\n",
    "p_exact2 = np.vstack([p_exact2, Exact_p[:, -1:]])\n",
    "\n",
    "data = scipy.io.loadmat(r'train_eos1_case3_predict.mat')\n",
    "predict_rho3 = np.real(data['rho']).T\n",
    "predict_e3 = np.real(data['e']).T\n",
    "predict_u3 = np.real(data['u']).T\n",
    "predict_rho_x3 = np.real(data['rho_x']).T\n",
    "predict_e_x3 = np.real(data['e_x']).T\n",
    "predict_u_x3 = np.real(data['u_x']).T\n",
    "predict_rho_t3 = np.real(data['rho_t']).T\n",
    "predict_u_t3 = np.real(data['u_t']).T\n",
    "predict_e_t3 = np.real(data['e_t']).T\n",
    "#loss_pde(self, rho_e, u, rho_x, u_x, e_x, u_t, e_t):\n",
    "num_f_train = 8000\n",
    "id_f = np.random.choice(len(predict_rho3) * len(predict_rho3[0]), num_f_train, replace=False)\n",
    "rho3 = predict_rho3.flatten()[id_f, None]\n",
    "e3 = predict_e3.flatten()[id_f, None]\n",
    "rho_up3 = to_numpy(rho3).max(0)\n",
    "rho_lb3 = to_numpy(rho3).min(0)\n",
    "e_up3 = to_numpy(e3).max(0)\n",
    "e_lb3 = to_numpy(e3).min(0)\n",
    "rho_e3 = np.hstack([rho3, e3])\n",
    "u3 = predict_u3.flatten()[id_f, None]\n",
    "rho_x3 = predict_rho_x3.flatten()[id_f, None]\n",
    "u_x3 = predict_u_x3.flatten()[id_f, None]\n",
    "e_x3 = predict_e_x3.flatten()[id_f, None]\n",
    "rho_t3 = predict_rho_t3.flatten()[id_f, None]\n",
    "u_t3 = predict_u_t3.flatten()[id_f, None]\n",
    "e_t3 = predict_e_t3.flatten()[id_f, None]\n",
    "#loss_data(self, rho_e, p_exact):\n",
    "data = scipy.io.loadmat(r'train_eos1_case3.mat')  # Import Solution data\n",
    "Exact_rho = np.real(data['rho']).T  # Exact total rho(x,t)，即Exact_rho[:,0]是初值\n",
    "print('train_eos1_case3.shape',Exact_rho.shape)\n",
    "Exact_p = np.real(data['p']).T\n",
    "Exact_e = Exact_p / Exact_rho / 0.4\n",
    "rho_e_data_train3 = np.hstack((Exact_rho[:, 0:1], Exact_e[:, 0:1]))\n",
    "rho_e_data_train3 = np.vstack([rho_e_data_train3, np.hstack((Exact_rho[:, -1:], Exact_e[:, -1:]))])\n",
    "p_exact3 = Exact_p[:, 0:1]\n",
    "p_exact3 = np.vstack([p_exact3, Exact_p[:, -1:]])\n",
    "\n",
    "data = scipy.io.loadmat(r'train_eos1_case4_predict.mat')\n",
    "predict_rho4 = np.real(data['rho']).T\n",
    "predict_e4 = np.real(data['e']).T\n",
    "predict_u4 = np.real(data['u']).T#u(x,t)\n",
    "predict_rho_x4 = np.real(data['rho_x']).T\n",
    "predict_e_x4 = np.real(data['e_x']).T\n",
    "predict_u_x4 = np.real(data['u_x']).T\n",
    "predict_rho_t4 = np.real(data['rho_t']).T\n",
    "predict_u_t4 = np.real(data['u_t']).T\n",
    "predict_e_t4 = np.real(data['e_t']).T\n",
    "#取初值和末态值\n",
    "#loss_pde(self, rho_e, u, rho_x, u_x, e_x, u_t, e_t):\n",
    "num_f_train = 8000\n",
    "id_f = np.random.choice(len(predict_rho4) * len(predict_rho4[0]), num_f_train, replace=False)\n",
    "rho4 = predict_rho4.flatten()[id_f, None]\n",
    "e4 = predict_e4.flatten()[id_f, None]\n",
    "rho_up4 = to_numpy(rho4).max(0)\n",
    "rho_lb4 = to_numpy(rho4).min(0)\n",
    "e_up4 = to_numpy(e4).max(0)\n",
    "e_lb4 = to_numpy(e4).min(0)\n",
    "rho_e4 = np.hstack([rho4, e4])\n",
    "u4 = predict_u4.flatten()[id_f, None]\n",
    "rho_x4 = predict_rho_x4.flatten()[id_f, None]\n",
    "u_x4 = predict_u_x4.flatten()[id_f, None]\n",
    "e_x4 = predict_e_x4.flatten()[id_f, None]\n",
    "rho_t4 = predict_rho_t4.flatten()[id_f, None]\n",
    "u_t4 = predict_u_t4.flatten()[id_f, None]\n",
    "e_t4 = predict_e_t4.flatten()[id_f, None]\n",
    "#loss_data(self, rho_e, p_exact):\n",
    "data = scipy.io.loadmat(r'train_eos1_case4.mat')  # Import Solution data\n",
    "Exact_rho = np.real(data['rho']).T  # Exact total rho(x,t)，即Exact_rho[:,0]是初值\n",
    "print('train_eos1_case4.shape',Exact_rho.shape)\n",
    "Exact_p = np.real(data['p']).T\n",
    "Exact_e = Exact_p / Exact_rho / 0.4\n",
    "rho_e_data_train4 = np.hstack((Exact_rho[:, 0:1], Exact_e[:, 0:1]))\n",
    "rho_e_data_train4 = np.vstack([rho_e_data_train4, np.hstack((Exact_rho[:, -1:], Exact_e[:, -1:]))])\n",
    "p_exact4 = Exact_p[:, 0:1]\n",
    "p_exact4 = np.vstack([p_exact4, Exact_p[:, -1:]])\n",
    "\n",
    "data = scipy.io.loadmat(r'train_eos1_case5_predict.mat')\n",
    "predict_rho5 = np.real(data['rho']).T\n",
    "predict_e5 = np.real(data['e']).T\n",
    "predict_u5 = np.real(data['u']).T#u(x,t)\n",
    "predict_rho_x5 = np.real(data['rho_x']).T\n",
    "predict_e_x5 = np.real(data['e_x']).T\n",
    "predict_u_x5 = np.real(data['u_x']).T\n",
    "predict_rho_t5 = np.real(data['rho_t']).T\n",
    "predict_u_t5 = np.real(data['u_t']).T\n",
    "predict_e_t5 = np.real(data['e_t']).T\n",
    "#取初值和末态值\n",
    "#loss_pde(self, rho_e, u, rho_x, u_x, e_x, u_t, e_t):\n",
    "num_f_train = 8000\n",
    "id_f = np.random.choice(len(predict_rho5) * len(predict_rho5[0]), num_f_train, replace=False)\n",
    "rho5 = predict_rho5.flatten()[id_f, None]\n",
    "e5 = predict_e5.flatten()[id_f, None]\n",
    "rho_up5 = to_numpy(rho5).max(0)\n",
    "rho_lb5 = to_numpy(rho5).min(0)\n",
    "e_up5 = to_numpy(e5).max(0)\n",
    "e_lb5 = to_numpy(e5).min(0)\n",
    "rho_e5 = np.hstack([rho5, e5])\n",
    "u5 = predict_u5.flatten()[id_f, None]\n",
    "rho_x5 = predict_rho_x5.flatten()[id_f, None]\n",
    "u_x5 = predict_u_x5.flatten()[id_f, None]\n",
    "e_x5 = predict_e_x5.flatten()[id_f, None]\n",
    "rho_t5 = predict_rho_t5.flatten()[id_f, None]\n",
    "u_t5 = predict_u_t5.flatten()[id_f, None]\n",
    "e_t5 = predict_e_t5.flatten()[id_f, None]\n",
    "#loss_data(self, rho_e, p_exact):\n",
    "data = scipy.io.loadmat(r'train_eos1_case5.mat')  # Import Solution data\n",
    "Exact_rho = np.real(data['rho']).T  # Exact total rho(x,t)，即Exact_rho[:,0]是初值\n",
    "print('train_eos1_case5.shape',Exact_rho.shape)\n",
    "Exact_p = np.real(data['p']).T\n",
    "Exact_e = Exact_p / Exact_rho / 0.4\n",
    "rho_e_data_train5 = np.hstack((Exact_rho[:, 0:1], Exact_e[:, 0:1]))\n",
    "rho_e_data_train5 = np.vstack([rho_e_data_train5, np.hstack((Exact_rho[:, -1:], Exact_e[:, -1:]))])\n",
    "p_exact5 = Exact_p[:, 0:1]\n",
    "p_exact5 = np.vstack([p_exact5, Exact_p[:, -1:]])"
   ],
   "metadata": {
    "id": "21403d36304d76bf",
    "execution": {
     "iopub.status.busy": "2025-01-06T08:14:50.146319Z",
     "iopub.execute_input": "2025-01-06T08:14:50.146673Z",
     "iopub.status.idle": "2025-01-06T08:14:50.590268Z",
     "shell.execute_reply.started": "2025-01-06T08:14:50.146646Z",
     "shell.execute_reply": "2025-01-06T08:14:50.589177Z"
    },
    "trusted": true,
    "ExecuteTime": {
     "end_time": "2025-02-22T09:50:18.917832Z",
     "start_time": "2025-02-22T09:50:18.632186Z"
    }
   },
   "execution_count": 7,
   "outputs": []
  },
  {
   "id": "a0f43964f4b5f6f9",
   "cell_type": "code",
   "source": [
    "#loss_pde(self, rho_e, u, rho_x, u_x, e_x, u_t, e_t):\n",
    "u = np.vstack([u1, u2, u3, u4, u5])\n",
    "rho_x = np.vstack([rho_x1, rho_x2, rho_x3, rho_x4, rho_x5])\n",
    "u_x = np.vstack([u_x1, u_x2, u_x3, u_x4, u_x5])\n",
    "e_x = np.vstack([e_x1, e_x2, e_x3, e_x4, e_x5])\n",
    "rho_t = np.vstack([rho_t1, rho_t2, rho_t3, rho_t4, rho_t5])\n",
    "u_t = np.vstack([u_t1, u_t2, u_t3, u_t4, u_t5])\n",
    "e_t = np.vstack([e_t1, e_t2, e_t3, e_t4, e_t5])\n",
    "rho_e = np.vstack([rho_e1, rho_e2, rho_e3, rho_e4, rho_e5])\n",
    "'''num_f_train = 10000\n",
    "id_f = np.random.choice(len(rho_e), num_f_train, replace=False)\n",
    "u = u[id_f, :]\n",
    "rho_x = rho_x[id_f, :]\n",
    "u_x = u_x[id_f, :]\n",
    "e_x = e_x[id_f, :]\n",
    "e_t = e_t[id_f, :]\n",
    "rho_t = rho_t[id_f, :]\n",
    "u_t = u_t[id_f, :]\n",
    "rho_e = rho_e[id_f, :]'''\n",
    "#tensor化\n",
    "rho_e = torch.tensor(rho_e, requires_grad=True, dtype=dtype, device=device)\n",
    "u = torch.tensor(u, dtype=dtype, device=device)\n",
    "rho_x = torch.tensor(rho_x, dtype=dtype, device=device)\n",
    "u_x = torch.tensor(u_x, dtype=dtype, device=device)\n",
    "e_x = torch.tensor(e_x, dtype=dtype, device=device)\n",
    "rho_t = torch.tensor(rho_t, dtype=dtype, device=device)\n",
    "u_t = torch.tensor(u_t, dtype=dtype, device=device)\n",
    "e_t = torch.tensor(e_t, dtype=dtype, device=device)\n",
    "#loss_data(self, rho_e, p_exact):\n",
    "rho_e_data_train = np.vstack([rho_e_data_train1, rho_e_data_train2, rho_e_data_train3, rho_e_data_train4, rho_e_data_train5])\n",
    "p_exact = np.vstack([p_exact1, p_exact2, p_exact3, p_exact4, p_exact5])\n",
    "#tensor化\n",
    "rho_e_data_train = torch.tensor(rho_e_data_train, dtype=dtype, device=device, requires_grad=True)\n",
    "p_exact = torch.tensor(p_exact, dtype=dtype, device=device)\n"
   ],
   "metadata": {
    "id": "c4159d96c493d922",
    "execution": {
     "iopub.status.busy": "2025-01-06T08:14:55.463332Z",
     "iopub.execute_input": "2025-01-06T08:14:55.463665Z",
     "iopub.status.idle": "2025-01-06T08:14:55.767002Z",
     "shell.execute_reply.started": "2025-01-06T08:14:55.463641Z",
     "shell.execute_reply": "2025-01-06T08:14:55.765961Z"
    },
    "trusted": true,
    "ExecuteTime": {
     "end_time": "2025-02-07T12:55:25.179864Z",
     "start_time": "2025-02-07T12:55:25.144525Z"
    }
   },
   "execution_count": 9,
   "outputs": []
  },
  {
   "id": "93e6cfe738569657",
   "cell_type": "code",
   "source": [
    "layers = Dnnlayers(2, 1, 1, 20)\n",
    "model = Net_2_for_p(layers)\n",
    "loss_total_history = []\n",
    "loss_pde_history = []\n",
    "loss_data_history = []\n",
    "lr = 0.001\n",
    "optimizer = torch.optim.Adam(list(model.dnn.parameters()), weight_decay=0.0001, lr=lr)"
   ],
   "metadata": {
    "id": "c9404843e26f35f",
    "execution": {
     "iopub.status.busy": "2025-01-06T08:14:58.998629Z",
     "iopub.execute_input": "2025-01-06T08:14:58.998923Z",
     "iopub.status.idle": "2025-01-06T08:15:00.183438Z",
     "shell.execute_reply.started": "2025-01-06T08:14:58.998900Z",
     "shell.execute_reply": "2025-01-06T08:15:00.182547Z"
    },
    "trusted": true,
    "ExecuteTime": {
     "end_time": "2025-01-15T09:42:26.818274Z",
     "start_time": "2025-01-15T09:42:26.813725Z"
    }
   },
   "execution_count": 9,
   "outputs": []
  },
  {
   "id": "80b19914db59a7f8",
   "cell_type": "code",
   "source": [
    "#training\n",
    "def train(epoch):\n",
    "    model.dnn.train()\n",
    "    def closure():\n",
    "        optimizer.zero_grad()\n",
    "        #loss_pde(self, rho_e, u, rho_x, u_x, e_x, u_t, e_t):\n",
    "        loss_pde = model.loss_pde(rho_e, u, rho_x, u_x, e_x, u_t, e_t)\n",
    "        #loss_data(self, rho_p, e_exact)\n",
    "        loss_data = model.loss_data(rho_e_data_train, p_exact)\n",
    "        #total loss\n",
    "        loss =  loss_pde + 10*loss_data#\n",
    "        if epoch%2==0 :\n",
    "            print(f'train: epoch:{epoch},  loss: {loss:.8f}, loss_pde:{loss_pde:.8f}, loss_data:{loss_data:.8f}')\n",
    "\n",
    "        loss_total_history.append(to_numpy(loss))\n",
    "        loss_pde_history.append(to_numpy(loss_pde))\n",
    "        loss_data_history.append(to_numpy(loss_data))\n",
    "\n",
    "        loss.backward()\n",
    "        return loss\n",
    "    loss = optimizer.step(closure)"
   ],
   "metadata": {
    "id": "e89e4d23cc588936",
    "execution": {
     "iopub.status.busy": "2025-01-06T08:15:02.807572Z",
     "iopub.execute_input": "2025-01-06T08:15:02.808017Z",
     "iopub.status.idle": "2025-01-06T08:15:02.813672Z",
     "shell.execute_reply.started": "2025-01-06T08:15:02.807985Z",
     "shell.execute_reply": "2025-01-06T08:15:02.812831Z"
    },
    "trusted": true,
    "ExecuteTime": {
     "end_time": "2025-01-15T09:42:27.628247Z",
     "start_time": "2025-01-15T09:42:27.621729Z"
    }
   },
   "execution_count": 10,
   "outputs": []
  },
  {
   "id": "ef089b9379e8beb7",
   "cell_type": "code",
   "source": "epochs1 = 15000\ntic = time.time()\nfor epoch in range(1, epochs1+1):\n    train(epoch)\ntoc = time.time()",
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "de0b8364cf6e49b9",
    "outputId": "70b1fef2-6a57-4fbd-9806-e99a6656a369",
    "execution": {
     "iopub.status.busy": "2025-01-06T08:15:03.055262Z",
     "iopub.execute_input": "2025-01-06T08:15:03.055582Z",
     "iopub.status.idle": "2025-01-06T08:16:14.428995Z",
     "shell.execute_reply.started": "2025-01-06T08:15:03.055558Z",
     "shell.execute_reply": "2025-01-06T08:16:14.427995Z"
    },
    "trusted": true,
    "ExecuteTime": {
     "end_time": "2025-01-15T09:46:54.980035Z",
     "start_time": "2025-01-15T09:42:29.214199Z"
    }
   },
   "execution_count": 11,
   "outputs": []
  },
  {
   "id": "1740343e52cff92c",
   "cell_type": "code",
   "source": [
    "rho_up = max(rho_up1,rho_up2,rho_up3,rho_up4, rho_up5)\n",
    "rho_lb = min(rho_lb1,rho_lb2,rho_lb3,rho_lb4, rho_lb5)\n",
    "e_up = max(e_up1, e_up2, e_up3, e_up4, e_up5)\n",
    "e_lb = min(e_lb1, e_lb2, e_lb3, e_lb4, e_lb5)\n",
    "print(rho_up,rho_lb,e_up,e_lb)\n",
    "size1 = 200\n",
    "size2 = 300\n",
    "rho = np.linspace(rho_lb, rho_up, size1)\n",
    "e = np.linspace(e_lb, e_up, size2)\n",
    "rho_grid, e_grid = np.meshgrid(rho, e)\n",
    "rho_e_test = np.hstack((rho_grid.flatten()[:,None], e_grid.flatten()[:,None]))\n",
    "p_test_exact = rho_e_test[:, -1:] * rho_e_test[:, 0:1] * 0.4\n",
    "p_exact_rho = rho_e_test[:, -1:] * 0.4\n",
    "p_exact_e = rho_e_test[:, 0:1] * 0.4\n",
    "\n",
    "rho_e_test = torch.tensor(rho_e_test, requires_grad=True, dtype=dtype).to(device)\n",
    "p_predict = (model.dnn.forward(rho_e_test))\n",
    "\n",
    "dp_predict = gradients(p_predict, rho_e_test)[0]\n",
    "p_predict_rho, p_predict_e = dp_predict[:, :1], dp_predict[:, 1:]\n",
    "\n",
    "\n",
    "error_p_test = abs(to_numpy(p_predict) - to_numpy(p_test_exact))\n",
    "error_p_test_rho = abs(to_numpy(p_predict_rho) - to_numpy(p_exact_rho))\n",
    "error_p_test_e = abs(to_numpy(p_predict_e) - to_numpy(p_exact_e))\n",
    "\n",
    "\n",
    "plt.figure()\n",
    "plt.contour(rho_grid, e_grid, to_numpy(p_predict.reshape(size2,size1)),levels=3000)\n",
    "plt.colorbar()\n",
    "plt.xlabel('rho')\n",
    "plt.ylabel('e')\n",
    "plt.title('p_predict')\n",
    "plt.show()\n",
    "plt.figure()\n",
    "plt.contour(rho_grid, e_grid, to_numpy(p_test_exact).reshape(size2,size1),levels=3000)\n",
    "plt.colorbar()\n",
    "plt.xlabel('rho')\n",
    "plt.ylabel('e')\n",
    "plt.title('p_exact')\n",
    "plt.show()\n",
    "plt.figure()\n",
    "plt.contour(rho_grid, e_grid, error_p_test.reshape(size2,size1),levels=50000)\n",
    "plt.colorbar()\n",
    "plt.xlabel('rho')\n",
    "plt.ylabel('e')\n",
    "plt.title('error_p_test')\n",
    "plt.savefig(r\"Net2_p_test_error.png\")\n",
    "plt.show()\n",
    "#\n",
    "eporchx = np.arange(1, len(loss_pde_history)+1)\n",
    "plt.figure()\n",
    "plt.plot(eporchx, loss_pde_history, label='loss_pde_history')\n",
    "plt.plot(eporchx, loss_data_history, label='loss_data_history')\n",
    "plt.plot(eporchx, loss_total_history, label='loss_total_history')\n",
    "plt.yscale('log')\n",
    "plt.title('loss with eproch')\n",
    "plt.legend()\n",
    "plt.show()\n",
    "#\n",
    "print('MSE error for every var')\n",
    "print('error_p_test is ',(error_p_test**2).mean())\n",
    "print('error_p_test_rho is ',(error_p_test_rho**2).mean())\n",
    "print('error_p_test_e is ',(error_p_test_e**2).mean())\n",
    "print('*****************************************')\n",
    "print('L2 error for every var')\n",
    "print('error_p_test is ',np.sqrt((error_p_test**2).mean()))\n",
    "print('error_p_test_rho is ',np.sqrt((error_p_test_rho**2).mean()))\n",
    "print('error_p_test_e is ',np.sqrt((error_p_test_e**2).mean()))\n",
    "print('*****************************************')\n",
    "print('relative error for every var')\n",
    "print('error_p_test is ',np.sqrt(np.linalg.norm(error_p_test, ord=2)/np.linalg.norm(p_exact, ord=2)))\n",
    "print('error_p_test_rho is ',np.sqrt(np.linalg.norm(error_p_test_rho, ord=2)/np.linalg.norm(p_exact_rho, ord=2)))\n",
    "print('error_p_test_e is ',np.sqrt(np.linalg.norm(error_p_test_e, ord=2)/np.linalg.norm(p_exact_e, ord=2)))\n",
    "print(max(p_exact))\n",
    "print(max(p_predict))\n",
    "print(min(p_exact))\n",
    "print(min(p_predict))\n",
    "'''\n",
    "MSE error for every var\n",
    "error_e_test is  0.00541845901792049\n",
    "*****************************************\n",
    "L2 error for every var\n",
    "error_e_test is  0.073610182841238\n",
    "*****************************************\n",
    "relative error for every var\n",
    "error_e_test is  0.1206453541662053\n",
    "'''"
   ],
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 1000
    },
    "id": "ecf0160acaac6023",
    "outputId": "4ecd79dd-e9d6-4c73-ef82-183c2c57089b",
    "execution": {
     "iopub.status.busy": "2025-01-06T08:16:14.430082Z",
     "iopub.execute_input": "2025-01-06T08:16:14.430412Z",
     "iopub.status.idle": "2025-01-06T08:18:29.929740Z",
     "shell.execute_reply.started": "2025-01-06T08:16:14.430387Z",
     "shell.execute_reply": "2025-01-06T08:18:29.928379Z"
    },
    "trusted": true,
    "ExecuteTime": {
     "end_time": "2025-01-15T09:48:37.431710Z",
     "start_time": "2025-01-15T09:47:21.538209Z"
    }
   },
   "execution_count": 13,
   "outputs": []
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-15T09:49:25.973190Z",
     "start_time": "2025-01-15T09:48:37.432770Z"
    }
   },
   "cell_type": "code",
   "source": [
    "optimizer = torch.optim.LBFGS(model.dnn.parameters(), lr=1.0, max_iter=20)\n",
    "epochs2 = 2000\n",
    "tic = time.time()\n",
    "for epoch in range(1, epochs2+1):\n",
    "    train(epoch)\n",
    "toc = time.time()"
   ],
   "id": "3e6e8035ff60fbdd",
   "execution_count": 14,
   "outputs": []
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-07T12:55:52.966526Z",
     "start_time": "2025-02-07T12:55:52.368351Z"
    }
   },
   "cell_type": "code",
   "source": [
    "rho_up = max(rho_up1,rho_up2,rho_up3,rho_up4, rho_up5)\n",
    "rho_lb = min(rho_lb1,rho_lb2,rho_lb3,rho_lb4, rho_lb5)\n",
    "e_up = max(e_up1, e_up2, e_up3, e_up4, e_up5)\n",
    "e_lb = min(e_lb1, e_lb2, e_lb3, e_lb4, e_lb5)\n",
    "print(rho_up,rho_lb,e_up,e_lb)\n",
    "size1 = 200\n",
    "size2 = 300\n",
    "rho = np.linspace(rho_lb, rho_up, size1)\n",
    "e = np.linspace(e_lb, e_up, size2)\n",
    "rho_grid, e_grid = np.meshgrid(rho, e)\n",
    "rho_e_test = np.hstack((rho_grid.flatten()[:,None], e_grid.flatten()[:,None]))\n",
    "p_test_exact = rho_e_test[:, -1:] * rho_e_test[:, 0:1] * 0.4\n",
    "p_exact_rho = rho_e_test[:, -1:] * 0.4\n",
    "p_exact_e = rho_e_test[:, 0:1] * 0.4\n",
    "\n",
    "rho_e_test = torch.tensor(rho_e_test, requires_grad=True, dtype=dtype).to(device)\n",
    "p_predict = (model.dnn.forward(rho_e_test))\n",
    "\n",
    "dp_predict = gradients(p_predict, rho_e_test)[0]\n",
    "p_predict_rho, p_predict_e = dp_predict[:, :1], dp_predict[:, 1:]\n",
    "\n",
    "\n",
    "error_p_test = abs(to_numpy(p_predict) - to_numpy(p_test_exact))\n",
    "error_p_test_rho = abs(to_numpy(p_predict_rho) - to_numpy(p_exact_rho))\n",
    "error_p_test_e = abs(to_numpy(p_predict_e) - to_numpy(p_exact_e))\n",
    "\n",
    "\n",
    "plt.figure()\n",
    "plt.contour(rho_grid, e_grid, to_numpy(p_predict.reshape(size2,size1)),levels=5000)\n",
    "plt.colorbar()\n",
    "plt.xlabel('rho')\n",
    "plt.ylabel('e')\n",
    "plt.title('p_predict')\n",
    "plt.show()\n",
    "plt.figure()\n",
    "plt.contour(rho_grid, e_grid, to_numpy(p_test_exact).reshape(size2,size1),levels=5000)\n",
    "plt.colorbar()\n",
    "plt.xlabel('rho')\n",
    "plt.ylabel('e')\n",
    "plt.title('p_exact')\n",
    "plt.show()\n",
    "plt.figure()\n",
    "plt.contour(rho_grid, e_grid, error_p_test.reshape(size2,size1),levels=300000)\n",
    "plt.colorbar()\n",
    "plt.xlabel('rho')\n",
    "plt.ylabel('e')\n",
    "plt.title('error_p_test')\n",
    "plt.savefig(r\"Net2_p_test_error.png\")\n",
    "plt.show()\n",
    "#\n",
    "eporchx = np.arange(1, len(loss_pde_history)+1)\n",
    "plt.plot(eporchx, loss_pde_history, label='loss_pde_history')\n",
    "plt.plot(eporchx, loss_data_history, label='loss_data_history')\n",
    "plt.plot(eporchx, loss_total_history, label='loss_total_history')\n",
    "plt.yscale('log')\n",
    "plt.title('loss with eproch')\n",
    "plt.legend()\n",
    "plt.show()\n",
    "#\n",
    "print('MSE error for every var')\n",
    "print('error_p_test is ',(error_p_test**2).mean())\n",
    "print('error_p_test_rho is ',(error_p_test_rho**2).mean())\n",
    "print('error_p_test_e is ',(error_p_test_e**2).mean())\n",
    "print('*****************************************')\n",
    "print('L2 error for every var')\n",
    "print('error_p_test is ',np.sqrt((error_p_test**2).mean()))\n",
    "print('error_p_test_rho is ',np.sqrt((error_p_test_rho**2).mean()))\n",
    "print('error_p_test_e is ',np.sqrt((error_p_test_e**2).mean()))\n",
    "print('*****************************************')\n",
    "print('relative error for every var')\n",
    "print('error_p_test is ',np.sqrt(np.linalg.norm(error_p_test, ord=2)/np.linalg.norm(p_exact, ord=2)))\n",
    "print('error_p_test_rho is ',np.sqrt(np.linalg.norm(error_p_test_rho, ord=2)/np.linalg.norm(p_exact_rho, ord=2)))\n",
    "print('error_p_test_e is ',np.sqrt(np.linalg.norm(error_p_test_e, ord=2)/np.linalg.norm(p_exact_e, ord=2)))\n",
    "print(max(p_exact))\n",
    "print(max(p_predict))\n",
    "print(min(p_exact))\n",
    "print(min(p_predict))\n",
    "'''\n",
    "MSE error for every var\n",
    "error_e_test is  0.00541845901792049\n",
    "*****************************************\n",
    "L2 error for every var\n",
    "error_e_test is  0.073610182841238\n",
    "*****************************************\n",
    "relative error for every var\n",
    "error_e_test is  0.1206453541662053\n",
    "'''"
   ],
   "id": "82761a3cfb007d46",
   "execution_count": 10,
   "outputs": []
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-15T10:09:17.943856Z",
     "start_time": "2025-01-15T10:09:17.929678Z"
    }
   },
   "cell_type": "code",
   "source": "torch.save(model, 'Net2_eos1_model.pth')",
   "id": "63865619ae3084f8",
   "execution_count": 16,
   "outputs": []
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-15T10:09:17.958830Z",
     "start_time": "2025-01-15T10:09:17.944946Z"
    }
   },
   "cell_type": "code",
   "source": [
    "with open('model_parameters.txt', 'w') as f:\n",
    "    for name, param in model.dnn.named_parameters():\n",
    "        if param.grad is not None:\n",
    "            # 写入变量名作为注释行\n",
    "            f.write('#' + name + '\\n')\n",
    "            # 写入形状信息，使用括号包裹形状，便于Fortran解析\n",
    "            f.write(f\"({','.join(map(str, param.shape))})\\n\")\n",
    "            # 将参数转换为numpy数组并保存\n",
    "            param_np = param.detach().numpy()\n",
    "            # 保存参数和梯度，指定格式以便于Fortran读取\n",
    "            np.savetxt(f, param_np, fmt='%15.8e')  # 使用科学记数法，每列宽度15，精度8\n",
    "            f.write('\\n')  # 每个张量之间空一行"
   ],
   "id": "d874ba7d96be2b3d",
   "execution_count": 17,
   "outputs": []
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-15T10:09:17.974871Z",
     "start_time": "2025-01-15T10:09:17.959832Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Dead cell: legacy state_dict export kept for reference only - the entire body\n",
    "# is a no-op triple-quoted string; the active exporter is the cell above.\n",
    "'''params = model.dnn.state_dict()\n",
    "with open('model_parameters.txt', 'w') as f:\n",
    "    for key in params:\n",
    "        # write each layer name as a comment\n",
    "        f.write('#' + key + '\\n')\n",
    "        tensor = params[key].numpy()  # convert to a numpy array\n",
    "        shape = tensor.shape  # shape information\n",
    "        f.write(str(shape) + '\\n')  # write the shape\n",
    "        tensor.tofile(f, sep=\" \", format=\"%s\")  # write the raw values\n",
    "        f.write('\\n')  # blank line between tensors'''"
   ],
   "id": "1be2774fd20e6448",
   "execution_count": 18,
   "outputs": []
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-15T10:09:17.990939Z",
     "start_time": "2025-01-15T10:09:17.975875Z"
    }
   },
   "cell_type": "code",
   "source": [
    "with open('model_parameters_and_gradients.txt', 'w') as f:\n",
    "    for name, param in model.dnn.named_parameters():\n",
    "        if param.grad is not None:\n",
    "            # 写入变量名作为注释行\n",
    "            f.write('#' + name + '\\n')\n",
    "            # 写入形状信息，使用括号包裹形状，便于Fortran解析\n",
    "            f.write(f\"({','.join(map(str, param.shape))})\\n\")\n",
    "            # 将参数和梯度转换为numpy数组并保存\n",
    "            param_np = param.detach().numpy()\n",
    "            grad_np = param.grad.detach().numpy()\n",
    "            # 保存参数和梯度，指定格式以便于Fortran读取\n",
    "            np.savetxt(f, param_np, fmt='%15.8e')  # 使用科学记数法，每列宽度15，精度8\n",
    "            f.write('\\n')  # 分隔参数和梯度\n",
    "            np.savetxt(f, grad_np, fmt='%15.8e')\n",
    "            f.write('\\n')  # 每个张量之间空一行"
   ],
   "id": "cd91eac314a163ec",
   "execution_count": 19,
   "outputs": []
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-07T12:56:04.085739Z",
     "start_time": "2025-02-07T12:56:04.067565Z"
    }
   },
   "cell_type": "code",
   "source": [
    "Net2_model = torch.load('Net2_eos1_model.pth', map_location=device)\n",
    "rho = np.linspace(0.1, 1.0, 10)\n",
    "rho = np.linspace(0.0, 0.0, 1)\n",
    "e = np.linspace(0.25, 0.25, 1)\n",
    "rho_grid, e_grid = np.meshgrid(rho, e)\n",
    "rho_e_test = np.hstack((rho_grid.flatten()[:,None], e_grid.flatten()[:,None]))\n",
    "print(rho_e_test)\n",
    "rho_e_test = torch.tensor(rho_e_test, requires_grad=True, dtype=dtype).to(device)\n",
    "p_predict = (Net2_model.dnn.forward(rho_e_test))\n",
    "print(p_predict)"
   ],
   "id": "5374b5a1305b299d",
   "execution_count": 11,
   "outputs": []
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-07T12:57:37.015241Z",
     "start_time": "2025-02-07T12:57:36.970030Z"
    }
   },
   "cell_type": "code",
   "source": [
    "rho_up = max(rho_up1,rho_up2,rho_up3,rho_up4, rho_up5)\n",
    "rho_lb = min(rho_lb1,rho_lb2,rho_lb3,rho_lb4, rho_lb5)\n",
    "e_up = max(e_up1, e_up2, e_up3, e_up4, e_up5)\n",
    "e_lb = min(e_lb1, e_lb2, e_lb3, e_lb4, e_lb5)\n",
    "print(rho_up,rho_lb,e_up,e_lb)\n",
    "size1 = 200\n",
    "size2 = 300\n",
    "rho = np.linspace(rho_lb, rho_up, size1)\n",
    "e = np.linspace(e_lb, e_up, size2)\n",
    "rho_grid, e_grid = np.meshgrid(rho, e)\n",
    "rho_e_test = np.hstack((rho_grid.flatten()[:,None], e_grid.flatten()[:,None]))\n",
    "p_test_exact = rho_e_test[:, -1:] * rho_e_test[:, 0:1] * 0.4\n",
    "p_exact_rho = rho_e_test[:, -1:] * 0.4\n",
    "p_exact_e = rho_e_test[:, 0:1] * 0.4\n",
    "\n",
    "rho_e_test = torch.tensor(rho_e_test, requires_grad=True, dtype=dtype).to(device)\n",
    "p_predict = (Net2_model.dnn.forward(rho_e_test))\n",
    "\n",
    "dp_predict = gradients(p_predict, rho_e_test)[0]\n",
    "p_predict_rho, p_predict_e = dp_predict[:, :1], dp_predict[:, 1:]\n",
    "\n",
    "\n",
    "error_p_test = abs(to_numpy(p_predict) - to_numpy(p_test_exact))\n",
    "error_p_test_rho = abs(to_numpy(p_predict_rho) - to_numpy(p_exact_rho))\n",
    "error_p_test_e = abs(to_numpy(p_predict_e) - to_numpy(p_exact_e))\n",
    "\n",
    "\n",
    "'''plt.figure()\n",
    "plt.contour(rho_grid, e_grid, to_numpy(p_predict.reshape(size2,size1)),levels=5000)\n",
    "plt.colorbar()\n",
    "plt.xlabel('rho')\n",
    "plt.ylabel('e')\n",
    "plt.title('p_predict')\n",
    "plt.show()\n",
    "plt.figure()\n",
    "plt.contour(rho_grid, e_grid, to_numpy(p_test_exact).reshape(size2,size1),levels=5000)\n",
    "plt.colorbar()\n",
    "plt.xlabel('rho')\n",
    "plt.ylabel('e')\n",
    "plt.title('p_exact')\n",
    "plt.show()\n",
    "plt.figure()\n",
    "plt.contour(rho_grid, e_grid, error_p_test.reshape(size2,size1),levels=300000)\n",
    "plt.colorbar()\n",
    "plt.xlabel('rho')\n",
    "plt.ylabel('e')\n",
    "plt.title('error_p_test')\n",
    "plt.savefig(r\"Net2_p_test_error.png\")\n",
    "plt.show()\n",
    "#\n",
    "eporchx = np.arange(1, len(loss_pde_history)+1)\n",
    "plt.plot(eporchx, loss_pde_history, label='loss_pde_history')\n",
    "plt.plot(eporchx, loss_data_history, label='loss_data_history')\n",
    "plt.plot(eporchx, loss_total_history, label='loss_total_history')\n",
    "plt.yscale('log')\n",
    "plt.title('loss with eproch')\n",
    "plt.legend()\n",
    "plt.show()'''\n",
    "#\n",
    "print('MSE error for every var')\n",
    "print('error_p_test is ',(error_p_test**2).mean())\n",
    "print('error_p_test_rho is ',(error_p_test_rho**2).mean())\n",
    "print('error_p_test_e is ',(error_p_test_e**2).mean())\n",
    "print('*****************************************')\n",
    "print('L2 error for every var')\n",
    "print('error_p_test is ',np.sqrt((error_p_test**2).mean()))\n",
    "print('error_p_test_rho is ',np.sqrt((error_p_test_rho**2).mean()))\n",
    "print('error_p_test_e is ',np.sqrt((error_p_test_e**2).mean()))\n",
    "print('*****************************************')\n",
    "print('relative error for every var')\n",
    "print('error_p_test is ',np.sqrt(np.linalg.norm(error_p_test, ord=2)/np.linalg.norm(p_exact, ord=2)))\n",
    "print('error_p_test_rho is ',np.sqrt(np.linalg.norm(error_p_test_rho, ord=2)/np.linalg.norm(p_exact_rho, ord=2)))\n",
    "print('error_p_test_e is ',np.sqrt(np.linalg.norm(error_p_test_e, ord=2)/np.linalg.norm(p_exact_e, ord=2)))\n",
    "'''\n",
    "MSE error for every var\n",
    "error_p_test is  5.781340596628382e-06\n",
    "error_p_test_rho is  0.0006344236551663998\n",
    "error_p_test_e is  0.00025468171421795483\n",
    "*****************************************\n",
    "L2 error for every var\n",
    "error_p_test is  0.0024044418472128582\n",
    "error_p_test_rho is  0.02518776796713833\n",
    "error_p_test_e is  0.015958750396505197\n",
    "*****************************************\n",
    "relative error for every var\n",
    "error_p_test is  0.11146935721125908\n",
    "error_p_test_rho is  0.17296584292905715\n",
    "error_p_test_e is  0.18416117064059875\n",
    "'''"
   ],
   "id": "bfd503ee9d491a18",
   "execution_count": 14,
   "outputs": []
  },
  {
   "metadata": {},
   "cell_type": "code",
   "execution_count": null,
   "source": "",
   "id": "6ef627e9f3e1153",
   "outputs": []
  }
 ]
}
