{
 "metadata": {
  "kernelspec": {
   "name": "python3",
   "display_name": "Python 3",
   "language": "python"
  },
  "language_info": {
   "name": "python",
   "version": "3.10.12",
   "mimetype": "text/x-python",
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "pygments_lexer": "ipython3",
   "nbconvert_exporter": "python",
   "file_extension": ".py"
  },
  "colab": {
   "provenance": [],
   "gpuType": "T4"
  },
  "accelerator": "GPU",
  "kaggle": {
   "accelerator": "nvidiaTeslaT4",
   "dataSources": [
    {
     "sourceId": 9068009,
     "sourceType": "datasetVersion",
     "datasetId": 5469316
    },
    {
     "sourceId": 10401331,
     "sourceType": "datasetVersion",
     "datasetId": 6444922
    }
   ],
   "dockerImageVersionId": 30823,
   "isInternetEnabled": true,
   "language": "python",
   "sourceType": "notebook",
   "isGpuEnabled": true
  }
 },
 "nbformat_minor": 5,
 "nbformat": 4,
 "cells": [
  {
   "id": "772f1079d76766c9",
   "cell_type": "code",
    "source": [
     "import torch\n",
     "from collections import OrderedDict\n",
     "import numpy as np\n",
     "import matplotlib.pyplot as plt\n",
     "import scipy.io\n",
     "# from pyDOE import lhs  # Latin hypercube sampling (currently unused)\n",
     "from torch import nn\n",
     "import time\n",
     "import warnings\n",
     "from torch.optim.lr_scheduler import StepLR\n",
     "warnings.filterwarnings(\"ignore\", category=UserWarning)  # suppress only UserWarning messages"
    ],
   "metadata": {
    "id": "772f1079d76766c9",
    "execution": {
     "iopub.status.busy": "2025-01-08T12:23:16.445347Z",
     "iopub.execute_input": "2025-01-08T12:23:16.445643Z",
     "iopub.status.idle": "2025-01-08T12:23:19.528495Z",
     "shell.execute_reply.started": "2025-01-08T12:23:16.445621Z",
     "shell.execute_reply": "2025-01-08T12:23:19.527823Z"
    },
    "trusted": true,
    "ExecuteTime": {
     "end_time": "2025-02-21T12:59:54.610245Z",
     "start_time": "2025-02-21T12:59:49.965646Z"
    }
   },
   "execution_count": 1,
   "outputs": []
  },
  {
   "id": "d51af55a594658b8",
   "cell_type": "code",
    "source": [
     "# Select GPU when available; all tensors and models are moved to this device.\n",
     "if torch.cuda.is_available():\n",
     "    device = torch.device('cuda')\n",
     "else:\n",
     "    device = torch.device('cpu')\n",
     "def setup_seed(seed):\n",
     "    \"\"\"Seed torch (CPU + all GPUs) and numpy for reproducible runs.\"\"\"\n",
     "    torch.manual_seed(seed)\n",
     "    torch.cuda.manual_seed_all(seed)\n",
     "    np.random.seed(seed)\n",
     "    #  random.seed(seed)\n",
     "    # deterministic cuDNN kernels trade speed for reproducibility\n",
     "    torch.backends.cudnn.deterministic = True\n",
     "\n",
     "setup_seed(124)\n",
     "dtype = torch.double  # double precision for accurate autograd derivatives"
    ],
   "metadata": {
    "id": "d51af55a594658b8",
    "execution": {
     "iopub.status.busy": "2025-01-08T12:23:19.529308Z",
     "iopub.execute_input": "2025-01-08T12:23:19.529676Z",
     "iopub.status.idle": "2025-01-08T12:23:19.612766Z",
     "shell.execute_reply.started": "2025-01-08T12:23:19.529654Z",
     "shell.execute_reply": "2025-01-08T12:23:19.611877Z"
    },
    "trusted": true,
    "ExecuteTime": {
     "end_time": "2025-02-21T12:59:54.626308Z",
     "start_time": "2025-02-21T12:59:54.612251Z"
    }
   },
   "execution_count": 2,
   "outputs": []
  },
  {
   "id": "fe0818329fdb65c3",
   "cell_type": "code",
   "source": [
    "#求导\n",
    "def gradients(outputs, inputs):\n",
    "    return torch.autograd.grad(outputs, inputs, grad_outputs=torch.ones_like(outputs), create_graph=True)\n",
    "#类型转换\n",
    "def to_numpy(input):\n",
    "    if isinstance(input, torch.Tensor):\n",
    "        return input.detach().cpu().numpy()\n",
    "    elif isinstance(input, np.ndarray):\n",
    "        return input"
   ],
   "metadata": {
    "id": "fe0818329fdb65c3",
    "execution": {
     "iopub.status.busy": "2025-01-08T12:23:19.614557Z",
     "iopub.execute_input": "2025-01-08T12:23:19.614899Z",
     "iopub.status.idle": "2025-01-08T12:23:19.623419Z",
     "shell.execute_reply.started": "2025-01-08T12:23:19.614872Z",
     "shell.execute_reply": "2025-01-08T12:23:19.622446Z"
    },
    "trusted": true,
    "ExecuteTime": {
     "end_time": "2025-02-21T12:59:54.641484Z",
     "start_time": "2025-02-21T12:59:54.627311Z"
    }
   },
   "execution_count": 3,
   "outputs": []
  },
  {
   "id": "60a72b4d3732df3b",
   "cell_type": "code",
   "source": [
    "#构建网络layers\n",
    "def Dnnlayers(input_layers,output_layers,hidden_layers,neural):\n",
    "    layers = []\n",
    "    for i in range(hidden_layers + 2):\n",
    "        if i == 0:\n",
    "            layers.append(input_layers)\n",
    "        elif i == hidden_layers + 1:\n",
    "            layers.append(output_layers)\n",
    "        else:\n",
    "            layers.append(neural)\n",
    "    return layers"
   ],
   "metadata": {
    "id": "60a72b4d3732df3b",
    "execution": {
     "iopub.status.busy": "2025-01-08T12:23:19.777502Z",
     "iopub.execute_input": "2025-01-08T12:23:19.777820Z",
     "iopub.status.idle": "2025-01-08T12:23:19.782516Z",
     "shell.execute_reply.started": "2025-01-08T12:23:19.777793Z",
     "shell.execute_reply": "2025-01-08T12:23:19.781478Z"
    },
    "trusted": true,
    "ExecuteTime": {
     "end_time": "2025-02-21T12:59:54.656752Z",
     "start_time": "2025-02-21T12:59:54.643583Z"
    }
   },
   "execution_count": 4,
   "outputs": []
  },
  {
   "id": "4c739aded6291406",
   "cell_type": "code",
   "source": [
    "class DNN(nn.Module):#for 状态方程\n",
    "    def __init__(self, layers):\n",
    "        #继承父类\n",
    "        super(DNN, self).__init__()\n",
    "\n",
    "        #depth of network\n",
    "        self.depth = len(layers) - 1\n",
    "        #print(self.depth)\n",
    "\n",
    "        #activation of network\n",
    "        self.activation = nn.Tanh()\n",
    "        self.positive_activation = nn.Softplus()\n",
    "        #create the neural network\n",
    "        layers_list = list()\n",
    "        for i in range(self.depth - 1):\n",
    "            layers_list.append(\n",
    "                ('layer_%d' % i, nn.Linear(layers[i], layers[i+1]))#create each network\n",
    "            )\n",
    "            layers_list.append(\n",
    "                ('activation_%d' % i, self.activation)  #create each activation of network\n",
    "            )\n",
    "        layers_list.append(\n",
    "            ('layer_%d' % (self.depth - 1), nn.Linear(layers[-2], layers[-1]))\n",
    "            #last network do not have activation\n",
    "        )\n",
    "        #创建一个有序字典，其中包含了从 layers_list 中获得的键值对，这在需要保持元素顺序的场景（如神经网络层的顺序）中非常有用。\n",
    "        layerDict = OrderedDict(layers_list)\n",
    "\n",
    "        #deploy layers\n",
    "        self.layers = nn.Sequential(layerDict)\n",
    "        #print(self.layers)\n",
    "        self.positive_activation = nn.Softplus()  # 定义 Softplus 激活函数\n",
    "    #forword network,output the result of network\n",
    "    def forward(self, x):\n",
    "        #out = torch.exp(self.layers(x))\n",
    "        out = self.layers(x)\n",
    "        out = self.positive_activation(out)  # 这里使用 Softplu#ln(e^x+1)\n",
    "        #out = self.positive_activation(out)\n",
    "        return out"
   ],
   "metadata": {
    "id": "4c739aded6291406",
    "execution": {
     "iopub.status.busy": "2025-01-08T12:23:22.080273Z",
     "iopub.execute_input": "2025-01-08T12:23:22.080568Z",
     "iopub.status.idle": "2025-01-08T12:23:22.092118Z",
     "shell.execute_reply.started": "2025-01-08T12:23:22.080547Z",
     "shell.execute_reply": "2025-01-08T12:23:22.091326Z"
    },
    "trusted": true,
    "ExecuteTime": {
     "end_time": "2025-02-21T12:59:54.672491Z",
     "start_time": "2025-02-21T12:59:54.657899Z"
    }
   },
   "execution_count": 5,
   "outputs": []
  },
  {
   "id": "d07f63b4c7b9d6b7",
   "cell_type": "code",
   "source": [
    "class Net_2_for_p_data():\n",
    "    def __init__(self, layers):\n",
    "        self.dnn = DNN(layers).double().to(device)\n",
    "    def l2_loss(self):\n",
    "        l2_loss = torch.tensor(0.0, device=device, dtype=dtype, requires_grad=True)\n",
    "        for name,parma in self.dnn.named_parameters():\n",
    "            if 'bias' not in name:\n",
    "                l2_loss = l2_loss + (0.5 * torch.sum(torch.pow(parma,2)))\n",
    "        return l2_loss\n",
    "    def loss_data(self, rho_e, p_data):\n",
    "        p_data_predict = self.dnn.forward(rho_e)[:, 0:1]\n",
    "        loss_data = ((p_data_predict - p_data) ** 2).mean()\n",
    "        return loss_data\n",
    "class Net_2_for_p():\n",
    "    def __init__(self, layers):\n",
    "        self.dnn = DNN(layers ).double().to(device)\n",
    "    def loss_pde(self, rho_e, u, rho_x, u_x, e_x, u_t, e_t):\n",
    "        rho = rho_e[:, 0:1]\n",
    "        p = self.dnn.forward(rho_e)\n",
    "        dp_g = gradients(p, rho_e)[0]\n",
    "        p_rho, p_e = dp_g[:, :1], dp_g[:, 1:]\n",
    "        #\n",
    "        p_x = p_rho * rho_x + p_e * e_x\n",
    "\n",
    "        f = ((rho * u_t + rho * u * u_x + p_x)**2).mean() + ((rho * e_t + rho * u * e_x + p * u_x)**2).mean()\n",
    "        return f\n",
    "    def loss_data(self, rho_e, p_exact):\n",
    "        p = self.dnn.forward(rho_e)\n",
    "        loss_data = ((p - p_exact) ** 2).mean()\n",
    "        return loss_data\n",
    "#\n"
   ],
   "metadata": {
    "id": "d07f63b4c7b9d6b7",
    "execution": {
     "iopub.status.busy": "2025-01-08T13:59:04.846426Z",
     "iopub.execute_input": "2025-01-08T13:59:04.846739Z",
     "iopub.status.idle": "2025-01-08T13:59:04.863279Z",
     "shell.execute_reply.started": "2025-01-08T13:59:04.846717Z",
     "shell.execute_reply": "2025-01-08T13:59:04.862380Z"
    },
    "trusted": true,
    "ExecuteTime": {
     "end_time": "2025-02-21T12:59:54.703426Z",
     "start_time": "2025-02-21T12:59:54.674001Z"
    }
   },
   "execution_count": 6,
   "outputs": []
  },
  {
   "id": "9f67a2ebd1efcaa8",
   "cell_type": "code",
   "source": [
    "#training\n",
    "def train(epoch):\n",
    "    model.dnn.train()\n",
    "    def closure():\n",
    "        optimizer.zero_grad()\n",
    "\n",
    "        #loss_data(self, x_rho, rho_data, x_u, u_data, x_p, p_data):\n",
    "        loss_data = model.loss_data(rho_e_data_train, p_exact)\n",
    "        #loss_con(self, x_ec, x_ic)\n",
    "        loss_l2 = model.l2_loss()\n",
    "        #total loss\n",
    "        loss =  10*loss_data + alpha * loss_l2# + loss_con\n",
    "\n",
    "        if epoch%10 ==0 :\n",
    "            print(f'epoch:{epoch}, loss:{loss:.8f},loss_data:{loss_data:.8f}loss_l2:{loss_l2:.8f}')\n",
    "\n",
    "\n",
    "\n",
    "        loss_total_history.append(to_numpy(loss))\n",
    "        loss_data_history.append(to_numpy(loss_data))\n",
    "\n",
    "        loss.backward()\n",
    "        return loss\n",
    "    loss = optimizer.step(closure)"
   ],
   "metadata": {
    "id": "9f67a2ebd1efcaa8",
    "execution": {
     "iopub.status.busy": "2025-01-08T13:59:06.814653Z",
     "iopub.execute_input": "2025-01-08T13:59:06.815099Z",
     "iopub.status.idle": "2025-01-08T13:59:06.821298Z",
     "shell.execute_reply.started": "2025-01-08T13:59:06.815064Z",
     "shell.execute_reply": "2025-01-08T13:59:06.820337Z"
    },
    "trusted": true,
    "ExecuteTime": {
     "end_time": "2025-02-21T12:59:54.718323Z",
     "start_time": "2025-02-21T12:59:54.704484Z"
    }
   },
   "execution_count": 7,
   "outputs": []
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-21T14:08:36.875153Z",
     "start_time": "2025-02-21T14:08:36.790764Z"
    }
   },
   "cell_type": "code",
   "source": [
    "#loss_data(self, rho_e, p_exact):\n",
    "data = scipy.io.loadmat(r'train_eos1_case1.mat')  # Import Solution data\n",
    "Exact_rho = np.real(data['rho']).T # Exact total rho(x,t)，即Exact_rho[:,0]是初值\n",
    "Exact_p = np.real(data['p']).T\n",
    "print(Exact_rho.shape)\n",
    "Exact_e = Exact_p / Exact_rho / 0.4\n",
    "rho_e_data_train1 = np.hstack((Exact_rho[:, 0:1], Exact_e[:, 0:1]))\n",
    "rho_e_data_train1 = np.vstack([rho_e_data_train1, np.hstack((Exact_rho[:, -1:], Exact_e[:, -1:]))])\n",
    "p_exact1 = Exact_p[:, 0:1]\n",
    "p_exact1 = np.vstack([p_exact1, Exact_p[:, -1:]])\n",
    "rho1 = Exact_rho.flatten()[:, None]\n",
    "e1 = Exact_e.flatten()[:, None]\n",
    "rho_up1 = to_numpy(rho1).max(0)\n",
    "rho_lb1 = to_numpy(rho1).min(0)\n",
    "e_up1 = to_numpy(e1).max(0)\n",
    "e_lb1 = to_numpy(e1).min(0)\n",
    "#loss_data(self, rho_e, p_exact):\n",
    "data = scipy.io.loadmat(r'train_eos1_case2.mat')  # Import Solution data\n",
    "Exact_rho = np.real(data['rho']).T # Exact total rho(x,t)，即Exact_rho[:,0]是初值\n",
    "Exact_p = np.real(data['p']).T\n",
    "Exact_e = Exact_p / Exact_rho / 0.4\n",
    "rho_e_data_train2 = np.hstack((Exact_rho[:, 0:1], Exact_e[:, 0:1]))\n",
    "rho_e_data_train2 = np.vstack([rho_e_data_train2, np.hstack((Exact_rho[:, -1:], Exact_e[:, -1:]))])\n",
    "p_exact2 = Exact_p[:, 0:1]\n",
    "p_exact2 = np.vstack([p_exact2, Exact_p[:, -1:]])\n",
    "rho2 = Exact_rho.flatten()[:, None]\n",
    "e2 = Exact_e.flatten()[:, None]\n",
    "rho_up2 = to_numpy(rho2).max(0)\n",
    "rho_lb2 = to_numpy(rho2).min(0)\n",
    "e_up2 = to_numpy(e2).max(0)\n",
    "e_lb2 = to_numpy(e2).min(0)\n",
    "#loss_data(self, rho_e, p_exact):\n",
    "data = scipy.io.loadmat(r'train_eos1_case3.mat')  # Import Solution data\n",
    "Exact_rho = np.real(data['rho']).T # Exact total rho(x,t)，即Exact_rho[:,0]是初值\n",
    "Exact_p = np.real(data['p']).T\n",
    "Exact_e = Exact_p / Exact_rho / 0.4\n",
    "rho_e_data_train3 = np.hstack((Exact_rho[:, 0:1], Exact_e[:, 0:1]))\n",
    "rho_e_data_train3 = np.vstack([rho_e_data_train3, np.hstack((Exact_rho[:, -1:], Exact_e[:, -1:]))])\n",
    "p_exact3 = Exact_p[:, 0:1]\n",
    "p_exact3 = np.vstack([p_exact3, Exact_p[:, -1:]])\n",
    "rho3 = Exact_rho.flatten()[:, None]\n",
    "e3 = Exact_e.flatten()[:, None]\n",
    "rho_up3 = to_numpy(rho3).max(0)\n",
    "rho_lb3 = to_numpy(rho3).min(0)\n",
    "e_up3 = to_numpy(e3).max(0)\n",
    "e_lb3 = to_numpy(e3).min(0)\n",
    "#loss_data(self, rho_e, p_exact):\n",
    "data = scipy.io.loadmat(r'train_eos1_case4.mat')  # Import Solution data\n",
    "Exact_rho = np.real(data['rho']).T # Exact total rho(x,t)，即Exact_rho[:,0]是初值\n",
    "Exact_p = np.real(data['p']).T\n",
    "Exact_e = Exact_p / Exact_rho / 0.4\n",
    "rho_e_data_train4 = np.hstack((Exact_rho[:, 0:1], Exact_e[:, 0:1]))\n",
    "rho_e_data_train4 = np.vstack([rho_e_data_train4, np.hstack((Exact_rho[:, -1:], Exact_e[:, -1:]))])\n",
    "p_exact4 = Exact_p[:, 0:1]\n",
    "p_exact4 = np.vstack([p_exact4, Exact_p[:, -1:]])\n",
    "rho4 = Exact_rho.flatten()[:, None]\n",
    "e4 = Exact_e.flatten()[:, None]\n",
    "rho_up4 = to_numpy(rho4).max(0)\n",
    "rho_lb4 = to_numpy(rho4).min(0)\n",
    "e_up4 = to_numpy(e4).max(0)\n",
    "e_lb4 = to_numpy(e4).min(0)\n",
    "#loss_data(self, rho_e, p_exact):\n",
    "data = scipy.io.loadmat(r'train_eos1_case5.mat')  # Import Solution data\n",
    "Exact_rho = np.real(data['rho']).T # Exact total rho(x,t)，即Exact_rho[:,0]是初值\n",
    "Exact_p = np.real(data['p']).T\n",
    "Exact_e = Exact_p / Exact_rho / 0.4\n",
    "rho_e_data_train5 = np.hstack((Exact_rho[:, 0:1], Exact_e[:, 0:1]))\n",
    "rho_e_data_train5 = np.vstack([rho_e_data_train5, np.hstack((Exact_rho[:, -1:], Exact_e[:, -1:]))])\n",
    "p_exact5 = Exact_p[:, 0:1]\n",
    "p_exact5 = np.vstack([p_exact5, Exact_p[:, -1:]])\n",
    "rho5 = Exact_rho.flatten()[:, None]\n",
    "e5 = Exact_e.flatten()[:, None]\n",
    "rho_up5 = to_numpy(rho5).max(0)\n",
    "rho_lb5 = to_numpy(rho5).min(0)\n",
    "e_up5 = to_numpy(e5).max(0)\n",
    "e_lb5 = to_numpy(e5).min(0)"
   ],
   "id": "3dfb5dc3ff9b566d",
   "execution_count": 18,
   "outputs": []
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-21T12:59:58.929808Z",
     "start_time": "2025-02-21T12:59:54.797145Z"
    }
   },
   "cell_type": "code",
   "source": [
    "plt.figure()\n",
    "plt.scatter(rho1, e1, s=3, c='r', label='case1')\n",
    "plt.scatter(rho2, e2, s=3, c='b', label='case2')\n",
    "plt.scatter(rho3, e3, s=3, c='y', label='case3')\n",
    "plt.scatter(rho4, e4, s=3, c='g', label='case4')\n",
    "plt.scatter(rho5, e5, s=3, c='c', label='case5')\n",
    "#plt.scatter(rho6, e6, s=3, c='b', label='case6')\n",
    "plt.xlabel('rho')\n",
    "plt.ylabel('e')\n",
    "plt.legend()\n",
    "plt.show()"
   ],
   "id": "2922bbdd83e73628",
   "execution_count": 9,
   "outputs": []
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-21T12:59:59.131373Z",
     "start_time": "2025-02-21T12:59:58.931782Z"
    }
   },
   "cell_type": "code",
    "source": [
     "# Merge the five cases into one training set of (rho, e) -> p pairs.\n",
     "rho_e_data_train = np.vstack([rho_e_data_train1, rho_e_data_train2, rho_e_data_train3, rho_e_data_train4, rho_e_data_train5])\n",
     "p_exact = np.vstack([p_exact1, p_exact2, p_exact3, p_exact4, p_exact5])\n",
     "plt.figure()\n",
     "plt.scatter(rho_e_data_train[:,0:1], rho_e_data_train[:,1:2], s=3, c='r', label='case')\n",
     "plt.xlabel('rho')\n",
     "plt.ylabel('e')\n",
     "plt.legend()\n",
     "plt.show()\n",
     "print('rho_e_data_train:',rho_e_data_train.shape )\n",
     "\n",
     "# Convert to tensors on the training device; inputs need requires_grad so\n",
     "# dp/d(rho, e) can be taken by autograd later.\n",
     "rho_e_data_train = torch.tensor(rho_e_data_train, dtype=dtype, device=device, requires_grad=True)\n",
     "p_exact = torch.tensor(p_exact, dtype=dtype, device=device)"
    ],
   "id": "f699d7b348918bc9",
   "execution_count": 10,
   "outputs": []
  },
  {
   "id": "24bea047f2118673",
   "cell_type": "code",
    "source": [
     "# NOTE(review): dead experimental code kept only as a string literal below; it\n",
     "# never executes. Consider deleting this cell.\n",
     "'''#\n",
     "data = scipy.io.loadmat(r'train_eos1_case2.mat')  # Import Solution data\n",
     "x_data = data['x'].flatten()[:, None] # Partitioned spatial coordinates\n",
     "t_data = data['t'].flatten()[:, None]  # Partitioned spatial coordinates\n",
     "Exact_rho = np.real(data['rho']).T  # Exact total rho(x,t)，即Exact_rho[:,0]是初值\n",
     "Exact_p = np.real(data['p']).T\n",
     "Exact_u = np.real(data['u']).T\n",
     "Exact_e = Exact_p / Exact_rho / 0.4\n",
     "#初始数据\n",
     "p_ic = Exact_p[:, 0:1]\n",
     "e_ic = Exact_e[:, 0:1]\n",
     "rho_ic = Exact_rho[:, 0:1]\n",
     "#末态数据\n",
     "p_end = Exact_p[:, -1:]\n",
     "e_end = Exact_e[:, -1:]\n",
     "rho_end = Exact_rho[:, -1:]\n",
     "#data\n",
     "p_data = np.vstack([p_ic, p_end])\n",
     "e_train = np.vstack([e_ic, e_end])\n",
     "rho_train = np.vstack([rho_ic, rho_end])\n",
     "rho_e_train = np.hstack([rho_train, e_train])\n",
     "#tensor化\n",
     "rho_e_train = torch.tensor(rho_e_train, dtype=dtype, device=device, requires_grad=True)\n",
     "p_data = torch.tensor(p_data, dtype=dtype, device=device)'''"
    ],
   "metadata": {
    "id": "24bea047f2118673",
    "trusted": true,
    "execution": {
     "iopub.status.busy": "2025-01-08T14:00:23.204976Z",
     "iopub.execute_input": "2025-01-08T14:00:23.205293Z",
     "iopub.status.idle": "2025-01-08T14:00:23.230264Z",
     "shell.execute_reply.started": "2025-01-08T14:00:23.205270Z",
     "shell.execute_reply": "2025-01-08T14:00:23.229399Z"
    },
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "outputId": "5fdb3990-dfa2-401a-d77b-cf69d6e28b42",
    "ExecuteTime": {
     "end_time": "2025-02-21T12:59:59.146912Z",
     "start_time": "2025-02-21T12:59:59.132375Z"
    }
   },
   "execution_count": 11,
   "outputs": []
  },
  {
   "id": "24612c77b3401f6c",
   "cell_type": "code",
    "source": [
     "# 2 inputs (rho, e) -> 1 output (p); 1 hidden layer of 10 neurons.\n",
     "layers = Dnnlayers(2, 1, 1, 10)\n",
     "model = Net_2_for_p_data(layers)"
    ],
   "metadata": {
    "id": "24612c77b3401f6c",
    "execution": {
     "iopub.status.busy": "2025-01-08T14:00:30.615034Z",
     "iopub.execute_input": "2025-01-08T14:00:30.615329Z",
     "iopub.status.idle": "2025-01-08T14:00:30.624002Z",
     "shell.execute_reply.started": "2025-01-08T14:00:30.615307Z",
     "shell.execute_reply": "2025-01-08T14:00:30.623092Z"
    },
    "trusted": true,
    "ExecuteTime": {
     "end_time": "2025-02-21T12:59:59.178493Z",
     "start_time": "2025-02-21T12:59:59.148918Z"
    }
   },
   "execution_count": 12,
   "outputs": []
  },
  {
   "id": "36fa46ed331a8794",
   "cell_type": "code",
    "source": [
     "# Stage 1: Adam warm-up.\n",
     "loss_total_history = []\n",
     "loss_data_history = []\n",
     "lr = 0.001\n",
     "alpha = 0.00001  # L2 regularization weight used inside train()\n",
     "optimizer = torch.optim.Adam(model.dnn.parameters(), lr=lr)#,weight_decay=0.0001\n",
     "epochs1 = 15000\n",
     "tic = time.time()\n",
     "for epoch in range(1, epochs1+1):\n",
     "    train(epoch)\n",
     "toc = time.time()"
    ],
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "36fa46ed331a8794",
    "outputId": "edf51034-ad15-4a04-b690-0de122ac914f",
    "execution": {
     "iopub.status.busy": "2025-01-08T14:00:37.456712Z",
     "iopub.execute_input": "2025-01-08T14:00:37.457044Z",
     "execution_failed": "2025-01-08T14:02:49.040Z"
    },
    "trusted": true,
    "ExecuteTime": {
     "end_time": "2025-02-21T13:00:13.131988Z",
     "start_time": "2025-02-21T12:59:59.179496Z"
    }
   },
   "execution_count": 13,
   "outputs": []
  },
  {
   "id": "86c0f9a889329a74",
   "cell_type": "code",
    "source": [
     "# Stage 2: LBFGS fine-tuning after the Adam warm-up (reuses train()'s closure).\n",
     "optimizer = torch.optim.LBFGS(model.dnn.parameters(),lr=1.0,max_iter=20)\n",
     "epochs2 = 2000\n",
     "tic = time.time()\n",
     "for epoch in range(1, epochs2+1):\n",
     "    train(epoch)\n",
     "toc = time.time()"
    ],
   "metadata": {
    "id": "86c0f9a889329a74",
    "trusted": true,
    "execution": {
     "iopub.status.busy": "2025-01-08T12:48:27.065211Z",
     "iopub.execute_input": "2025-01-08T12:48:27.065590Z",
     "iopub.status.idle": "2025-01-08T13:16:03.958133Z",
     "shell.execute_reply.started": "2025-01-08T12:48:27.065552Z",
     "shell.execute_reply": "2025-01-08T13:16:03.957165Z"
    },
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "outputId": "1d47db29-97f3-4ce6-f35c-0058712519a9",
    "ExecuteTime": {
     "end_time": "2025-02-21T13:00:14.985294Z",
     "start_time": "2025-02-21T13:00:13.132990Z"
    }
   },
   "execution_count": 14,
   "outputs": []
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-21T13:00:15.015922Z",
     "start_time": "2025-02-21T13:00:14.986297Z"
    }
   },
   "cell_type": "code",
    "source": [
     "# NOTE: torch.save on the full model pickles class definitions; prefer saving\n",
     "# model.dnn.state_dict() for portability across code versions.\n",
     "torch.save(model, 'data_eos1_model.pth')"
    ],
   "id": "2c815d916db21e91",
   "execution_count": 15,
   "outputs": []
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-21T13:00:15.047082Z",
     "start_time": "2025-02-21T13:00:15.017007Z"
    }
   },
   "cell_type": "code",
    "source": [
     "# NOTE: torch.load unpickles arbitrary objects; only load trusted checkpoints.\n",
     "# map_location keeps the load portable between CPU and GPU machines.\n",
     "Net2_model = torch.load('Net2_eos1_model.pth', map_location=device)"
    ],
   "id": "a5912e8dc413a0ea",
   "execution_count": 16,
   "outputs": []
  },
  {
   "id": "7cc2fa17c683c811",
   "cell_type": "code",
   "source": [
    "from mpl_toolkits.axes_grid1 import make_axes_locatable\n",
    "#预测\n",
    "rho_up = max(rho_up1,rho_up2,rho_up3,rho_up4, rho_up5)\n",
    "rho_lb = min(rho_lb1,rho_lb2,rho_lb3,rho_lb4, rho_lb5)\n",
    "e_up = max(e_up1, e_up2, e_up3, e_up4, e_up5)\n",
    "e_lb = min(e_lb1, e_lb2, e_lb3, e_lb4, e_lb5)\n",
    "print(rho_up,rho_lb,e_up,e_lb)\n",
    "size1 = 200\n",
    "size2 = 300\n",
    "rho = np.linspace(rho_lb, rho_up, size1)\n",
    "e = np.linspace(e_lb, e_up, size2)\n",
    "rho_test_grid, e_test_grid = np.meshgrid(rho, e)\n",
    "rho_e_test = np.hstack((rho_test_grid.flatten()[:,None], e_test_grid.flatten()[:,None]))\n",
    "p_test_exact = (rho_e_test[:, -1:] * rho_e_test[:, 0:1] * 0.4).reshape(size2,size1)\n",
    "\n",
    "rho_e_test = torch.tensor(rho_e_test, requires_grad=True, dtype=dtype).to(device)\n",
    "p_data_predict = to_numpy(model.dnn.forward(rho_e_test)).reshape(size2,size1)\n",
    "p_PINN_predict = to_numpy(Net2_model.dnn.forward(rho_e_test)).reshape(size2,size1)\n",
    "\n",
    "#泛化性\n",
    "plt.figure()\n",
    "plt.contourf(rho_test_grid, e_test_grid, p_data_predict, levels=np.linspace(p_data_predict.min(), p_data_predict.max(), 1000), cmap='jet')\n",
    "plt.colorbar()\n",
    "plt.xlabel('rho')\n",
    "plt.ylabel('e')\n",
    "plt.title('p_data_predict')\n",
    "plt.savefig(r\"p_data_predict.png\")\n",
    "plt.show()\n",
    "\n",
    "#泛化性\n",
    "plt.figure()\n",
    "plt.contourf(rho_test_grid, e_test_grid, p_PINN_predict, levels=np.linspace(p_PINN_predict.min(), p_PINN_predict.max(), 1000), cmap='jet')\n",
    "plt.colorbar()\n",
    "plt.xlabel('rho')\n",
    "plt.ylabel('e')\n",
    "plt.title('p_PINN_predict')\n",
    "plt.show()\n",
    "\n",
    "plt.figure()\n",
    "plt.contourf(rho_test_grid, e_test_grid, p_test_exact, levels=np.linspace(p_test_exact.min(), p_test_exact.max(), 1000), cmap='jet')\n",
    "plt.colorbar()\n",
    "plt.xlabel('rho')\n",
    "plt.ylabel('e')\n",
    "plt.title('p_exact')\n",
    "plt.show()\n",
    "\n",
    "#p的error\n",
    "error_p = abs(p_data_predict-p_test_exact)\n",
    "plt.figure()\n",
    "plt.contourf(rho_test_grid, e_test_grid, error_p, levels=np.linspace(error_p.min(), error_p.max(), 1000), cmap='jet')\n",
    "plt.colorbar()\n",
    "plt.xlabel('rho')\n",
    "plt.ylabel('e')\n",
    "plt.title('p_error')\n",
    "plt.savefig(r\"data_p_error.png\")\n",
    "plt.show()\n",
    "\n",
    "#p2的error\n",
    "error_p2 = abs(p_PINN_predict-p_test_exact)\n",
    "plt.figure()\n",
    "plt.contourf(rho_test_grid, e_test_grid, error_p2, levels=np.linspace(error_p2.min(), error_p2.max(), 1000), cmap='jet')\n",
    "plt.colorbar()\n",
    "plt.xlabel('rho')\n",
    "plt.ylabel('e')\n",
    "plt.title('p_PINN_error')\n",
    "plt.show()\n",
    "\n",
    "g_min = np.around(min(error_p2.min(), error_p.min()), decimals=5)\n",
    "g_max = np.around(max(error_p2.max(), error_p.max()), decimals=5)\n",
    "# 创建一个包含两个子图的图像\n",
    "fig, axes = plt.subplots(1, 2, figsize=(12, 5))  # 1行2列的子图布局\n",
    "\n",
    "# 绘制第一个error (p_error)\n",
    "contour1 = axes[0].contourf(rho_test_grid, e_test_grid, error_p, \n",
    "                            levels=np.linspace(g_min, g_max, 1000), cmap='jet')\n",
    "axes[0].set_xlabel('rho')\n",
    "axes[0].set_ylabel('e')\n",
    "axes[0].set_title('p_error')\n",
    "#fig.colorbar(contour1, ax=axes[0])  # 为第一个子图添加颜色条\n",
    "\n",
    "# 绘制第二个error (p2_error)\n",
    "contour2 = axes[1].contourf(rho_test_grid, e_test_grid, error_p2, \n",
    "                            levels=np.linspace(g_min, g_max, 1000), cmap='jet')\n",
    "axes[1].set_xlabel('rho')\n",
    "axes[1].set_ylabel('e')\n",
    "axes[1].set_title('p2_error')\n",
    "#fig.colorbar(contour2, ax=axes[1])  # 为第二个子图添加颜色条\n",
    "\n",
    "# 添加共享的颜色条\n",
    "# 创建一个Axes对象用于颜色条\n",
    "divider = make_axes_locatable(axes[1])  # 使用axes[1]作为参考\n",
    "cax = divider.append_axes(\"right\", size=\"5%\", pad=0.1)  # 在右侧添加一个颜色条区域\n",
    "\n",
    "# 将颜色条放置在两个子图之间\n",
    "fig.colorbar(contour1, cax=cax, orientation='vertical')  # contour1 或 contour2 都可以，因为它们共享相同的颜色范围\n",
    "\n",
    "# 调整布局并保存图像\n",
    "plt.tight_layout(rect=[0, 0, 1, 1])  # 确保颜色条不被裁剪\n",
    "plt.show()\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "#没有图像title\n",
    "g_min = np.around(min(error_p2.min(), error_p.min()), decimals=5)\n",
    "g_max = np.around(max(error_p2.max(), error_p.max()), decimals=5)\n",
    "# 创建一个包含两个子图的图像\n",
    "fig, axes = plt.subplots(1, 2, figsize=(12, 5))  # 1行2列的子图布局\n",
    "\n",
    "# 绘制第一个error (p_error)\n",
    "contour1 = axes[0].contourf(rho_test_grid, e_test_grid, error_p, \n",
    "                            levels=np.linspace(g_min, g_max, 1000), cmap='jet')\n",
    "axes[0].set_xlabel('rho')\n",
    "axes[0].set_ylabel('e')\n",
    "#axes[0].set_title('p_error')\n",
    "#fig.colorbar(contour1, ax=axes[0])  # 为第一个子图添加颜色条\n",
    "\n",
    "# 绘制第二个error (p2_error)\n",
    "contour2 = axes[1].contourf(rho_test_grid, e_test_grid, error_p2, \n",
    "                            levels=np.linspace(g_min, g_max, 1000), cmap='jet')\n",
    "axes[1].set_xlabel('rho')\n",
    "axes[1].set_ylabel('e')\n",
    "#axes[1].set_title('p2_error')\n",
    "#fig.colorbar(contour2, ax=axes[1])  # 为第二个子图添加颜色条\n",
    "\n",
    "# 添加共享的颜色条\n",
    "# 创建一个Axes对象用于颜色条\n",
    "divider = make_axes_locatable(axes[1])  # 使用axes[1]作为参考\n",
    "cax = divider.append_axes(\"right\", size=\"5%\", pad=0.1)  # 在右侧添加一个颜色条区域\n",
    "\n",
    "# 将颜色条放置在两个子图之间\n",
    "fig.colorbar(contour1, cax=cax, orientation='vertical')  # contour1 或 contour2 都可以，因为它们共享相同的颜色范围\n",
    "\n",
    "# 调整布局并保存图像\n",
    "plt.tight_layout(rect=[0, 0, 1, 1])  # 确保颜色条不被裁剪\n",
    "plt.savefig(\"data_combined_errors.png\")\n",
    "plt.show()\n",
    "\n",
    "\n",
    "eporchx = np.arange(1, len(loss_data_history)+1)\n",
    "plt.figure()\n",
    "plt.plot(eporchx, loss_total_history, label='loss_total_history')\n",
    "plt.plot(eporchx, loss_data_history, label='loss_data_history')\n",
    "plt.yscale('log')\n",
    "plt.title('loss with eproch')\n",
    "plt.legend()\n",
    "plt.xlabel('eporch')\n",
    "plt.ylabel('loss')\n",
    "#plt.savefig(r\"data_loss.png\")\n",
    "plt.show()\n",
    "\n",
    "\n",
    "#MSE\n",
    "print('MSE error for every var')\n",
    "print('error_p2 is ',(error_p2**2).mean())\n",
    "print('error_p is ',(error_p**2).mean())\n",
    "print('*****************************************')\n",
     "#L2 error\n",
     "print('L2 error for every var')\n",
     "print('error_p2 is ',np.sqrt((error_p2**2).mean()))\n",
     "print('error_p is ',np.sqrt((error_p**2).mean()))\n",
     "print('*****************************************')\n",
     "# relative error\n",
     "# NOTE(review): the usual relative L2 error is norm(error)/norm(exact) without\n",
     "# the outer sqrt, and ord=2 on a 2-D array is the spectral (not Frobenius)\n",
     "# norm - confirm this is intended.\n",
     "# NOTE(review): p_exact is a torch tensor here (possibly on GPU), and\n",
     "# np.linalg.norm can fail on CUDA tensors - consider to_numpy(p_exact).\n",
     "print('relative error for every var')\n",
     "print('error_p2 is ',np.sqrt(np.linalg.norm(error_p2, ord=2)/np.linalg.norm(p_exact, ord=2)))\n",
     "print('error_p is ',np.sqrt(np.linalg.norm(error_p, ord=2)/np.linalg.norm(p_exact, ord=2)))\n",
    "# 打开一个文件用于写入\n",
    "with open('data_errors.txt', 'w') as file:\n",
    "    file.write(\"# MSE errors for every variable\\n\")\n",
    "    file.write(f\"MSE error for p2: error_p2**2.mean(): {round((error_p2**2).mean(), 8)}\\n\")\n",
    "\n",
    "    file.write(\"# L2 errors for every variable\\n\")\n",
    "    file.write(f\"L2 error for p2: {np.sqrt((error_p2**2).mean()):.4f}\\n\")\n",
    "\n",
    "    file.write(\"# Relative errors for every variable\\n\")\n",
    "    file.write(f\"Relative error for p2: {np.sqrt(np.linalg.norm(error_p2, ord=2) / np.linalg.norm(p_exact, ord=2)):.4f}\\n\")\n",
    "\n",
    "    file.write(\"# Tend MSE errors for every variable\\n\")\n",
    "    file.write(f\"Tend MSE error for p2: error_p2**2.mean(): {round((error_p2**2).mean(), 8)}\\n\")\n",
    "\n",
    "    file.write(\"# Tend L2 errors for every variable\\n\")\n",
    "    file.write(f\"Tend L2 error for p2: {np.sqrt((error_p2**2).mean()):.4f}\\n\")\n",
    "\n",
    "    file.write(\"#Tend Relative errors for every variable\\n\")\n",
    "    file.write(f\"Tend Relative error for p2: {np.sqrt(np.linalg.norm(error_p2, ord=2) / np.linalg.norm(p_exact, ord=2)):.4f}\\n\")\n",
    "print(\"Errors have been written to errors.txt\")"
   ],
   "metadata": {
    "id": "7cc2fa17c683c811",
    "execution": {
     "iopub.status.busy": "2025-01-08T13:29:30.317026Z",
     "iopub.execute_input": "2025-01-08T13:29:30.317353Z",
     "iopub.status.idle": "2025-01-08T13:29:39.269335Z",
     "shell.execute_reply.started": "2025-01-08T13:29:30.317329Z",
     "shell.execute_reply": "2025-01-08T13:29:39.267479Z"
    },
    "trusted": true,
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 1000
    },
    "outputId": "c1f4fb36-3b5f-4f05-9e99-43e89196227d",
    "ExecuteTime": {
     "end_time": "2025-02-21T13:00:25.983390Z",
     "start_time": "2025-02-21T13:00:15.048085Z"
    }
   },
   "execution_count": 17,
   "outputs": []
  },
  {
   "id": "6ae1d006a7a81346",
   "cell_type": "code",
    "source": [
     "# Save the predicted fields and their autograd derivatives over the whole domain.\n",
     "# NOTE(review): `model` defined above is Net_2_for_p_data, which exposes `.dnn`\n",
     "# but no `.Net1` / `.Net` attributes - this cell appears to expect a different\n",
     "# model object (e.g. a loaded PINN checkpoint); confirm before running.\n",
     "data = scipy.io.loadmat(r'train_eos1_case1.mat')  # Import Solution data\n",
     "x_data = data['x'].flatten()[:, None] # Partitioned spatial coordinates\n",
     "t_data = data['t'].flatten()[:, None]  # Partitioned spatial coordinates\n",
     "Exact_rho = np.real(data['rho']) # exact rho(x,t); column 0 is the initial condition\n",
     "Exact_p = np.real(data['p'])\n",
     "Exact_u = np.real(data['u'])\n",
     "# build the (t, x) evaluation grid\n",
     "t_data_grid, x_data_grid = np.meshgrid(t_data, x_data)\n",
     "T_data = t_data_grid.flatten()[:, None]\n",
     "X_data = x_data_grid.flatten()[:, None]\n",
     "x_test = torch.tensor(np.hstack((T_data, X_data)), requires_grad=True, dtype=dtype).to(device)\n",
     "# predict state variables\n",
     "U_predict = model.Net1.forward(x_test)\n",
     "rho_predict = U_predict[:, 0:1]\n",
     "u_predict = U_predict[:, 1:2]\n",
     "e_predict = U_predict[:, 2:3]\n",
     "\n",
     "'''rho_e_test = np.hstack([to_numpy(rho_predict), to_numpy(e_predict)])\n",
     "rho_e_test = torch.tensor(rho_e_test, dtype=dtype, device=device, requires_grad=True)'''\n",
     "p_predict = to_numpy(model.Net.forward(x_test))\n",
     "\n",
     "# derivatives of the predictions via autograd\n",
     "drho_g = gradients(rho_predict, x_test)[0]\n",
     "rho_t, rho_x = drho_g[:, :1], drho_g[:, 1:]\n",
     "de_g = gradients(e_predict, x_test)[0]\n",
     "e_t, e_x = de_g[:, :1], de_g[:, 1:]\n",
     "du_g = gradients(u_predict, x_test)[0]\n",
     "u_t, u_x = du_g[:, :1], du_g[:, 1:]\n",
     "# reshape flat predictions back onto the (t, x) grid\n",
     "rho_predict = rho_predict.reshape(len(x_data), len(t_data)).T\n",
     "u_predict = u_predict.reshape(len(x_data), len(t_data)).T\n",
     "e_predict = e_predict.reshape(len(x_data), len(t_data)).T\n",
     "p_predict = p_predict.reshape(len(x_data), len(t_data)).T\n",
     "rho_t = rho_t.reshape(len(x_data), len(t_data)).T\n",
     "u_t = u_t.reshape(len(x_data), len(t_data)).T\n",
     "e_t = e_t.reshape(len(x_data), len(t_data)).T\n",
     "rho_x = rho_x.reshape(len(x_data), len(t_data)).T\n",
     "u_x = u_x.reshape(len(x_data), len(t_data)).T\n",
     "e_x = e_x.reshape(len(x_data), len(t_data)).T\n",
     "print('rho_predict.shape',rho_predict.shape)\n",
     "# write everything to a .mat file for downstream use\n",
     "scipy.io.savemat('train_eos1_case1_predict.mat', {'x':x_data,'t':t_data,'rho':to_numpy(rho_predict),'u':to_numpy(u_predict),'e':to_numpy(e_predict), 'p':to_numpy(p_predict),'rho_t':to_numpy(rho_t),'rho_x':to_numpy(rho_x),'e_t':to_numpy(e_t),'e_x':to_numpy(e_x),'u_t':to_numpy(u_t),'u_x':to_numpy(u_x)})"
    ],
   "metadata": {
    "id": "6ae1d006a7a81346",
    "execution": {
     "iopub.status.busy": "2025-01-08T13:44:39.379463Z",
     "iopub.execute_input": "2025-01-08T13:44:39.379853Z",
     "iopub.status.idle": "2025-01-08T13:44:39.584006Z",
     "shell.execute_reply.started": "2025-01-08T13:44:39.379821Z",
     "shell.execute_reply": "2025-01-08T13:44:39.583076Z"
    },
    "trusted": true,
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "outputId": "a9da3c86-4d3e-4b5d-84cd-53a4eb8abfb2",
    "ExecuteTime": {
     "end_time": "2025-01-15T04:43:03.024580Z",
     "start_time": "2025-01-15T04:43:02.621354Z"
    }
   },
   "execution_count": 16,
   "outputs": []
  },
  {
   "id": "c226b9d91185272e",
   "cell_type": "code",
    "source": [
     "# NOTE: saving the whole model pickles class definitions; prefer saving\n",
     "# model.dnn.state_dict() for portability.\n",
     "torch.save(model, 'train_eos1_case1_model.pth')"
    ],
   "metadata": {
    "id": "c226b9d91185272e",
    "ExecuteTime": {
     "end_time": "2025-01-15T04:43:04.181503Z",
     "start_time": "2025-01-15T04:43:04.118282Z"
    }
   },
   "execution_count": 17,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [],
   "metadata": {
    "id": "wcGkpz3Nc93E"
   },
   "id": "wcGkpz3Nc93E",
   "execution_count": null,
   "outputs": []
  }
 ]
}
