{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Num GPUs Available:  1\n"
     ]
    }
   ],
   "source": [
    "import numpy as np\n",
    "import pandas as pd\n",
    "from keras.models import Sequential\n",
    "from keras.layers import LSTM, Dense\n",
    "from sklearn.preprocessing import MinMaxScaler\n",
    "from sklearn.preprocessing import MaxAbsScaler\n",
    "import tensorflow as tf\n",
    "import matplotlib.pyplot as plt\n",
    "from keras.optimizers import Adam\n",
    "from itertools import product\n",
    "import random\n",
    "import os\n",
    "\n",
    "seed=22\n",
    "random.seed(seed)\n",
    "np.random.seed(seed)\n",
    "tf.random.set_seed(seed)\n",
    "os.environ['PYTHONHASHSEED'] = str(seed)\n",
    "tf.config.experimental.enable_op_determinism()\n",
    "\n",
    "# 指定 GPU 设备\n",
    "physical_devices = tf.config.list_physical_devices('GPU') \n",
    "if len(physical_devices) > 0:\n",
    "    tf.config.experimental.set_memory_growth(physical_devices[0], True)\n",
    "\n",
    "# 检查是否设置了 GPU\n",
    "print(\"Num GPUs Available: \", len(physical_devices))\n",
    "\n",
    "# 余下的代码与你的原始代码保持不变\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "              price\n",
      "0                  \n",
      "2018-01-01  1.29100\n",
      "2018-01-02  1.31205\n",
      "2018-01-03  1.31490\n",
      "2018-01-04  1.31450\n",
      "2018-01-05  1.31715\n",
      "...             ...\n",
      "2019-05-27  1.28250\n",
      "2019-05-28  1.27830\n",
      "2019-05-29  1.28165\n",
      "2019-05-30  1.28095\n",
      "2019-05-31  1.29555\n",
      "\n",
      "[516 rows x 1 columns]\n"
     ]
    }
   ],
   "source": [
    "start_date = 20180101\n",
    "end_date = 20190601\n",
    "size=9601\n",
    "\n",
    "start_time = pd.Timestamp(str(start_date))\n",
    "end_time = pd.Timestamp(str(end_date))\n",
    "\n",
    "\n",
    "# 读取数据\n",
    "priceFile='../price/DailyUSDPrice.xlsx'\n",
    "price = pd.read_excel(priceFile,header=None)\n",
    "\n",
    "# 将第 0 列转换为 datetime 类型\n",
    "price[0] = pd.to_datetime(price[0])\n",
    "\n",
    "price=price[(price[0] >= start_time) & (price[0] < end_time)]\n",
    "\n",
    "# 将第 0 列设置为索引\n",
    "price.set_index(0, inplace=True)\n",
    "\n",
    "price=price/1000\n",
    "\n",
    "# 重命名列\n",
    "price.columns = ['price']\n",
    "\n",
    "# 显示 DataFrame\n",
    "print(price)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "                   消费者     利率         道琼斯\n",
      "0                                        \n",
      "2018-01-01  247.867000  0.015  24797.8125\n",
      "2018-01-02  247.903258  0.015  24824.0100\n",
      "2018-01-03  247.939516  0.015  24922.6800\n",
      "2018-01-04  247.975774  0.015  25075.1300\n",
      "2018-01-05  248.012032  0.015  25295.8700\n",
      "...                ...    ...         ...\n",
      "2019-05-27  256.134774  0.025  25407.2500\n",
      "2019-05-28  256.136419  0.025  25347.7700\n",
      "2019-05-29  256.138065  0.025  25126.4100\n",
      "2019-05-30  256.139710  0.025  25169.8800\n",
      "2019-05-31  256.141355  0.025  24815.0400\n",
      "\n",
      "[516 rows x 3 columns]\n"
     ]
    }
   ],
   "source": [
    "others=['new消费者价格指数','new利率','new道琼斯工业平均指数历史数据']\n",
    "otherData=pd.DataFrame()\n",
    "for i in others:\n",
    "    file='../newCsv/'+i+'.csv'\n",
    "    df=pd.read_csv(file)\n",
    "    df.columns=[0,1]\n",
    "    df[0] = pd.to_datetime(df[0], format='%Y-%m-%d')\n",
    "    \n",
    "    df=df[(df[0] >= start_time) & (df[0] < end_time)]\n",
    "    \n",
    "    df.set_index(0, inplace=True)\n",
    "    # print(df)\n",
    "    \n",
    "    # 横向合并三个 DataFrame\n",
    "    otherData= pd.concat([otherData,df], axis=1)\n",
    "\n",
    "\n",
    "otherData.columns=['消费者','利率','道琼斯']\n",
    "# 显示合并后的 DataFrame\n",
    "print(otherData)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "            1     2     3         4     5     6     7         8     9     \\\n",
      "0                                                                          \n",
      "2018-01-01   1.0   0.0  14.0 -2.514881   1.0   1.9   4.0 -2.514881   1.0   \n",
      "2018-01-02   1.0   4.0   5.0  2.447552   1.0   2.8   5.0  2.447552   1.0   \n",
      "2018-01-03   3.0  -2.0  20.0 -3.860219   1.0   1.9  10.0 -6.752412   2.0   \n",
      "2018-01-04   1.0   3.0  10.0 -8.148148   1.0   1.9  10.0 -1.931519   3.0   \n",
      "2018-01-05   1.0   3.4   4.0 -0.920245   1.0   1.9   2.0 -6.172840   1.0   \n",
      "...          ...   ...   ...       ...   ...   ...   ...       ...   ...   \n",
      "2019-05-27   1.0   2.8   2.0  2.747253   1.0   2.8   2.0  2.786885   1.0   \n",
      "2019-05-28   1.0   2.8   2.0 -3.266788   3.0  -4.0  10.0 -0.203252   1.0   \n",
      "2019-05-29   1.0   0.0   1.0 -3.871829   1.0   0.0   1.0 -3.871829   1.0   \n",
      "2019-05-30   1.0   0.0   1.0  5.288462   1.0   0.0   1.0  5.288462   1.0   \n",
      "2019-05-31   1.0   1.9  10.0 -3.896104   1.0   1.9   6.0 -3.896104   1.0   \n",
      "\n",
      "            10    ...  9591      9592  9593  9594  9595      9596  9597  9598  \\\n",
      "0                 ...                                                           \n",
      "2018-01-01   1.9  ...   1.0  1.948052   2.0  10.0   1.0 -5.056180   2.0  10.0   \n",
      "2018-01-02   4.0  ...   3.0  1.030928   1.0   1.0  10.0  3.286385   1.0   7.0   \n",
      "2018-01-03   7.0  ...   2.0 -2.631579   3.0  -2.0  20.0 -3.413490   3.0  -6.5   \n",
      "2018-01-04  -2.0  ...   2.0 -3.087248   2.0  -2.0   6.0 -3.087248   2.0  -2.0   \n",
      "2018-01-05   1.9  ...   5.0 -1.498127   2.0   7.0  66.0 -4.048607   2.0  -2.0   \n",
      "...          ...  ...   ...       ...   ...   ...   ...       ...   ...   ...   \n",
      "2019-05-27   0.4  ...   2.0  1.213920   3.0  -4.4   8.0 -1.208981   1.0   2.8   \n",
      "2019-05-28   2.8  ...   2.0  1.408451   1.0   1.0   1.0  1.090909   1.0   1.0   \n",
      "2019-05-29   2.8  ...   3.0 -4.841645   4.0  -5.0   3.0 -4.016064   4.0  -5.0   \n",
      "2019-05-30   2.8  ...   8.0 -0.225734   1.0   0.0  10.0  1.988636   1.0   0.0   \n",
      "2019-05-31   1.0  ...   2.0  3.385417   1.0   1.0   4.0  3.594771   1.0   1.0   \n",
      "\n",
      "            9599      9600  \n",
      "0                           \n",
      "2018-01-01   1.0 -5.056180  \n",
      "2018-01-02  10.0  3.286385  \n",
      "2018-01-03   2.0 -3.305785  \n",
      "2018-01-04   2.0 -4.367816  \n",
      "2018-01-05  10.0  0.300300  \n",
      "...          ...       ...  \n",
      "2019-05-27   6.0  1.273885  \n",
      "2019-05-28   1.0  1.090909  \n",
      "2019-05-29   1.0 -4.841645  \n",
      "2019-05-30   6.0  1.756955  \n",
      "2019-05-31   6.0  3.594771  \n",
      "\n",
      "[516 rows x 9600 columns]\n"
     ]
    }
   ],
   "source": [
    "\n",
    "file='../newCsv/'+str(size)+'_'+str(start_date)+'_to_'+str(end_date)+'.csv'\n",
    "GDELT=pd.read_csv(file,header=None)\n",
    "GDELT[0] = pd.to_datetime(GDELT[0], format='%Y%m%d')\n",
    "GDELT=GDELT[(GDELT[0] >= start_time) & (GDELT[0] < end_time)]\n",
    "GDELT.set_index(0, inplace=True)\n",
    "print(GDELT)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0\n",
      "2018-01-01    1.29100\n",
      "2018-01-02    1.31205\n",
      "2018-01-03    1.31490\n",
      "2018-01-04    1.31450\n",
      "2018-01-05    1.31715\n",
      "               ...   \n",
      "2019-05-27    1.28250\n",
      "2019-05-28    1.27830\n",
      "2019-05-29    1.28165\n",
      "2019-05-30    1.28095\n",
      "2019-05-31    1.29555\n",
      "Name: price, Length: 516, dtype: float64\n"
     ]
    }
   ],
   "source": [
    "Y=price['price']\n",
    "print(Y)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "          消费者   利率       道琼斯         1      2         3         4         5  \\\n",
      "0    0.000000  0.0  0.596803  0.000000  0.500  0.188406  0.514581  0.000000   \n",
      "1    0.004382  0.0  0.602005  0.000000  0.700  0.057971  0.699467  0.000000   \n",
      "2    0.008764  0.0  0.621597  0.666667  0.400  0.275362  0.464458  0.000000   \n",
      "3    0.013146  0.0  0.651868  0.000000  0.650  0.130435  0.304702  0.000000   \n",
      "4    0.017528  0.0  0.695699  0.000000  0.670  0.043478  0.573993  0.000000   \n",
      "..        ...  ...       ...       ...    ...       ...       ...       ...   \n",
      "511  0.999205  1.0  0.717814  0.000000  0.640  0.014493  0.710633  0.000000   \n",
      "512  0.999404  1.0  0.706004  0.000000  0.640  0.014493  0.486568  0.666667   \n",
      "513  0.999602  1.0  0.662050  0.000000  0.500  0.000000  0.464026  0.000000   \n",
      "514  0.999801  1.0  0.670682  0.000000  0.500  0.000000  0.805311  0.000000   \n",
      "515  1.000000  1.0  0.600224  0.000000  0.595  0.130435  0.463121  0.000000   \n",
      "\n",
      "         6         7  ...      9591      9592      9593  9594      9595  \\\n",
      "0    0.595  0.037975  ...  0.000000  0.765961  0.333333  1.00  0.000000   \n",
      "1    0.640  0.050633  ...  0.011429  0.726761  0.000000  0.55  0.034749   \n",
      "2    0.595  0.113924  ...  0.005714  0.570215  0.666667  0.40  0.073359   \n",
      "3    0.595  0.113924  ...  0.005714  0.550739  0.333333  0.40  0.019305   \n",
      "4    0.595  0.012658  ...  0.022857  0.618662  0.333333  0.85  0.250965   \n",
      "..     ...       ...  ...       ...       ...       ...   ...       ...   \n",
      "511  0.640  0.012658  ...  0.005714  0.734582  0.666667  0.28  0.027027   \n",
      "512  0.300  0.113924  ...  0.005714  0.742897  0.000000  0.55  0.000000   \n",
      "513  0.500  0.000000  ...  0.011429  0.475751  1.000000  0.25  0.007722   \n",
      "514  0.500  0.000000  ...  0.040000  0.673048  0.000000  0.50  0.034749   \n",
      "515  0.595  0.063291  ...  0.005714  0.827398  0.000000  0.55  0.011583   \n",
      "\n",
      "         9596      9597   9598      9599      9600  \n",
      "0    0.451719  0.333333  1.000  0.000000  0.449404  \n",
      "1    0.782806  0.000000  0.850  0.126761  0.778795  \n",
      "2    0.516911  0.666667  0.175  0.014085  0.518516  \n",
      "3    0.529859  0.333333  0.400  0.014085  0.476583  \n",
      "4    0.491706  0.333333  0.400  0.126761  0.660895  \n",
      "..        ...       ...    ...       ...       ...  \n",
      "511  0.604401  0.000000  0.640  0.070423  0.699335  \n",
      "512  0.695675  0.000000  0.550  0.000000  0.692111  \n",
      "513  0.492997  1.000000  0.250  0.000000  0.457875  \n",
      "514  0.731303  0.000000  0.500  0.070423  0.718409  \n",
      "515  0.795045  0.000000  0.550  0.070423  0.790971  \n",
      "\n",
      "[516 rows x 9603 columns]\n"
     ]
    }
   ],
   "source": [
    "\n",
    "X=pd.concat([otherData,GDELT], axis=1)\n",
    "X.columns = X.columns.astype(str) \n",
    "\n",
    "# 归一化数据\n",
    "scaler = MinMaxScaler()\n",
    "X = X.fillna(0)\n",
    "X= pd.DataFrame(scaler.fit_transform(X), columns=X.columns)\n",
    "\n",
    "print(X)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 将数据集划分为训练集、验证集和测试集\n",
    "train_size = int(len(X) * 0.7)\n",
    "valid_size = int(len(X) * 0.2)\n",
    "test_size = len(X) - train_size - valid_size\n",
    "\n",
    "X_train, Y_train= X[:train_size],Y[:train_size]\n",
    "X_valid, Y_valid= X[train_size:(train_size + valid_size)], Y[train_size:(train_size + valid_size)]\n",
    "X_test, Y_test= X[(train_size + valid_size):], Y[(train_size + valid_size):]\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "\n",
    "def getLossAndPredictions(epochs,batch_size):\n",
    "    \n",
    "    # 2. 构建 LSTM 模型\n",
    "    model = Sequential()\n",
    "    model.add(LSTM(units=50, return_sequences=True, input_shape=(X.shape[1],1)))\n",
    "    model.add(LSTM(units=50))\n",
    "    model.add(Dense(units=1))\n",
    "    \n",
    "    # 3. 模型编译\n",
    "    model.compile(optimizer='adam', loss='mean_squared_error')\n",
    "    # model.compile(optimizer='adam', loss='mean_absolute_error')\n",
    "\n",
    "    # 4. 模型训练\n",
    "    model.fit(X_train, Y_train, epochs=epochs, batch_size=batch_size, validation_data=(X_valid, Y_valid))\n",
    "    \n",
    "    # 5. 模型评估\n",
    "    loss = model.evaluate(X_test, Y_test)\n",
    "    # 6. 模型应用（例如，进行预测）\n",
    "    predictions = model.predict(X_test)\n",
    "    \n",
    "    return [loss,predictions]\n",
    "    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[(50, 32), (50, 16), (50, 64), (10, 32), (10, 16), (10, 64), (5, 32), (5, 16), (5, 64)]\n",
      "Epoch 1/50\n",
      "11/12 [==========================>...] - ETA: 1s - loss: 1.3314"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
      "Cell \u001b[1;32mIn[16], line 21\u001b[0m\n\u001b[0;32m     19\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m params \u001b[38;5;129;01min\u001b[39;00m param_combinations:\n\u001b[0;32m     20\u001b[0m     epochs,batch_size \u001b[38;5;241m=\u001b[39m params\n\u001b[1;32m---> 21\u001b[0m     score,y_pred\u001b[38;5;241m=\u001b[39m\u001b[43mgetLossAndPredictions\u001b[49m\u001b[43m(\u001b[49m\u001b[43mepochs\u001b[49m\u001b[43m,\u001b[49m\u001b[43mbatch_size\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m     22\u001b[0m     result\u001b[38;5;241m.\u001b[39mappend([params,score,y_pred])\n\u001b[0;32m     23\u001b[0m     \u001b[38;5;28;01mif\u001b[39;00m score \u001b[38;5;241m<\u001b[39m best_score:\n",
      "Cell \u001b[1;32mIn[15], line 14\u001b[0m, in \u001b[0;36mgetLossAndPredictions\u001b[1;34m(epochs, batch_size)\u001b[0m\n\u001b[0;32m     10\u001b[0m model\u001b[38;5;241m.\u001b[39mcompile(optimizer\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124madam\u001b[39m\u001b[38;5;124m'\u001b[39m, loss\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mmean_squared_error\u001b[39m\u001b[38;5;124m'\u001b[39m)\n\u001b[0;32m     11\u001b[0m \u001b[38;5;66;03m# model.compile(optimizer='adam', loss='mean_absolute_error')\u001b[39;00m\n\u001b[0;32m     12\u001b[0m \n\u001b[0;32m     13\u001b[0m \u001b[38;5;66;03m# 4. 模型训练\u001b[39;00m\n\u001b[1;32m---> 14\u001b[0m \u001b[43mmodel\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mfit\u001b[49m\u001b[43m(\u001b[49m\u001b[43mX_train\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mY_train\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mepochs\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mepochs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mbatch_size\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mbatch_size\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mvalidation_data\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43mX_valid\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mY_valid\u001b[49m\u001b[43m)\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m     16\u001b[0m \u001b[38;5;66;03m# 5. 模型评估\u001b[39;00m\n\u001b[0;32m     17\u001b[0m loss \u001b[38;5;241m=\u001b[39m model\u001b[38;5;241m.\u001b[39mevaluate(X_test, Y_test)\n",
      "File \u001b[1;32mc:\\Users\\19151\\venv\\lib\\site-packages\\keras\\utils\\traceback_utils.py:65\u001b[0m, in \u001b[0;36mfilter_traceback.<locals>.error_handler\u001b[1;34m(*args, **kwargs)\u001b[0m\n\u001b[0;32m     63\u001b[0m filtered_tb \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[0;32m     64\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m---> 65\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m fn(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n\u001b[0;32m     66\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mException\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[0;32m     67\u001b[0m     filtered_tb \u001b[38;5;241m=\u001b[39m _process_traceback_frames(e\u001b[38;5;241m.\u001b[39m__traceback__)\n",
      "File \u001b[1;32mc:\\Users\\19151\\venv\\lib\\site-packages\\keras\\engine\\training.py:1564\u001b[0m, in \u001b[0;36mModel.fit\u001b[1;34m(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)\u001b[0m\n\u001b[0;32m   1556\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m tf\u001b[38;5;241m.\u001b[39mprofiler\u001b[38;5;241m.\u001b[39mexperimental\u001b[38;5;241m.\u001b[39mTrace(\n\u001b[0;32m   1557\u001b[0m     \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mtrain\u001b[39m\u001b[38;5;124m\"\u001b[39m,\n\u001b[0;32m   1558\u001b[0m     epoch_num\u001b[38;5;241m=\u001b[39mepoch,\n\u001b[1;32m   (...)\u001b[0m\n\u001b[0;32m   1561\u001b[0m     _r\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m1\u001b[39m,\n\u001b[0;32m   1562\u001b[0m ):\n\u001b[0;32m   1563\u001b[0m     callbacks\u001b[38;5;241m.\u001b[39mon_train_batch_begin(step)\n\u001b[1;32m-> 1564\u001b[0m     tmp_logs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mtrain_function\u001b[49m\u001b[43m(\u001b[49m\u001b[43miterator\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m   1565\u001b[0m     \u001b[38;5;28;01mif\u001b[39;00m data_handler\u001b[38;5;241m.\u001b[39mshould_sync:\n\u001b[0;32m   1566\u001b[0m         context\u001b[38;5;241m.\u001b[39masync_wait()\n",
      "File \u001b[1;32mc:\\Users\\19151\\venv\\lib\\site-packages\\tensorflow\\python\\util\\traceback_utils.py:150\u001b[0m, in \u001b[0;36mfilter_traceback.<locals>.error_handler\u001b[1;34m(*args, **kwargs)\u001b[0m\n\u001b[0;32m    148\u001b[0m filtered_tb \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[0;32m    149\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m--> 150\u001b[0m   \u001b[38;5;28;01mreturn\u001b[39;00m fn(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n\u001b[0;32m    151\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mException\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[0;32m    152\u001b[0m   filtered_tb \u001b[38;5;241m=\u001b[39m _process_traceback_frames(e\u001b[38;5;241m.\u001b[39m__traceback__)\n",
      "File \u001b[1;32mc:\\Users\\19151\\venv\\lib\\site-packages\\tensorflow\\python\\eager\\def_function.py:915\u001b[0m, in \u001b[0;36mFunction.__call__\u001b[1;34m(self, *args, **kwds)\u001b[0m\n\u001b[0;32m    912\u001b[0m compiler \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mxla\u001b[39m\u001b[38;5;124m\"\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_jit_compile \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mnonXla\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m    914\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m OptionalXlaContext(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_jit_compile):\n\u001b[1;32m--> 915\u001b[0m   result \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_call(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwds)\n\u001b[0;32m    917\u001b[0m new_tracing_count \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mexperimental_get_tracing_count()\n\u001b[0;32m    918\u001b[0m without_tracing \u001b[38;5;241m=\u001b[39m (tracing_count \u001b[38;5;241m==\u001b[39m new_tracing_count)\n",
      "File \u001b[1;32mc:\\Users\\19151\\venv\\lib\\site-packages\\tensorflow\\python\\eager\\def_function.py:947\u001b[0m, in \u001b[0;36mFunction._call\u001b[1;34m(self, *args, **kwds)\u001b[0m\n\u001b[0;32m    944\u001b[0m   \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_lock\u001b[38;5;241m.\u001b[39mrelease()\n\u001b[0;32m    945\u001b[0m   \u001b[38;5;66;03m# In this case we have created variables on the first call, so we run the\u001b[39;00m\n\u001b[0;32m    946\u001b[0m   \u001b[38;5;66;03m# defunned version which is guaranteed to never create variables.\u001b[39;00m\n\u001b[1;32m--> 947\u001b[0m   \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_stateless_fn(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwds)  \u001b[38;5;66;03m# pylint: disable=not-callable\u001b[39;00m\n\u001b[0;32m    948\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_stateful_fn \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[0;32m    949\u001b[0m   \u001b[38;5;66;03m# Release the lock early so that multiple threads can perform the call\u001b[39;00m\n\u001b[0;32m    950\u001b[0m   \u001b[38;5;66;03m# in parallel.\u001b[39;00m\n\u001b[0;32m    951\u001b[0m   \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_lock\u001b[38;5;241m.\u001b[39mrelease()\n",
      "File \u001b[1;32mc:\\Users\\19151\\venv\\lib\\site-packages\\tensorflow\\python\\eager\\function.py:2496\u001b[0m, in \u001b[0;36mFunction.__call__\u001b[1;34m(self, *args, **kwargs)\u001b[0m\n\u001b[0;32m   2493\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_lock:\n\u001b[0;32m   2494\u001b[0m   (graph_function,\n\u001b[0;32m   2495\u001b[0m    filtered_flat_args) \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_maybe_define_function(args, kwargs)\n\u001b[1;32m-> 2496\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mgraph_function\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call_flat\u001b[49m\u001b[43m(\u001b[49m\n\u001b[0;32m   2497\u001b[0m \u001b[43m    \u001b[49m\u001b[43mfiltered_flat_args\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcaptured_inputs\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mgraph_function\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mcaptured_inputs\u001b[49m\u001b[43m)\u001b[49m\n",
      "File \u001b[1;32mc:\\Users\\19151\\venv\\lib\\site-packages\\tensorflow\\python\\eager\\function.py:1862\u001b[0m, in \u001b[0;36mConcreteFunction._call_flat\u001b[1;34m(self, args, captured_inputs, cancellation_manager)\u001b[0m\n\u001b[0;32m   1858\u001b[0m possible_gradient_type \u001b[38;5;241m=\u001b[39m gradients_util\u001b[38;5;241m.\u001b[39mPossibleTapeGradientTypes(args)\n\u001b[0;32m   1859\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m (possible_gradient_type \u001b[38;5;241m==\u001b[39m gradients_util\u001b[38;5;241m.\u001b[39mPOSSIBLE_GRADIENT_TYPES_NONE\n\u001b[0;32m   1860\u001b[0m     \u001b[38;5;129;01mand\u001b[39;00m executing_eagerly):\n\u001b[0;32m   1861\u001b[0m   \u001b[38;5;66;03m# No tape is watching; skip to running the function.\u001b[39;00m\n\u001b[1;32m-> 1862\u001b[0m   \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_build_call_outputs(\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_inference_function\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mcall\u001b[49m\u001b[43m(\u001b[49m\n\u001b[0;32m   1863\u001b[0m \u001b[43m      \u001b[49m\u001b[43mctx\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcancellation_manager\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mcancellation_manager\u001b[49m\u001b[43m)\u001b[49m)\n\u001b[0;32m   1864\u001b[0m forward_backward \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_select_forward_and_backward_functions(\n\u001b[0;32m   1865\u001b[0m     args,\n\u001b[0;32m   1866\u001b[0m     possible_gradient_type,\n\u001b[0;32m   1867\u001b[0m     executing_eagerly)\n\u001b[0;32m   1868\u001b[0m forward_function, args_with_tangents \u001b[38;5;241m=\u001b[39m forward_backward\u001b[38;5;241m.\u001b[39mforward()\n",
      "File \u001b[1;32mc:\\Users\\19151\\venv\\lib\\site-packages\\tensorflow\\python\\eager\\function.py:499\u001b[0m, in \u001b[0;36m_EagerDefinedFunction.call\u001b[1;34m(self, ctx, args, cancellation_manager)\u001b[0m\n\u001b[0;32m    497\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m _InterpolateFunctionError(\u001b[38;5;28mself\u001b[39m):\n\u001b[0;32m    498\u001b[0m   \u001b[38;5;28;01mif\u001b[39;00m cancellation_manager \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[1;32m--> 499\u001b[0m     outputs \u001b[38;5;241m=\u001b[39m \u001b[43mexecute\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mexecute\u001b[49m\u001b[43m(\u001b[49m\n\u001b[0;32m    500\u001b[0m \u001b[43m        \u001b[49m\u001b[38;5;28;43mstr\u001b[39;49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43msignature\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mname\u001b[49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m    501\u001b[0m \u001b[43m        \u001b[49m\u001b[43mnum_outputs\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_num_outputs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m    502\u001b[0m \u001b[43m        \u001b[49m\u001b[43minputs\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m    503\u001b[0m \u001b[43m        \u001b[49m\u001b[43mattrs\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mattrs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m    504\u001b[0m \u001b[43m        \u001b[49m\u001b[43mctx\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mctx\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m    505\u001b[0m   \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m    506\u001b[0m     outputs \u001b[38;5;241m=\u001b[39m execute\u001b[38;5;241m.\u001b[39mexecute_with_cancellation(\n\u001b[0;32m    507\u001b[0m         
\u001b[38;5;28mstr\u001b[39m(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39msignature\u001b[38;5;241m.\u001b[39mname),\n\u001b[0;32m    508\u001b[0m         num_outputs\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_num_outputs,\n\u001b[1;32m   (...)\u001b[0m\n\u001b[0;32m    511\u001b[0m         ctx\u001b[38;5;241m=\u001b[39mctx,\n\u001b[0;32m    512\u001b[0m         cancellation_manager\u001b[38;5;241m=\u001b[39mcancellation_manager)\n",
      "File \u001b[1;32mc:\\Users\\19151\\venv\\lib\\site-packages\\tensorflow\\python\\eager\\execute.py:54\u001b[0m, in \u001b[0;36mquick_execute\u001b[1;34m(op_name, num_outputs, inputs, attrs, ctx, name)\u001b[0m\n\u001b[0;32m     52\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m     53\u001b[0m   ctx\u001b[38;5;241m.\u001b[39mensure_initialized()\n\u001b[1;32m---> 54\u001b[0m   tensors \u001b[38;5;241m=\u001b[39m \u001b[43mpywrap_tfe\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mTFE_Py_Execute\u001b[49m\u001b[43m(\u001b[49m\u001b[43mctx\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_handle\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mdevice_name\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mop_name\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m     55\u001b[0m \u001b[43m                                      \u001b[49m\u001b[43minputs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mattrs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mnum_outputs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m     56\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m core\u001b[38;5;241m.\u001b[39m_NotOkStatusException \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[0;32m     57\u001b[0m   \u001b[38;5;28;01mif\u001b[39;00m name \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n",
      "\u001b[1;31mKeyboardInterrupt\u001b[0m: "
     ]
    }
   ],
   "source": [
    "result=list()\n",
    "\n",
    "param_grid = {\n",
    "    'epochs': [50,10,5],\n",
    "    'batch_size':[32,16,64]\n",
    "}\n",
    "\n",
    "# param_grid = {\n",
    "#     'epochs': [50],\n",
    "#     'batch_size':[32]\n",
    "# }\n",
    "\n",
    "param_combinations = list(product(param_grid['epochs'], param_grid['batch_size']))\n",
    "\n",
    "print(param_combinations)\n",
    "best_score = 2*32\n",
    "best_params = None\n",
    "\n",
    "for params in param_combinations:\n",
    "    epochs,batch_size = params\n",
    "    score,y_pred=getLossAndPredictions(epochs,batch_size)\n",
    "    result.append([params,score,y_pred])\n",
    "    if score < best_score:\n",
    "        best_score = score\n",
    "        best_params = params\n",
    "print(best_score)\n",
    "print(best_params)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "# 显示网格\n",
    "plt.grid(True)\n",
    "plt.xticks(rotation=45)\n",
    "# 设置y轴范围\n",
    "# plt.ylim(1, 1.5)  \n",
    "\n",
    "plt.rcParams['font.family'] = 'SimHei'  # 替换为你选择的字体\n",
    "\n",
    "for i in result:\n",
    "    label,loss,predictions=i\n",
    "    plt.plot(Y_test.index,predictions,label=str(label))\n",
    "    # print('Test Loss:', loss)\n",
    "\n",
    "# 绘制折线图\n",
    "plt.plot(Y_test.index, Y_test,label='实际值')\n",
    "\n",
    "plt.title('不同超参数的预测结果')\n",
    "plt.xlabel('Date')\n",
    "plt.ylabel('黄金价格 千美元/盎司')\n",
    "\n",
    "plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n",
    "\n",
    "# 显示图形\n",
    "plt.show()\n",
    "        "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "result0=result[:3]\n",
    "result1=result[3:6]\n",
    "result2=result[6:]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Group 1 of 3: first three hyper-parameter settings vs the actual series.\n",
    "plt.grid(True)\n",
    "plt.xticks(rotation=45)\n",
    "plt.ylim(1.2, 1.35)  # zoomed y-axis so the overlapping curves stay distinguishable\n",
    "\n",
    "plt.rcParams['font.family'] = 'SimHei'  # CJK-capable font so the Chinese labels render\n",
    "\n",
    "# Distinct linestyles keep the three prediction curves readable.\n",
    "for (params, _loss, preds), style in zip(result0, ['-.', '--', ':']):\n",
    "    plt.plot(Y_test.index, preds, label=str(params), linestyle=style)\n",
    "\n",
    "# Actual values drawn last, solid, so they sit on top.\n",
    "plt.plot(Y_test.index, Y_test, label='实际值', linestyle='-')\n",
    "\n",
    "plt.title('不同超参数的部分预测结果')\n",
    "plt.xlabel('Date')\n",
    "plt.ylabel('黄金价格 千美元/盎司')\n",
    "\n",
    "plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n",
    "\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Group 2 of 3: middle three hyper-parameter settings vs the actual series.\n",
    "plt.grid(True)\n",
    "plt.xticks(rotation=45)\n",
    "# y-axis range left automatic for this group\n",
    "\n",
    "plt.rcParams['font.family'] = 'SimHei'  # CJK-capable font so the Chinese labels render\n",
    "\n",
    "# Distinct linestyles keep the three prediction curves readable.\n",
    "for (params, _loss, preds), style in zip(result1, ['-.', '--', ':']):\n",
    "    plt.plot(Y_test.index, preds, label=str(params), linestyle=style)\n",
    "\n",
    "# Actual values drawn last, solid, so they sit on top.\n",
    "plt.plot(Y_test.index, Y_test, label='实际值', linestyle='-')\n",
    "\n",
    "plt.title('不同超参数的部分预测结果')\n",
    "plt.xlabel('Date')\n",
    "plt.ylabel('黄金价格 千美元/盎司')\n",
    "\n",
    "plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n",
    "\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Group 3 of 3: last three hyper-parameter settings vs the actual series.\n",
    "plt.grid(True)\n",
    "plt.xticks(rotation=45)\n",
    "# y-axis range left automatic for this group\n",
    "\n",
    "plt.rcParams['font.family'] = 'SimHei'  # CJK-capable font so the Chinese labels render\n",
    "\n",
    "# Distinct linestyles keep the three prediction curves readable.\n",
    "for (params, _loss, preds), style in zip(result2, ['-.', '--', ':']):\n",
    "    plt.plot(Y_test.index, preds, label=str(params), linestyle=style)\n",
    "\n",
    "# Actual values drawn last, solid, so they sit on top.\n",
    "plt.plot(Y_test.index, Y_test, label='实际值', linestyle='-')\n",
    "\n",
    "plt.title('不同超参数的部分预测结果')\n",
    "plt.xlabel('Date')\n",
    "plt.ylabel('黄金价格 千美元/盎司')\n",
    "\n",
    "plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n",
    "\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Print every run's loss and remember the entry matching the best\n",
    "# (epochs, batch_size) pair found by the grid search.\n",
    "tmp = None  # fix: initialise so a later read cannot raise NameError when no label matches\n",
    "for entry in result:\n",
    "    label, loss, predictions = entry\n",
    "    if label[0] == best_params[0] and label[1] == best_params[1]:\n",
    "        tmp = entry\n",
    "    print(label, loss)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Group B: train the same LSTM architecture on the alternative feature set.\n",
    "XX = otherData.fillna(0)\n",
    "\n",
    "# fix: fit the scaler on the training rows only, then transform everything —\n",
    "# the previous fit_transform on the full frame leaked validation/test\n",
    "# statistics into the scaled features.\n",
    "scaler.fit(XX[:train_size])\n",
    "XX = pd.DataFrame(scaler.transform(XX), columns=XX.columns)\n",
    "\n",
    "XX_train = XX[:train_size]\n",
    "XX_valid = XX[train_size:(train_size + valid_size)]\n",
    "XX_test = XX[(train_size + valid_size):]\n",
    "\n",
    "# Reuse the best hyper-parameters found for group A.\n",
    "epochs = best_params[0]\n",
    "batch_size = best_params[1]\n",
    "\n",
    "# 2. Build the LSTM model: two stacked 50-unit LSTM layers + linear output.\n",
    "model = Sequential()\n",
    "model.add(LSTM(units=50, return_sequences=True, input_shape=(XX.shape[1], 1)))\n",
    "model.add(LSTM(units=50))\n",
    "model.add(Dense(units=1))\n",
    "\n",
    "# 3. Compile and train.\n",
    "model.compile(optimizer='adam', loss='mean_squared_error')\n",
    "model.fit(XX_train, Y_train, epochs=epochs, batch_size=batch_size, validation_data=(XX_valid, Y_valid))\n",
    "\n",
    "# 5. Evaluate on the held-out test window.\n",
    "loss2 = model.evaluate(XX_test, Y_test)\n",
    "\n",
    "# 6. Keep the predictions for the comparison plot below.\n",
    "predictions2 = model.predict(XX_test)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Recover the best group-A run (falling back to the first entry) and compare\n",
    "# group A vs group B predictions against the actual series.\n",
    "tmp = result[0]\n",
    "for entry in result:\n",
    "    label, loss, predictions = entry\n",
    "    if label[0] == best_params[0] and label[1] == best_params[1]:\n",
    "        tmp = entry\n",
    "label, loss1, predictions1 = tmp\n",
    "print(tmp)\n",
    "\n",
    "plt.rcParams['font.family'] = 'SimHei'  # CJK-capable font so the Chinese labels render\n",
    "plt.plot(Y_test.index, Y_test, label='实际值')\n",
    "plt.plot(Y_test.index, predictions1, label=' A 组', linestyle='--')\n",
    "plt.plot(Y_test.index, predictions2, label=' B 组', linestyle=':')\n",
    "\n",
    "plt.grid(True)\n",
    "plt.xticks(rotation=45)\n",
    "plt.legend()\n",
    "\n",
    "plt.title('A 组和 B 组的预测结果')\n",
    "plt.xlabel('Date')\n",
    "plt.ylabel('黄金价格 千美元/盎司')\n",
    "\n",
    "plt.show()\n",
    "\n",
    "print('Test Loss1:', loss1)\n",
    "print('Test Loss2:', loss2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# For each series report its own variance and its mean absolute error vs the\n",
    "# actual test values (the Y_test row is a zero-MAE sanity baseline).\n",
    "# NOTE(review): '均方差' is printed for np.var, i.e. the variance of the series\n",
    "# itself, not a squared error against the actual values — confirm intent.\n",
    "# NOTE(review): if Y_test is 1-D while the predictions are (n, 1), j - series\n",
    "# broadcasts to (n, n) and inflates the MAE — verify the shapes match.\n",
    "j = np.array(Y_test)\n",
    "for series in [predictions1, predictions2, Y_test]:\n",
    "    data = np.array(series)\n",
    "    variance = np.var(data)\n",
    "    print(\"均方差:\", variance)\n",
    "    mae = np.mean(np.abs(j - series))\n",
    "    print(\"平均绝对误差:\", mae)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
