{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "1259b655-cb53-4a6e-8899-b3acb4a66560",
   "metadata": {},
   "source": [
    "# 激活函数及其导数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "d03e8432-0d0e-4956-8809-7bca0b6de217",
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "\n",
    "def sigmoid(x):\n",
    "    # Logistic sigmoid: maps any real x into (0, 1).\n",
    "    return 1 / (1 + np.exp(-x))\n",
    "\n",
    "def sigmoid_derivative(x):\n",
    "    # Derivative of sigmoid w.r.t. its pre-activation input x: s(x) * (1 - s(x)).\n",
    "    return sigmoid(x) * (1 - sigmoid(x))\n",
    "\n",
    "def relu(x):\n",
    "    # Rectified linear unit: max(0, x), element-wise.\n",
    "    return np.maximum(0, x)\n",
    "\n",
    "def relu_derivative(x):\n",
    "    # Subgradient of ReLU: 0 for x <= 0, 1 otherwise (0 chosen at x == 0).\n",
    "    return np.where(x <= 0, 0, 1)\n",
    "\n",
    "# Softmax for a multi-class output layer (row-wise over a 2-D batch).\n",
    "def softmax(x):\n",
    "    # Subtract the row max before exponentiating for numerical stability.\n",
    "    exps = np.exp(x - np.max(x, axis=1, keepdims=True))\n",
    "    return exps / np.sum(exps, axis=1, keepdims=True)\n",
    "\n",
    "# Cross-entropy loss averaged over the batch; expects one-hot y_true.\n",
    "def cross_entropy_loss(y_pred, y_true):\n",
    "    m = y_true.shape[0]\n",
    "    # 1e-9 guards against log(0) when a predicted probability is exactly 0.\n",
    "    loss = -np.sum(y_true * np.log(y_pred + 1e-9)) / m\n",
    "    return loss\n",
    "\n",
    "# Mean-squared-error loss and its derivative w.r.t. y_pred.\n",
    "def mse_loss(y_pred, y_true):\n",
    "    return np.mean((y_pred - y_true) ** 2)\n",
    "\n",
    "def mse_loss_derivative(y_pred, y_true):\n",
    "    # Gradient of 0.5 * sum((y_pred - y_true)^2); the 1/m factor is applied by the caller.\n",
    "    return y_pred - y_true\n",
    "\n",
    "\n",
    "# Convert an integer class vector into a binary (one-hot) class matrix.\n",
    "def to_categorical(y, num_classes):\n",
    "    return np.eye(num_classes)[y.reshape(-1)]\n",
    "\n",
    "def linear_activation(x):\n",
    "    # Identity activation (useful for regression output layers).\n",
    "    return x\n",
    "\n",
    "def linear_derivative(x):\n",
    "    # Derivative of the identity is 1 everywhere.\n",
    "    return np.ones_like(x)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "cb186709-5fc2-4f73-b946-289a8735331a",
   "metadata": {},
   "source": [
    "# MLP类定义"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "1ceaf678-3b10-424d-9cd5-242a95c2c9b3",
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "\n",
    "class MLP:\n",
    "    \"\"\"Minimal fully-connected network trained with MSE loss.\n",
    "\n",
    "    layer_sizes : list of layer widths, e.g. [n_in, 128, 64, 1].\n",
    "    activation  : 'relu', 'sigmoid' or anything else (treated as linear),\n",
    "                  applied to the hidden layers only. The output layer is\n",
    "                  linear, matching backward(), which uses\n",
    "                  dA = y_pred - y_true (no activation derivative) for it.\n",
    "    optimizer   : optional object with update(params, grads) that modifies\n",
    "                  the parameter arrays in place; when None, plain gradient\n",
    "                  descent with self.learning_rate is used.\n",
    "    \"\"\"\n",
    "    def __init__(self, layer_sizes, activation='linear', optimizer=None):\n",
    "        self.layer_sizes = layer_sizes\n",
    "        self.activation = activation\n",
    "        self.optimizer = optimizer\n",
    "        self.learning_rate = 0.01  # default so backward() works before train() sets it\n",
    "        # He-style initialization: scale by sqrt(2 / fan_in).\n",
    "        self.weights = [np.random.randn(x, y) * np.sqrt(2. / x) for x, y in zip(layer_sizes[:-1], layer_sizes[1:])]\n",
    "        self.biases = [np.zeros((1, y)) for y in layer_sizes[1:]]\n",
    "\n",
    "        if activation == 'relu':\n",
    "            self.activation_func = relu\n",
    "            self.activation_derivative = relu_derivative\n",
    "        elif activation == 'sigmoid':\n",
    "            self.activation_func = sigmoid\n",
    "            self.activation_derivative = sigmoid_derivative\n",
    "        else:\n",
    "            self.activation_func = linear_activation\n",
    "            self.activation_derivative = linear_derivative\n",
    "\n",
    "    def forward(self, X):\n",
    "        \"\"\"Forward pass; caches activations (self.A) and pre-activations\n",
    "        (self.Z) for backward(). Returns the network output.\"\"\"\n",
    "        self.A = [X]\n",
    "        self.Z = []\n",
    "        last = len(self.weights) - 1\n",
    "        for i, (w, b) in enumerate(zip(self.weights, self.biases)):\n",
    "            z = np.dot(self.A[-1], w) + b\n",
    "            self.Z.append(z)\n",
    "            # Hidden layers use the configured activation; the output layer\n",
    "            # is linear so that the MSE gradient in backward() is exact\n",
    "            # (the previous version applied the activation to the output\n",
    "            # layer too, but never differentiated it in backward()).\n",
    "            self.A.append(self.activation_func(z) if i < last else z)\n",
    "        return self.A[-1]\n",
    "\n",
    "    def backward(self, X, Y):\n",
    "        \"\"\"One full-batch gradient step on (X, Y) using the MSE loss.\n",
    "        Y is expected to already be in final target form (e.g. one-hot).\"\"\"\n",
    "        m = Y.shape[0]\n",
    "        self.forward(X)\n",
    "        # Linear output layer => gradient w.r.t. its pre-activation is\n",
    "        # simply the MSE derivative y_pred - y_true.\n",
    "        dA = mse_loss_derivative(self.A[-1], Y)\n",
    "        dWs = []\n",
    "        dBs = []\n",
    "        for i in reversed(range(len(self.weights))):\n",
    "            dW = np.dot(self.A[i].T, dA) / m\n",
    "            dB = np.sum(dA, axis=0, keepdims=True) / m\n",
    "            dWs.insert(0, dW)\n",
    "            dBs.insert(0, dB)\n",
    "            if i > 0:\n",
    "                # Apply the activation derivative to the *pre*-activation\n",
    "                # self.Z[i-1]; the derivative helpers (e.g. sigmoid_derivative)\n",
    "                # expect z, not the activated output self.A[i].\n",
    "                dA = np.dot(dA, self.weights[i].T) * self.activation_derivative(self.Z[i - 1])\n",
    "        if self.optimizer:\n",
    "            # Optimizers update the parameter arrays in place.\n",
    "            self.optimizer.update(self.weights + self.biases, dWs + dBs)\n",
    "        else:\n",
    "            for i in range(len(self.weights)):\n",
    "                self.weights[i] -= self.learning_rate * dWs[i]\n",
    "                self.biases[i] -= self.learning_rate * dBs[i]\n",
    "\n",
    "    def train(self, X, Y, epochs, learning_rate=0.01):\n",
    "        \"\"\"Full-batch training loop; logs loss and MAE every 100 epochs.\n",
    "        NOTE(review): relies on mean_absolute_error() being defined by a\n",
    "        later notebook cell before this method is called.\"\"\"\n",
    "        self.learning_rate = learning_rate\n",
    "        for epoch in range(epochs):\n",
    "            self.backward(X, Y)\n",
    "            if epoch % 100 == 0:\n",
    "                y_pred = self.forward(X)\n",
    "                loss = mse_loss(y_pred, Y)\n",
    "                mae = mean_absolute_error(Y, y_pred)\n",
    "                print(f\"Epoch {epoch}, Loss: {loss}, MAE: {mae}\")\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "0b15c52e-2867-4771-81ef-00078883ed34",
   "metadata": {},
   "source": [
    "# 优化器"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "d7dc619e-8d36-4c9b-b3a7-6b9f5cf4cd73",
   "metadata": {},
   "source": [
    "## 优化器基类"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "ecbf2330-086d-415c-9eb2-e6d0ce4cda7a",
   "metadata": {},
   "outputs": [],
   "source": [
    "class Optimizer:\n",
    "    \"\"\"Base class for optimizers; subclasses implement update().\"\"\"\n",
    "    def __init__(self, learning_rate=0.01):\n",
    "        self.learning_rate = learning_rate  # step size shared by all subclasses\n",
    "\n",
    "    def update(self, params, grads):\n",
    "        # Parameter-update rule; must be overridden by subclasses.\n",
    "        raise NotImplementedError(\"This method should be overridden by subclasses.\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "28c83fd7-5d3d-4db9-9cae-8bb79d502a7d",
   "metadata": {
    "jp-MarkdownHeadingCollapsed": true
   },
   "source": [
    "## 带动量的SGD（随机梯度下降）"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "cf807216-ae0d-49bd-80e5-bcf0db5eddfa",
   "metadata": {},
   "outputs": [],
   "source": [
    "class SGDMomentum(Optimizer):\n",
    "    \"\"\"Stochastic gradient descent with classical momentum.\"\"\"\n",
    "    def __init__(self, learning_rate=0.01, momentum=0.9):\n",
    "        super().__init__(learning_rate)  # base class stores the learning rate\n",
    "        self.momentum = momentum  # momentum factor (velocity decay)\n",
    "        self.velocity = None  # lazily initialized to match parameter shapes on first update()\n",
    "\n",
    "    def update(self, params, grads):\n",
    "        if self.velocity is None:\n",
    "            # First call: create one zero velocity buffer per parameter.\n",
    "            self.velocity = [np.zeros_like(p) for p in params]\n",
    "\n",
    "        for v, p, g in zip(self.velocity, params, grads):\n",
    "            v[:] = self.momentum * v - self.learning_rate * g  # update velocity in place\n",
    "            p += v  # in-place parameter update (callers rely on this)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "7baf17c0-996d-44e8-befb-2517f8525db8",
   "metadata": {
    "jp-MarkdownHeadingCollapsed": true
   },
   "source": [
    "## Adagrad（自适应梯度算法）"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "3c91f184-8f56-419d-af32-4987191d42a3",
   "metadata": {},
   "outputs": [],
   "source": [
    "class Adagrad(Optimizer):\n",
    "    \"\"\"Adagrad: per-parameter step sizes from accumulated squared gradients.\"\"\"\n",
    "    def __init__(self, learning_rate=0.01, epsilon=1e-8):\n",
    "        super().__init__(learning_rate)  # base class stores the learning rate\n",
    "        self.epsilon = epsilon  # small constant to avoid division by zero\n",
    "        self.cache = None  # running sum of squared gradients, lazily initialized\n",
    "\n",
    "    def update(self, params, grads):\n",
    "        if self.cache is None:\n",
    "            # First call: create one zero accumulator per parameter.\n",
    "            self.cache = [np.zeros_like(p) for p in params]\n",
    "\n",
    "        for cache, p, g in zip(self.cache, params, grads):\n",
    "            cache[:] += g ** 2  # accumulate squared gradients in place\n",
    "            p -= self.learning_rate * g / (np.sqrt(cache) + self.epsilon)  # scaled in-place update\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "2fc00cb1-1582-41f3-b571-67215237dff3",
   "metadata": {
    "jp-MarkdownHeadingCollapsed": true
   },
   "source": [
    "## Adam（自适应矩估计算法）"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "b864e01c-2426-48c3-be66-e4f23ee3ee48",
   "metadata": {},
   "outputs": [],
   "source": [
    "class Adam(Optimizer):\n",
    "    \"\"\"Adam: adaptive moment estimation with bias correction.\"\"\"\n",
    "    def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8):\n",
    "        super().__init__(learning_rate)  # base class stores the learning rate\n",
    "        self.beta1 = beta1  # exponential decay rate for the first-moment estimate\n",
    "        self.beta2 = beta2  # exponential decay rate for the second-moment estimate\n",
    "        self.epsilon = epsilon  # small constant to avoid division by zero\n",
    "        self.m = None  # first-moment estimates, lazily initialized\n",
    "        self.v = None  # second-moment estimates, lazily initialized\n",
    "        self.t = 0  # time step, used for bias correction\n",
    "\n",
    "    def update(self, params, grads):\n",
    "        if self.m is None:\n",
    "            # First call: create zero moment buffers matching the parameter shapes.\n",
    "            self.m = [np.zeros_like(p) for p in params]\n",
    "            self.v = [np.zeros_like(p) for p in params]\n",
    "\n",
    "        self.t += 1  # advance the time step\n",
    "        for m, v, p, g in zip(self.m, self.v, params, grads):\n",
    "            m[:] = self.beta1 * m + (1 - self.beta1) * g  # update first moment in place\n",
    "            v[:] = self.beta2 * v + (1 - self.beta2) * (g ** 2)  # update second moment in place\n",
    "            m_hat = m / (1 - self.beta1 ** self.t)  # bias-corrected first moment\n",
    "            v_hat = v / (1 - self.beta2 ** self.t)  # bias-corrected second moment\n",
    "            p -= self.learning_rate * m_hat / (np.sqrt(v_hat) + self.epsilon)  # in-place parameter update\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "ee5ee648-6c52-4fdb-ab64-39521d9a9b29",
   "metadata": {
    "jp-MarkdownHeadingCollapsed": true
   },
   "source": [
    "## 在SGDMomentum中加入Elastic Net正则化"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "af921732-0dec-4695-89e6-1af30afde76d",
   "metadata": {},
   "outputs": [],
   "source": [
    "class SGDMomentumElasticNet(SGDMomentum):\n",
    "    \"\"\"SGD with momentum plus Elastic Net (L1 + L2) regularization.\"\"\"\n",
    "    def __init__(self, learning_rate=0.01, momentum=0.9, lambda_l1=0.0, lambda_l2=0.0):\n",
    "        super().__init__(learning_rate, momentum)\n",
    "        self.lambda_l1 = lambda_l1  # L1 regularization coefficient\n",
    "        self.lambda_l2 = lambda_l2  # L2 regularization coefficient\n",
    "\n",
    "    def update(self, params, grads):\n",
    "        if self.velocity is None:\n",
    "            self.velocity = [np.zeros_like(p) for p in params]\n",
    "\n",
    "        for v, p, g in zip(self.velocity, params, grads):\n",
    "            # Add the Elastic Net gradient (L1 + L2). Build a new array\n",
    "            # instead of 'g += ...', which mutated the caller's gradient\n",
    "            # buffer in place.\n",
    "            g = g + self.lambda_l1 * np.sign(p) + 2 * self.lambda_l2 * p\n",
    "\n",
    "            v[:] = self.momentum * v - self.learning_rate * g  # update velocity in place\n",
    "            p += v  # in-place parameter update (callers rely on this)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "db388cf0-032d-47ec-8f9a-c5bbc49f4b0e",
   "metadata": {
    "jp-MarkdownHeadingCollapsed": true
   },
   "source": [
    "## 在Adagrad中加入Elastic Net正则化"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "507fdd41-c89a-452a-a9f7-cea2dbc73e0d",
   "metadata": {},
   "outputs": [],
   "source": [
    "class AdagradElasticNet(Adagrad):\n",
    "    \"\"\"Adagrad plus Elastic Net (L1 + L2) regularization.\"\"\"\n",
    "    def __init__(self, learning_rate=0.01, epsilon=1e-8, lambda_l1=0.0, lambda_l2=0.0):\n",
    "        super().__init__(learning_rate, epsilon)\n",
    "        self.lambda_l1 = lambda_l1  # L1 regularization coefficient\n",
    "        self.lambda_l2 = lambda_l2  # L2 regularization coefficient\n",
    "\n",
    "    def update(self, params, grads):\n",
    "        if self.cache is None:\n",
    "            self.cache = [np.zeros_like(p) for p in params]\n",
    "\n",
    "        for cache, p, g in zip(self.cache, params, grads):\n",
    "            # Add the Elastic Net gradient (L1 + L2). Build a new array\n",
    "            # instead of 'g += ...', which mutated the caller's gradient\n",
    "            # buffer in place.\n",
    "            g = g + self.lambda_l1 * np.sign(p) + 2 * self.lambda_l2 * p\n",
    "\n",
    "            cache[:] += g ** 2  # accumulate squared (regularized) gradients in place\n",
    "            p -= self.learning_rate * g / (np.sqrt(cache) + self.epsilon)  # in-place update\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "8372b146-b160-46f2-acc9-5615671a986b",
   "metadata": {},
   "source": [
    "## 在Adam中加入Elastic Net正则化"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "0db194f8-4e54-43e5-9796-8c09ce522997",
   "metadata": {},
   "outputs": [],
   "source": [
    "class AdamElasticNet(Adam):\n",
    "    \"\"\"Adam plus Elastic Net (L1 + L2) regularization.\"\"\"\n",
    "    def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8, lambda_l1=0.0, lambda_l2=0.0):\n",
    "        super().__init__(learning_rate, beta1, beta2, epsilon)\n",
    "        self.lambda_l1 = lambda_l1  # L1 regularization coefficient\n",
    "        self.lambda_l2 = lambda_l2  # L2 regularization coefficient\n",
    "\n",
    "    def update(self, params, grads):\n",
    "        if self.m is None or self.v is None:\n",
    "            # First call: create zero moment buffers matching the parameter shapes.\n",
    "            self.m = [np.zeros_like(p) for p in params]\n",
    "            self.v = [np.zeros_like(p) for p in params]\n",
    "\n",
    "        self.t += 1  # advance the time step\n",
    "        for m, v, p, g in zip(self.m, self.v, params, grads):\n",
    "            # Add the Elastic Net gradient (L1 + L2). Build a new array\n",
    "            # instead of 'g += ...', which mutated the caller's gradient\n",
    "            # buffer in place.\n",
    "            g = g + self.lambda_l1 * np.sign(p) + 2 * self.lambda_l2 * p\n",
    "            m[:] = self.beta1 * m + (1 - self.beta1) * g\n",
    "            v[:] = self.beta2 * v + (1 - self.beta2) * (g ** 2)\n",
    "            m_hat = m / (1 - self.beta1 ** self.t)  # bias-corrected first moment\n",
    "            v_hat = v / (1 - self.beta2 ** self.t)  # bias-corrected second moment\n",
    "            p -= self.learning_rate * m_hat / (np.sqrt(v_hat) + self.epsilon)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "c0e30792-6a1e-4cc2-baf3-c0b44b9a8cc4",
   "metadata": {},
   "source": [
    "# 模型评估方法定义"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "a7a72ebd-037f-4b16-9f47-3b2f2213ecb3",
   "metadata": {},
   "outputs": [],
   "source": [
    "def confusion_matrix(y_true, y_pred):\n",
    "    \"\"\"\n",
    "    Compute the confusion matrix.\n",
    "    y_true: array of true labels.\n",
    "    y_pred: array of predicted labels.\n",
    "    Rows are true labels and columns predicted labels, both ordered by\n",
    "    np.unique over all observed labels. Mapping labels to indices (rather\n",
    "    than indexing with the raw label values) makes this correct when\n",
    "    labels are not the contiguous integers 0..K-1.\n",
    "    \"\"\"\n",
    "    unique_labels = np.unique(np.concatenate([np.asarray(y_true).ravel(), np.asarray(y_pred).ravel()]))\n",
    "    label_to_index = {label: i for i, label in enumerate(unique_labels)}\n",
    "    matrix = np.zeros((len(unique_labels), len(unique_labels)), dtype=int)\n",
    "    for true_label, pred_label in zip(y_true, y_pred):\n",
    "        matrix[label_to_index[true_label], label_to_index[pred_label]] += 1\n",
    "    return matrix\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "c505e566-efc8-47eb-a74e-8ccc12edf0c7",
   "metadata": {},
   "outputs": [],
   "source": [
    "def roc_curve(y_true, y_score):\n",
    "    \"\"\"\n",
    "    Compute FPR and TPR for an ROC curve.\n",
    "    y_true: array of binary true labels (0/1).\n",
    "    y_score: array of predicted scores or probabilities.\n",
    "    Thresholds are scanned from high to low so that fpr and tpr are\n",
    "    non-decreasing; with the previous ascending scan, np.trapz in auc()\n",
    "    integrated over a decreasing x-axis and returned a negative area.\n",
    "    \"\"\"\n",
    "    y_true = np.asarray(y_true)\n",
    "    y_score = np.asarray(y_score)\n",
    "    thresholds = np.unique(y_score)[::-1]  # descending\n",
    "    P = np.sum(y_true == 1)  # total positives\n",
    "    N = np.sum(y_true == 0)  # total negatives\n",
    "    tpr = []\n",
    "    fpr = []\n",
    "    \n",
    "    for threshold in thresholds:\n",
    "        y_pred = (y_score >= threshold).astype(int)\n",
    "        FP = np.sum((y_pred == 1) & (y_true == 0))\n",
    "        TP = np.sum((y_pred == 1) & (y_true == 1))\n",
    "        \n",
    "        # Guard against division by zero when one class is absent.\n",
    "        tpr.append(TP / P if P else 0.0)\n",
    "        fpr.append(FP / N if N else 0.0)\n",
    "    \n",
    "    return np.array(fpr), np.array(tpr), thresholds\n",
    "\n",
    "def auc(fpr, tpr):\n",
    "    \"\"\"\n",
    "    Area under the ROC curve via the trapezoidal rule.\n",
    "    fpr: false-positive-rate array (non-decreasing).\n",
    "    tpr: true-positive-rate array.\n",
    "    \"\"\"\n",
    "    return np.trapz(tpr, fpr)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "d14b574d-9305-428d-99db-f9ec4e0c33bc",
   "metadata": {},
   "outputs": [],
   "source": [
    "def cross_validation_split(dataset, folds=3):\n",
    "    \"\"\"\n",
    "    Randomly partition a dataset into `folds` folds.\n",
    "    dataset: sequence of samples.\n",
    "    folds: number of folds.\n",
    "    Any remainder after len(dataset) // folds per fold is discarded.\n",
    "    \"\"\"\n",
    "    from random import randrange  # fix: was used without ever being imported\n",
    "    dataset_split = []\n",
    "    dataset_copy = list(dataset)\n",
    "    fold_size = int(len(dataset) / folds)\n",
    "    for _ in range(folds):\n",
    "        fold = []\n",
    "        while len(fold) < fold_size:\n",
    "            # Draw without replacement by popping a random remaining sample.\n",
    "            index = randrange(len(dataset_copy))\n",
    "            fold.append(dataset_copy.pop(index))\n",
    "        dataset_split.append(fold)\n",
    "    return dataset_split\n",
    "\n",
    "def cross_validate(model, data, labels, folds=3):\n",
    "    \"\"\"\n",
    "    Run k-fold cross-validation.\n",
    "    model: object exposing fit(X, y) and predict(X).\n",
    "    data: feature array/sequence.\n",
    "    labels: target labels aligned with data.\n",
    "    folds: number of folds.\n",
    "    Returns the list of per-fold accuracies.\n",
    "    \"\"\"\n",
    "    results = []\n",
    "    cv_splits = cross_validation_split(list(zip(data, labels)), folds)\n",
    "    \n",
    "    for fold in cv_splits:\n",
    "        # Train on every fold except the held-out one.\n",
    "        train_set = list(cv_splits)\n",
    "        train_set.remove(fold)\n",
    "        train_set = sum(train_set, [])  # flatten the remaining folds\n",
    "        test_set = list(fold)\n",
    "        \n",
    "        train_data, train_labels = zip(*train_set)\n",
    "        test_data, test_labels = zip(*test_set)\n",
    "        \n",
    "        model.fit(np.array(train_data), np.array(train_labels))\n",
    "        predictions = model.predict(np.array(test_data))\n",
    "        \n",
    "        accuracy = np.mean(predictions == np.array(test_labels))\n",
    "        results.append(accuracy)\n",
    "    \n",
    "    return results\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "5a72f301-f6b7-480f-b0fe-7711c902da61",
   "metadata": {},
   "outputs": [],
   "source": [
    "def mean_absolute_error(y_true, y_pred):\n",
    "    \"\"\"Mean absolute error: average of |y_pred - y_true| over all elements.\"\"\"\n",
    "    absolute_errors = np.abs(y_pred - y_true)\n",
    "    return np.mean(absolute_errors)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "2bb34099-551d-4b8f-a12d-f572de84aefe",
   "metadata": {},
   "source": [
    "# 模型训练"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "30963cbf-7e18-42d6-9862-88927dec531c",
   "metadata": {},
   "source": [
    "## 自己写的MLP"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "39c5f848-601e-4430-ad80-be24ffe90f8d",
   "metadata": {},
   "outputs": [],
   "source": [
    "from tensorflow.keras.datasets import boston_housing\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.preprocessing import StandardScaler\n",
    "\n",
    "# Load the Boston housing dataset (already split into train/test).\n",
    "(X, y), (X_test, y_test) = boston_housing.load_data()\n",
    "\n",
    "# Split the training portion again to obtain separate train and validation sets.\n",
    "X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=42)\n",
    "\n",
    "# Standardize features: fit on the training set only, then apply the same\n",
    "# transform to validation/test to avoid information leakage.\n",
    "scaler = StandardScaler()\n",
    "X_train_scaled = scaler.fit_transform(X_train)\n",
    "X_val_scaled = scaler.transform(X_val)\n",
    "X_test_scaled = scaler.transform(X_test)\n",
    "\n",
    "# These arrays are consumed by the model-training cells below.\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "24f2b060-2379-4376-b199-7139d7ef289c",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 0, Loss: 321.9147907141637, MAE: 15.601790163442976\n",
      "Epoch 100, Loss: 17.01758372386906, MAE: 3.3804264729673528\n",
      "Epoch 200, Loss: 9.72772747400491, MAE: 2.5231701124135912\n",
      "Epoch 300, Loss: 7.282546044756762, MAE: 2.182618389020681\n",
      "Epoch 400, Loss: 5.836305319147132, MAE: 1.9454085949351112\n",
      "Epoch 500, Loss: 5.029872159802157, MAE: 1.8090723705318723\n",
      "Epoch 600, Loss: 4.281467491985871, MAE: 1.6658002059821697\n",
      "Epoch 700, Loss: 3.9003247068322824, MAE: 1.6014433246917619\n",
      "Epoch 800, Loss: 3.458942729004839, MAE: 1.5100695715414396\n",
      "Epoch 900, Loss: 3.165802003621319, MAE: 1.4454264894678543\n",
      "Epoch 1000, Loss: 2.8291944390607937, MAE: 1.3655874004956\n",
      "Epoch 1100, Loss: 2.6046928012279764, MAE: 1.3127473877982354\n",
      "Epoch 1200, Loss: 2.4003985378069834, MAE: 1.262602285179911\n",
      "Epoch 1300, Loss: 2.323138124739325, MAE: 1.2510936791195064\n",
      "Epoch 1400, Loss: 2.07242427782006, MAE: 1.175349598891657\n",
      "Epoch 1500, Loss: 1.9802974420031423, MAE: 1.1562456826242853\n",
      "Epoch 1600, Loss: 1.8422771604877333, MAE: 1.1136354778968642\n",
      "Epoch 1700, Loss: 1.7093606364809166, MAE: 1.06827814697418\n",
      "Epoch 1800, Loss: 1.5729953676528636, MAE: 1.0246695409818396\n",
      "Epoch 1900, Loss: 1.4651346885970296, MAE: 0.9908584606539645\n"
     ]
    }
   ],
   "source": [
    "# Define the optimizer and the model.\n",
    "optimizer = AdamElasticNet(learning_rate=0.001, lambda_l1=0.001, lambda_l2=0.001)\n",
    "# Fix: pass the optimizer to the MLP. Previously it was constructed but\n",
    "# never used, so training silently fell back to plain gradient descent.\n",
    "model_selfwrite = MLP(layer_sizes=[X_train_scaled.shape[1], 128, 64, 1], activation='relu', optimizer=optimizer)\n",
    "# Train the model.\n",
    "epochs = 2000  # adjust as needed\n",
    "model_selfwrite.train(X_train_scaled, y_train.reshape(-1, 1), epochs=epochs)\n",
    "\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "15554d88-5c8a-41cf-bf78-6fbe94dc762b",
   "metadata": {},
   "source": [
    "## TensorFlow"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "f567563f-1d88-427b-838e-155dc7690983",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1/200\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "D:\\Software\\Python\\Lib\\site-packages\\keras\\src\\layers\\core\\dense.py:86: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.\n",
      "  super().__init__(activity_regularizer=activity_regularizer, **kwargs)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 11ms/step - loss: 538.1135 - mae: 21.5715 - val_loss: 589.3805 - val_mae: 22.4769\n",
      "Epoch 2/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 515.5797 - mae: 20.7673 - val_loss: 527.4846 - val_mae: 21.0985\n",
      "Epoch 3/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 2ms/step - loss: 435.8294 - mae: 18.9716 - val_loss: 444.1440 - val_mae: 19.0911\n",
      "Epoch 4/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 2ms/step - loss: 345.5679 - mae: 16.5771 - val_loss: 334.9797 - val_mae: 16.1820\n",
      "Epoch 5/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 2ms/step - loss: 237.2754 - mae: 13.3523 - val_loss: 213.7744 - val_mae: 12.1838\n",
      "Epoch 6/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 2ms/step - loss: 152.5839 - mae: 9.8850 - val_loss: 119.8161 - val_mae: 8.4001\n",
      "Epoch 7/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 75.7787 - mae: 6.9059 - val_loss: 74.9917 - val_mae: 6.5657\n",
      "Epoch 8/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 48.7925 - mae: 5.3015 - val_loss: 57.3246 - val_mae: 5.7487\n",
      "Epoch 9/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 2ms/step - loss: 40.6471 - mae: 4.5870 - val_loss: 44.7683 - val_mae: 5.0190\n",
      "Epoch 10/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 2ms/step - loss: 32.9110 - mae: 4.2759 - val_loss: 37.0888 - val_mae: 4.5141\n",
      "Epoch 11/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 28.7125 - mae: 3.7960 - val_loss: 32.5466 - val_mae: 4.2131\n",
      "Epoch 12/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step - loss: 23.1769 - mae: 3.4261 - val_loss: 29.7236 - val_mae: 4.0061\n",
      "Epoch 13/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 25.2026 - mae: 3.3763 - val_loss: 27.4965 - val_mae: 3.9370\n",
      "Epoch 14/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 2ms/step - loss: 22.2290 - mae: 3.2928 - val_loss: 26.0586 - val_mae: 3.8376\n",
      "Epoch 15/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step - loss: 17.5102 - mae: 3.0436 - val_loss: 25.0592 - val_mae: 3.7309\n",
      "Epoch 16/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 20.6298 - mae: 3.0888 - val_loss: 24.3190 - val_mae: 3.6552\n",
      "Epoch 17/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 2ms/step - loss: 25.9481 - mae: 3.2120 - val_loss: 23.5067 - val_mae: 3.5516\n",
      "Epoch 18/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 18.0724 - mae: 2.8498 - val_loss: 22.7628 - val_mae: 3.5149\n",
      "Epoch 19/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 19.0020 - mae: 2.9688 - val_loss: 22.1106 - val_mae: 3.4788\n",
      "Epoch 20/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 18.4996 - mae: 2.8295 - val_loss: 21.6943 - val_mae: 3.4524\n",
      "Epoch 21/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step - loss: 19.9974 - mae: 2.9495 - val_loss: 21.3232 - val_mae: 3.4267\n",
      "Epoch 22/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 14.8059 - mae: 2.7841 - val_loss: 20.4965 - val_mae: 3.3203\n",
      "Epoch 23/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 14.7094 - mae: 2.6151 - val_loss: 20.1235 - val_mae: 3.2602\n",
      "Epoch 24/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 16.3054 - mae: 2.6130 - val_loss: 19.6124 - val_mae: 3.2010\n",
      "Epoch 25/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 18.4070 - mae: 2.7541 - val_loss: 19.0701 - val_mae: 3.0808\n",
      "Epoch 26/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step - loss: 11.3847 - mae: 2.3443 - val_loss: 18.5806 - val_mae: 3.0567\n",
      "Epoch 27/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 13.5015 - mae: 2.4521 - val_loss: 18.9036 - val_mae: 3.0933\n",
      "Epoch 28/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 14.1095 - mae: 2.5600 - val_loss: 18.9597 - val_mae: 3.1893\n",
      "Epoch 29/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 11.9160 - mae: 2.3606 - val_loss: 18.1472 - val_mae: 3.0670\n",
      "Epoch 30/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 12.4918 - mae: 2.3616 - val_loss: 18.0315 - val_mae: 2.9768\n",
      "Epoch 31/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 11.6117 - mae: 2.3464 - val_loss: 17.7352 - val_mae: 2.9380\n",
      "Epoch 32/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step - loss: 11.2229 - mae: 2.2838 - val_loss: 17.7922 - val_mae: 2.9581\n",
      "Epoch 33/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 14.4887 - mae: 2.4159 - val_loss: 17.3035 - val_mae: 2.9723\n",
      "Epoch 34/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 10.6427 - mae: 2.1879 - val_loss: 17.0926 - val_mae: 2.9179\n",
      "Epoch 35/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 11.2894 - mae: 2.2319 - val_loss: 17.1504 - val_mae: 2.8673\n",
      "Epoch 36/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 12.0737 - mae: 2.3853 - val_loss: 17.2417 - val_mae: 2.8633\n",
      "Epoch 37/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step - loss: 10.5105 - mae: 2.2333 - val_loss: 16.9398 - val_mae: 2.8968\n",
      "Epoch 38/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step - loss: 14.3704 - mae: 2.3495 - val_loss: 17.2061 - val_mae: 2.8655\n",
      "Epoch 39/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 2ms/step - loss: 13.4902 - mae: 2.2686 - val_loss: 17.0387 - val_mae: 2.8492\n",
      "Epoch 40/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 11.3193 - mae: 2.2332 - val_loss: 16.9623 - val_mae: 2.9020\n",
      "Epoch 41/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step - loss: 13.9710 - mae: 2.4268 - val_loss: 16.7085 - val_mae: 2.7949\n",
      "Epoch 42/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 10.6896 - mae: 2.2638 - val_loss: 17.3274 - val_mae: 2.9820\n",
      "Epoch 43/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 10.3839 - mae: 2.2318 - val_loss: 17.9306 - val_mae: 2.8563\n",
      "Epoch 44/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 10.5586 - mae: 2.2718 - val_loss: 16.6438 - val_mae: 2.8089\n",
      "Epoch 45/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 12.7090 - mae: 2.2205 - val_loss: 16.6507 - val_mae: 2.9173\n",
      "Epoch 46/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step - loss: 9.6724 - mae: 2.1269 - val_loss: 16.3520 - val_mae: 2.8591\n",
      "Epoch 47/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 11.0670 - mae: 2.0989 - val_loss: 16.7406 - val_mae: 2.7716\n",
      "Epoch 48/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step - loss: 13.1339 - mae: 2.2787 - val_loss: 16.9489 - val_mae: 2.8172\n",
      "Epoch 49/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step - loss: 9.3326 - mae: 2.1216 - val_loss: 17.0957 - val_mae: 2.8306\n",
      "Epoch 50/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step - loss: 13.8987 - mae: 2.4773 - val_loss: 20.5627 - val_mae: 3.0297\n",
      "Epoch 51/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 10.3307 - mae: 2.2928 - val_loss: 16.9460 - val_mae: 2.8497\n",
      "Epoch 52/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 11.2819 - mae: 2.1322 - val_loss: 16.2370 - val_mae: 2.8566\n",
      "Epoch 53/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 8.6226 - mae: 1.9639 - val_loss: 16.1053 - val_mae: 2.7865\n",
      "Epoch 54/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 9.0378 - mae: 2.0081 - val_loss: 16.3453 - val_mae: 2.7393\n",
      "Epoch 55/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 11.8176 - mae: 2.2826 - val_loss: 16.1686 - val_mae: 2.7452\n",
      "Epoch 56/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 2ms/step - loss: 10.0816 - mae: 2.1450 - val_loss: 15.8385 - val_mae: 2.7719\n",
      "Epoch 57/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 8.8688 - mae: 1.9891 - val_loss: 15.9429 - val_mae: 2.7397\n",
      "Epoch 58/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 8.2780 - mae: 1.9467 - val_loss: 16.4754 - val_mae: 2.7383\n",
      "Epoch 59/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 9.2797 - mae: 2.1002 - val_loss: 18.3947 - val_mae: 2.8104\n",
      "Epoch 60/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 8.6538 - mae: 2.0412 - val_loss: 17.3966 - val_mae: 2.8234\n",
      "Epoch 61/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 11.7976 - mae: 2.1394 - val_loss: 17.3213 - val_mae: 2.7563\n",
      "Epoch 62/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 9.1263 - mae: 2.0884 - val_loss: 15.7455 - val_mae: 2.6918\n",
      "Epoch 63/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 8.4407 - mae: 1.9595 - val_loss: 15.4647 - val_mae: 2.6862\n",
      "Epoch 64/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step - loss: 10.6522 - mae: 2.1789 - val_loss: 15.9388 - val_mae: 2.7253\n",
      "Epoch 65/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step - loss: 8.8564 - mae: 1.9293 - val_loss: 15.6491 - val_mae: 2.7289\n",
      "Epoch 66/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 9.7261 - mae: 2.0086 - val_loss: 15.3662 - val_mae: 2.7002\n",
      "Epoch 67/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 10.2996 - mae: 2.0676 - val_loss: 15.7288 - val_mae: 2.7189\n",
      "Epoch 68/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 8.3580 - mae: 1.9982 - val_loss: 15.7214 - val_mae: 2.6873\n",
      "Epoch 69/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 11.1939 - mae: 2.0773 - val_loss: 15.9260 - val_mae: 2.6898\n",
      "Epoch 70/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step - loss: 9.0114 - mae: 1.9989 - val_loss: 15.2368 - val_mae: 2.7015\n",
      "Epoch 71/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 7.1583 - mae: 1.7735 - val_loss: 15.5346 - val_mae: 2.6891\n",
      "Epoch 72/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step - loss: 9.9931 - mae: 2.0589 - val_loss: 16.3741 - val_mae: 2.6528\n",
      "Epoch 73/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 2ms/step - loss: 8.4834 - mae: 1.9877 - val_loss: 15.9458 - val_mae: 2.7149\n",
      "Epoch 74/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step - loss: 8.0694 - mae: 1.8998 - val_loss: 15.9992 - val_mae: 2.6947\n",
      "Epoch 75/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 10.0583 - mae: 2.0845 - val_loss: 16.0180 - val_mae: 2.6544\n",
      "Epoch 76/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 8.0535 - mae: 1.9530 - val_loss: 15.5753 - val_mae: 2.6863\n",
      "Epoch 77/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 7.9149 - mae: 1.9411 - val_loss: 15.8098 - val_mae: 2.7124\n",
      "Epoch 78/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step - loss: 7.6169 - mae: 1.8680 - val_loss: 16.0930 - val_mae: 2.6603\n",
      "Epoch 79/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step - loss: 8.1468 - mae: 1.8867 - val_loss: 15.6652 - val_mae: 2.6558\n",
      "Epoch 80/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 7.5600 - mae: 1.8348 - val_loss: 15.2315 - val_mae: 2.6923\n",
      "Epoch 81/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 8.4704 - mae: 1.8940 - val_loss: 15.7462 - val_mae: 2.6337\n",
      "Epoch 82/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 8.0265 - mae: 1.9456 - val_loss: 16.1394 - val_mae: 2.6720\n",
      "Epoch 83/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 5ms/step - loss: 8.0501 - mae: 1.9023 - val_loss: 16.1996 - val_mae: 2.7119\n",
      "Epoch 84/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 8.9352 - mae: 1.8471 - val_loss: 16.0061 - val_mae: 2.6655\n",
      "Epoch 85/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step - loss: 8.4749 - mae: 1.8504 - val_loss: 16.2115 - val_mae: 2.6642\n",
      "Epoch 86/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 7.1847 - mae: 1.7831 - val_loss: 15.6029 - val_mae: 2.6343\n",
      "Epoch 87/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 8.9853 - mae: 1.8322 - val_loss: 15.6178 - val_mae: 2.6822\n",
      "Epoch 88/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step - loss: 7.9261 - mae: 1.8565 - val_loss: 15.4446 - val_mae: 2.6441\n",
      "Epoch 89/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step - loss: 6.7797 - mae: 1.7626 - val_loss: 15.6302 - val_mae: 2.6383\n",
      "Epoch 90/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 7.5968 - mae: 1.8096 - val_loss: 15.9424 - val_mae: 2.6517\n",
      "Epoch 91/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 8.9777 - mae: 1.9358 - val_loss: 15.9581 - val_mae: 2.6211\n",
      "Epoch 92/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 10.2329 - mae: 1.9731 - val_loss: 15.3487 - val_mae: 2.6193\n",
      "Epoch 93/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 6.4904 - mae: 1.7534 - val_loss: 15.5017 - val_mae: 2.6301\n",
      "Epoch 94/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 7.5722 - mae: 1.8854 - val_loss: 15.7304 - val_mae: 2.5754\n",
      "Epoch 95/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 2ms/step - loss: 7.1742 - mae: 1.7675 - val_loss: 15.1520 - val_mae: 2.6141\n",
      "Epoch 96/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step - loss: 10.2936 - mae: 1.9759 - val_loss: 15.2896 - val_mae: 2.6011\n",
      "Epoch 97/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step - loss: 8.4186 - mae: 1.8844 - val_loss: 15.0946 - val_mae: 2.6044\n",
      "Epoch 98/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step - loss: 7.4826 - mae: 1.8459 - val_loss: 14.7601 - val_mae: 2.6436\n",
      "Epoch 99/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step - loss: 7.8783 - mae: 1.8647 - val_loss: 15.4444 - val_mae: 2.5950\n",
      "Epoch 100/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 8.8124 - mae: 1.9055 - val_loss: 15.3983 - val_mae: 2.6178\n",
      "Epoch 101/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step - loss: 9.4805 - mae: 1.8534 - val_loss: 15.0827 - val_mae: 2.6443\n",
      "Epoch 102/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 6.8139 - mae: 1.6863 - val_loss: 15.3977 - val_mae: 2.5835\n",
      "Epoch 103/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 8.0913 - mae: 1.7911 - val_loss: 15.5391 - val_mae: 2.5908\n",
      "Epoch 104/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 8.7503 - mae: 1.9196 - val_loss: 15.3034 - val_mae: 2.5810\n",
      "Epoch 105/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 7.0974 - mae: 1.7512 - val_loss: 15.5869 - val_mae: 2.6047\n",
      "Epoch 106/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 9.5840 - mae: 1.9379 - val_loss: 15.4646 - val_mae: 2.5920\n",
      "Epoch 107/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 5ms/step - loss: 7.5393 - mae: 1.7691 - val_loss: 15.5245 - val_mae: 2.6428\n",
      "Epoch 108/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 7.2499 - mae: 1.7811 - val_loss: 15.3625 - val_mae: 2.5794\n",
      "Epoch 109/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 8.7557 - mae: 1.8705 - val_loss: 15.2267 - val_mae: 2.5893\n",
      "Epoch 110/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 2ms/step - loss: 7.1050 - mae: 1.6994 - val_loss: 15.1249 - val_mae: 2.5665\n",
      "Epoch 111/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step - loss: 6.9870 - mae: 1.6866 - val_loss: 15.3532 - val_mae: 2.5504\n",
      "Epoch 112/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 7.2070 - mae: 1.7329 - val_loss: 15.3227 - val_mae: 2.5624\n",
      "Epoch 113/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step - loss: 6.3109 - mae: 1.6896 - val_loss: 15.2020 - val_mae: 2.6377\n",
      "Epoch 114/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 7.7501 - mae: 1.6957 - val_loss: 15.4869 - val_mae: 2.5658\n",
      "Epoch 115/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step - loss: 7.0271 - mae: 1.7134 - val_loss: 15.5302 - val_mae: 2.5609\n",
      "Epoch 116/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step - loss: 8.1530 - mae: 1.7962 - val_loss: 15.3391 - val_mae: 2.5703\n",
      "Epoch 117/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step - loss: 7.8063 - mae: 1.7497 - val_loss: 14.2036 - val_mae: 2.5711\n",
      "Epoch 118/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 7.0566 - mae: 1.7307 - val_loss: 14.5280 - val_mae: 2.4974\n",
      "Epoch 119/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step - loss: 6.9096 - mae: 1.7373 - val_loss: 14.8116 - val_mae: 2.5197\n",
      "Epoch 120/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step - loss: 7.1246 - mae: 1.7119 - val_loss: 14.7770 - val_mae: 2.5245\n",
      "Epoch 121/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 6.6562 - mae: 1.5811 - val_loss: 14.3257 - val_mae: 2.5066\n",
      "Epoch 122/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step - loss: 7.5681 - mae: 1.7587 - val_loss: 14.6775 - val_mae: 2.4971\n",
      "Epoch 123/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 6.1147 - mae: 1.6259 - val_loss: 14.9949 - val_mae: 2.5599\n",
      "Epoch 124/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 7.0129 - mae: 1.6704 - val_loss: 14.8019 - val_mae: 2.5213\n",
      "Epoch 125/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 6.5209 - mae: 1.7100 - val_loss: 15.0663 - val_mae: 2.5171\n",
      "Epoch 126/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 6.7454 - mae: 1.6889 - val_loss: 15.1031 - val_mae: 2.5278\n",
      "Epoch 127/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 8.5215 - mae: 1.9631 - val_loss: 16.5436 - val_mae: 2.5737\n",
      "Epoch 128/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step - loss: 6.6235 - mae: 1.6842 - val_loss: 15.5635 - val_mae: 2.5977\n",
      "Epoch 129/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 6.8744 - mae: 1.7108 - val_loss: 14.9466 - val_mae: 2.5459\n",
      "Epoch 130/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 6.5614 - mae: 1.6284 - val_loss: 15.3783 - val_mae: 2.5197\n",
      "Epoch 131/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step - loss: 7.1514 - mae: 1.7142 - val_loss: 14.9776 - val_mae: 2.5239\n",
      "Epoch 132/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 6.7806 - mae: 1.6867 - val_loss: 15.0972 - val_mae: 2.5207\n",
      "Epoch 133/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step - loss: 6.1679 - mae: 1.6499 - val_loss: 14.8093 - val_mae: 2.5254\n",
      "Epoch 134/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 5.8643 - mae: 1.6073 - val_loss: 14.9350 - val_mae: 2.4894\n",
      "Epoch 135/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 6.9191 - mae: 1.6326 - val_loss: 15.2696 - val_mae: 2.5062\n",
      "Epoch 136/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 5.8367 - mae: 1.6055 - val_loss: 16.0360 - val_mae: 2.5342\n",
      "Epoch 137/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step - loss: 6.6451 - mae: 1.7149 - val_loss: 15.2933 - val_mae: 2.5523\n",
      "Epoch 138/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 5ms/step - loss: 6.9363 - mae: 1.6526 - val_loss: 15.7425 - val_mae: 2.5111\n",
      "Epoch 139/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 6.3930 - mae: 1.6551 - val_loss: 15.4125 - val_mae: 2.5250\n",
      "Epoch 140/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step - loss: 6.8925 - mae: 1.6376 - val_loss: 15.5816 - val_mae: 2.5132\n",
      "Epoch 141/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 6.2908 - mae: 1.6692 - val_loss: 16.1311 - val_mae: 2.5305\n",
      "Epoch 142/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step - loss: 6.1148 - mae: 1.5716 - val_loss: 15.5097 - val_mae: 2.5083\n",
      "Epoch 143/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 6.6315 - mae: 1.6251 - val_loss: 16.0337 - val_mae: 2.5190\n",
      "Epoch 144/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 5.6014 - mae: 1.5798 - val_loss: 15.3301 - val_mae: 2.5095\n",
      "Epoch 145/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step - loss: 7.5554 - mae: 1.6581 - val_loss: 15.3799 - val_mae: 2.5067\n",
      "Epoch 146/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 5ms/step - loss: 6.3767 - mae: 1.5846 - val_loss: 15.2388 - val_mae: 2.5046\n",
      "Epoch 147/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 5.8968 - mae: 1.5981 - val_loss: 15.5945 - val_mae: 2.5044\n",
      "Epoch 148/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 5.8480 - mae: 1.5627 - val_loss: 15.4730 - val_mae: 2.4909\n",
      "Epoch 149/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 7.2122 - mae: 1.6423 - val_loss: 15.2206 - val_mae: 2.4988\n",
      "Epoch 150/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step - loss: 6.5685 - mae: 1.6145 - val_loss: 14.3060 - val_mae: 2.4548\n",
      "Epoch 151/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step - loss: 5.0311 - mae: 1.4504 - val_loss: 14.7915 - val_mae: 2.4577\n",
      "Epoch 152/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 7.3390 - mae: 1.5452 - val_loss: 15.1233 - val_mae: 2.5123\n",
      "Epoch 153/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 8.2632 - mae: 1.7982 - val_loss: 15.2314 - val_mae: 2.4960\n",
      "Epoch 154/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step - loss: 5.2036 - mae: 1.4711 - val_loss: 15.2795 - val_mae: 2.4891\n",
      "Epoch 155/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step - loss: 7.6379 - mae: 1.6391 - val_loss: 15.3144 - val_mae: 2.4903\n",
      "Epoch 156/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step - loss: 5.5435 - mae: 1.5102 - val_loss: 15.5925 - val_mae: 2.5011\n",
      "Epoch 157/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step - loss: 6.2064 - mae: 1.5597 - val_loss: 15.5846 - val_mae: 2.5023\n",
      "Epoch 158/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step - loss: 5.4864 - mae: 1.4639 - val_loss: 15.5232 - val_mae: 2.5023\n",
      "Epoch 159/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step - loss: 6.4317 - mae: 1.5812 - val_loss: 15.6155 - val_mae: 2.4905\n",
      "Epoch 160/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 5.4213 - mae: 1.4615 - val_loss: 15.5442 - val_mae: 2.4950\n",
      "Epoch 161/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step - loss: 7.1927 - mae: 1.5654 - val_loss: 15.7417 - val_mae: 2.5123\n",
      "Epoch 162/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 6.8288 - mae: 1.5639 - val_loss: 15.6707 - val_mae: 2.5155\n",
      "Epoch 163/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step - loss: 5.2585 - mae: 1.4611 - val_loss: 15.8804 - val_mae: 2.5109\n",
      "Epoch 164/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 6.2087 - mae: 1.5421 - val_loss: 16.1382 - val_mae: 2.5156\n",
      "Epoch 165/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 5.2362 - mae: 1.4166 - val_loss: 16.2222 - val_mae: 2.5183\n",
      "Epoch 166/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step - loss: 6.8177 - mae: 1.5872 - val_loss: 15.7957 - val_mae: 2.5280\n",
      "Epoch 167/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 5.9109 - mae: 1.5332 - val_loss: 16.1302 - val_mae: 2.5410\n",
      "Epoch 168/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 5ms/step - loss: 5.5741 - mae: 1.5242 - val_loss: 15.4975 - val_mae: 2.4863\n",
      "Epoch 169/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 5.4538 - mae: 1.4928 - val_loss: 15.7865 - val_mae: 2.4670\n",
      "Epoch 170/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step - loss: 5.7231 - mae: 1.4547 - val_loss: 15.9878 - val_mae: 2.5224\n",
      "Epoch 171/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step - loss: 6.8894 - mae: 1.5850 - val_loss: 15.9060 - val_mae: 2.4936\n",
      "Epoch 172/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 7.3104 - mae: 1.6026 - val_loss: 15.4795 - val_mae: 2.4982\n",
      "Epoch 173/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step - loss: 6.1527 - mae: 1.4734 - val_loss: 16.2191 - val_mae: 2.4960\n",
      "Epoch 174/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 5.2490 - mae: 1.4297 - val_loss: 15.6636 - val_mae: 2.4825\n",
      "Epoch 175/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 5.1070 - mae: 1.3785 - val_loss: 15.5011 - val_mae: 2.4698\n",
      "Epoch 176/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 4.9884 - mae: 1.4415 - val_loss: 15.5819 - val_mae: 2.4705\n",
      "Epoch 177/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step - loss: 5.4252 - mae: 1.4876 - val_loss: 16.3512 - val_mae: 2.4870\n",
      "Epoch 178/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 4.9375 - mae: 1.4202 - val_loss: 15.6888 - val_mae: 2.4848\n",
      "Epoch 179/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step - loss: 5.9986 - mae: 1.5036 - val_loss: 15.6919 - val_mae: 2.4855\n",
      "Epoch 180/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 5.3782 - mae: 1.4622 - val_loss: 15.9344 - val_mae: 2.4916\n",
      "Epoch 181/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step - loss: 5.8771 - mae: 1.4319 - val_loss: 16.4359 - val_mae: 2.4933\n",
      "Epoch 182/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 4.7999 - mae: 1.3837 - val_loss: 15.9067 - val_mae: 2.4861\n",
      "Epoch 183/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step - loss: 5.2649 - mae: 1.4530 - val_loss: 16.0923 - val_mae: 2.4865\n",
      "Epoch 184/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 5.1334 - mae: 1.4090 - val_loss: 16.2502 - val_mae: 2.4838\n",
      "Epoch 185/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step - loss: 5.9348 - mae: 1.5962 - val_loss: 15.7855 - val_mae: 2.4770\n",
      "Epoch 186/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 2ms/step - loss: 7.0131 - mae: 1.5909 - val_loss: 16.1641 - val_mae: 2.5221\n",
      "Epoch 187/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 4.8009 - mae: 1.3609 - val_loss: 16.3375 - val_mae: 2.4941\n",
      "Epoch 188/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 4.5159 - mae: 1.4005 - val_loss: 15.8331 - val_mae: 2.4744\n",
      "Epoch 189/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step - loss: 5.8331 - mae: 1.5313 - val_loss: 15.3889 - val_mae: 2.4989\n",
      "Epoch 190/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 5.8585 - mae: 1.6070 - val_loss: 15.8042 - val_mae: 2.4990\n",
      "Epoch 191/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step - loss: 5.4687 - mae: 1.5298 - val_loss: 16.2581 - val_mae: 2.4974\n",
      "Epoch 192/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 6.3480 - mae: 1.5109 - val_loss: 17.1424 - val_mae: 2.5319\n",
      "Epoch 193/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 4.7174 - mae: 1.3589 - val_loss: 16.0947 - val_mae: 2.4886\n",
      "Epoch 194/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step - loss: 4.7413 - mae: 1.3576 - val_loss: 16.1906 - val_mae: 2.4598\n",
      "Epoch 195/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 6.2626 - mae: 1.4622 - val_loss: 15.4269 - val_mae: 2.4440\n",
      "Epoch 196/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step - loss: 5.6319 - mae: 1.4157 - val_loss: 15.9994 - val_mae: 2.4609\n",
      "Epoch 197/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 5ms/step - loss: 5.0624 - mae: 1.3570 - val_loss: 15.4886 - val_mae: 2.4527\n",
      "Epoch 198/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 4.8879 - mae: 1.3925 - val_loss: 15.8197 - val_mae: 2.4507\n",
      "Epoch 199/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 5.2085 - mae: 1.4630 - val_loss: 15.5438 - val_mae: 2.4493\n",
      "Epoch 200/200\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step - loss: 4.9283 - mae: 1.3632 - val_loss: 16.1027 - val_mae: 2.4479\n",
      "\u001b[1m4/4\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 0s/step - loss: 17.4347 - mae: 2.6118  \n",
      "Test MSE: 20.853269577026367, Test MAE: 2.8216052055358887\n"
     ]
    }
   ],
    "source": [
     "import numpy as np\n",
     "from keras.datasets import boston_housing\n",
     "from keras.models import Sequential\n",
     "from keras.layers import Dense\n",
     "from keras.optimizers import Adam\n",
     "from keras.regularizers import l1_l2\n",
     "# Load the Boston housing dataset (train/test split provided by Keras)\n",
     "(x_train, y_train), (x_test, y_test) = boston_housing.load_data()\n",
     "\n",
     "# Standardize features; statistics come from the training set only,\n",
     "# so no test-set information leaks into preprocessing\n",
     "mean = x_train.mean(axis=0)\n",
     "std = x_train.std(axis=0)\n",
     "x_train = (x_train - mean) / std\n",
     "x_test = (x_test - mean) / std\n",
     "\n",
     "\n",
     "# Build the model: two ReLU hidden layers (128, 64 units) with combined\n",
     "# L1+L2 weight regularization, and a single linear output for regression\n",
     "model = Sequential([\n",
     "    Dense(128, activation='relu', input_shape=(x_train.shape[1],), kernel_regularizer=l1_l2(l1=0.001, l2=0.001)),\n",
     "    Dense(64, activation='relu', kernel_regularizer=l1_l2(l1=0.001, l2=0.001)),\n",
     "    Dense(1)\n",
     "])\n",
     "\n",
     "# Compile: Adam optimizer, MSE loss, and MAE as an extra metric\n",
     "model.compile(optimizer=Adam(learning_rate=0.001), loss='mse', metrics=['mae'])\n",
     "\n",
     "\n",
     "# Train for 200 epochs, holding out 20% of the training data for validation\n",
     "model.fit(x_train, y_train, epochs=200, validation_split=0.2, verbose=1)\n",
     "\n",
     "# Evaluate on the held-out test set\n",
     "test_mse_score, test_mae_score = model.evaluate(x_test, y_test)\n",
     "print(f\"Test MSE: {test_mse_score}, Test MAE: {test_mae_score}\")\n"
    ]
  },
  {
   "cell_type": "markdown",
   "id": "9105707f-03c8-404f-80b1-a7e892f58e12",
   "metadata": {},
   "source": [
     "## PyTorch"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "836638b0-3019-4e27-a821-eaf9d3910236",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch [1/400], Loss: 682.5104\n",
      "Epoch [2/400], Loss: 482.1921\n",
      "Epoch [3/400], Loss: 419.3305\n",
      "Epoch [4/400], Loss: 193.2281\n",
      "Epoch [5/400], Loss: 115.7001\n",
      "Epoch [6/400], Loss: 76.7903\n",
      "Epoch [7/400], Loss: 66.8299\n",
      "Epoch [8/400], Loss: 49.9523\n",
      "Epoch [9/400], Loss: 55.5305\n",
      "Epoch [10/400], Loss: 47.6851\n",
      "Epoch [11/400], Loss: 25.1287\n",
      "Epoch [12/400], Loss: 24.7692\n",
      "Epoch [13/400], Loss: 54.4652\n",
      "Epoch [14/400], Loss: 22.2637\n",
      "Epoch [15/400], Loss: 18.2709\n",
      "Epoch [16/400], Loss: 25.2831\n",
      "Epoch [17/400], Loss: 19.9365\n",
      "Epoch [18/400], Loss: 10.4097\n",
      "Epoch [19/400], Loss: 46.2380\n",
      "Epoch [20/400], Loss: 7.2594\n",
      "Epoch [21/400], Loss: 14.9301\n",
      "Epoch [22/400], Loss: 11.4601\n",
      "Epoch [23/400], Loss: 27.5122\n",
      "Epoch [24/400], Loss: 9.4463\n",
      "Epoch [25/400], Loss: 6.0840\n",
      "Epoch [26/400], Loss: 12.4148\n",
      "Epoch [27/400], Loss: 9.3846\n",
      "Epoch [28/400], Loss: 12.2786\n",
      "Epoch [29/400], Loss: 36.5492\n",
      "Epoch [30/400], Loss: 10.0086\n",
      "Epoch [31/400], Loss: 10.1991\n",
      "Epoch [32/400], Loss: 15.3889\n",
      "Epoch [33/400], Loss: 9.0711\n",
      "Epoch [34/400], Loss: 23.8520\n",
      "Epoch [35/400], Loss: 11.1248\n",
      "Epoch [36/400], Loss: 33.2326\n",
      "Epoch [37/400], Loss: 9.2477\n",
      "Epoch [38/400], Loss: 9.9514\n",
      "Epoch [39/400], Loss: 8.7480\n",
      "Epoch [40/400], Loss: 18.3439\n",
      "Epoch [41/400], Loss: 8.7008\n",
      "Epoch [42/400], Loss: 9.1972\n",
      "Epoch [43/400], Loss: 11.1900\n",
      "Epoch [44/400], Loss: 9.2192\n",
      "Epoch [45/400], Loss: 7.4895\n",
      "Epoch [46/400], Loss: 9.2704\n",
      "Epoch [47/400], Loss: 9.6150\n",
      "Epoch [48/400], Loss: 23.5195\n",
      "Epoch [49/400], Loss: 8.5766\n",
      "Epoch [50/400], Loss: 6.7900\n",
      "Epoch [51/400], Loss: 9.3626\n",
      "Epoch [52/400], Loss: 3.9561\n",
      "Epoch [53/400], Loss: 8.5946\n",
      "Epoch [54/400], Loss: 6.2289\n",
      "Epoch [55/400], Loss: 8.1774\n",
      "Epoch [56/400], Loss: 7.1199\n",
      "Epoch [57/400], Loss: 4.4878\n",
      "Epoch [58/400], Loss: 10.9431\n",
      "Epoch [59/400], Loss: 4.2575\n",
      "Epoch [60/400], Loss: 4.2082\n",
      "Epoch [61/400], Loss: 36.1505\n",
      "Epoch [62/400], Loss: 7.5681\n",
      "Epoch [63/400], Loss: 7.9952\n",
      "Epoch [64/400], Loss: 4.2457\n",
      "Epoch [65/400], Loss: 3.6428\n",
      "Epoch [66/400], Loss: 3.5198\n",
      "Epoch [67/400], Loss: 16.5898\n",
      "Epoch [68/400], Loss: 7.3896\n",
      "Epoch [69/400], Loss: 10.0011\n",
      "Epoch [70/400], Loss: 9.2697\n",
      "Epoch [71/400], Loss: 6.5979\n",
      "Epoch [72/400], Loss: 5.4848\n",
      "Epoch [73/400], Loss: 10.2443\n",
      "Epoch [74/400], Loss: 8.0935\n",
      "Epoch [75/400], Loss: 7.1419\n",
      "Epoch [76/400], Loss: 3.2679\n",
      "Epoch [77/400], Loss: 11.4278\n",
      "Epoch [78/400], Loss: 7.7658\n",
      "Epoch [79/400], Loss: 8.0844\n",
      "Epoch [80/400], Loss: 7.8314\n",
      "Epoch [81/400], Loss: 22.7229\n",
      "Epoch [82/400], Loss: 9.4413\n",
      "Epoch [83/400], Loss: 7.3511\n",
      "Epoch [84/400], Loss: 10.2765\n",
      "Epoch [85/400], Loss: 5.1776\n",
      "Epoch [86/400], Loss: 9.8288\n",
      "Epoch [87/400], Loss: 6.1997\n",
      "Epoch [88/400], Loss: 18.6554\n",
      "Epoch [89/400], Loss: 6.9530\n",
      "Epoch [90/400], Loss: 6.1759\n",
      "Epoch [91/400], Loss: 6.8677\n",
      "Epoch [92/400], Loss: 2.5824\n",
      "Epoch [93/400], Loss: 6.5180\n",
      "Epoch [94/400], Loss: 3.2671\n",
      "Epoch [95/400], Loss: 10.9733\n",
      "Epoch [96/400], Loss: 11.8605\n",
      "Epoch [97/400], Loss: 5.5270\n",
      "Epoch [98/400], Loss: 7.8472\n",
      "Epoch [99/400], Loss: 6.0292\n",
      "Epoch [100/400], Loss: 5.2037\n",
      "Epoch [101/400], Loss: 3.9837\n",
      "Epoch [102/400], Loss: 4.9103\n",
      "Epoch [103/400], Loss: 4.7054\n",
      "Epoch [104/400], Loss: 2.6965\n",
      "Epoch [105/400], Loss: 5.1375\n",
      "Epoch [106/400], Loss: 9.8202\n",
      "Epoch [107/400], Loss: 4.9582\n",
      "Epoch [108/400], Loss: 8.1761\n",
      "Epoch [109/400], Loss: 10.8858\n",
      "Epoch [110/400], Loss: 5.6627\n",
      "Epoch [111/400], Loss: 4.7055\n",
      "Epoch [112/400], Loss: 5.1931\n",
      "Epoch [113/400], Loss: 15.8864\n",
      "Epoch [114/400], Loss: 8.6966\n",
      "Epoch [115/400], Loss: 5.3922\n",
      "Epoch [116/400], Loss: 5.6736\n",
      "Epoch [117/400], Loss: 7.1954\n",
      "Epoch [118/400], Loss: 2.9117\n",
      "Epoch [119/400], Loss: 2.3768\n",
      "Epoch [120/400], Loss: 7.0661\n",
      "Epoch [121/400], Loss: 6.0865\n",
      "Epoch [122/400], Loss: 2.1997\n",
      "Epoch [123/400], Loss: 4.1844\n",
      "Epoch [124/400], Loss: 3.7444\n",
      "Epoch [125/400], Loss: 5.9862\n",
      "Epoch [126/400], Loss: 5.9115\n",
      "Epoch [127/400], Loss: 4.7876\n",
      "Epoch [128/400], Loss: 4.8019\n",
      "Epoch [129/400], Loss: 2.7197\n",
      "Epoch [130/400], Loss: 3.9259\n",
      "Epoch [131/400], Loss: 3.9013\n",
      "Epoch [132/400], Loss: 17.3664\n",
      "Epoch [133/400], Loss: 8.2009\n",
      "Epoch [134/400], Loss: 5.6898\n",
      "Epoch [135/400], Loss: 4.4426\n",
      "Epoch [136/400], Loss: 6.8925\n",
      "Epoch [137/400], Loss: 1.9910\n",
      "Epoch [138/400], Loss: 2.5931\n",
      "Epoch [139/400], Loss: 1.9134\n",
      "Epoch [140/400], Loss: 5.0001\n",
      "Epoch [141/400], Loss: 1.3684\n",
      "Epoch [142/400], Loss: 3.6922\n",
      "Epoch [143/400], Loss: 2.5323\n",
      "Epoch [144/400], Loss: 7.7092\n",
      "Epoch [145/400], Loss: 1.3745\n",
      "Epoch [146/400], Loss: 1.6575\n",
      "Epoch [147/400], Loss: 4.8205\n",
      "Epoch [148/400], Loss: 6.1675\n",
      "Epoch [149/400], Loss: 5.9646\n",
      "Epoch [150/400], Loss: 6.0359\n",
      "Epoch [151/400], Loss: 7.8548\n",
      "Epoch [152/400], Loss: 2.4914\n",
      "Epoch [153/400], Loss: 4.3733\n",
      "Epoch [154/400], Loss: 3.7464\n",
      "Epoch [155/400], Loss: 7.2988\n",
      "Epoch [156/400], Loss: 4.0311\n",
      "Epoch [157/400], Loss: 6.6773\n",
      "Epoch [158/400], Loss: 2.7804\n",
      "Epoch [159/400], Loss: 12.0669\n",
      "Epoch [160/400], Loss: 3.7044\n",
      "Epoch [161/400], Loss: 2.6299\n",
      "Epoch [162/400], Loss: 5.0990\n",
      "Epoch [163/400], Loss: 3.0327\n",
      "Epoch [164/400], Loss: 3.9496\n",
      "Epoch [165/400], Loss: 2.8756\n",
      "Epoch [166/400], Loss: 2.2640\n",
      "Epoch [167/400], Loss: 10.6386\n",
      "Epoch [168/400], Loss: 3.1617\n",
      "Epoch [169/400], Loss: 3.6604\n",
      "Epoch [170/400], Loss: 4.0867\n",
      "Epoch [171/400], Loss: 5.8426\n",
      "Epoch [172/400], Loss: 5.4146\n",
      "Epoch [173/400], Loss: 4.4861\n",
      "Epoch [174/400], Loss: 3.6535\n",
      "Epoch [175/400], Loss: 3.5278\n",
      "Epoch [176/400], Loss: 3.2714\n",
      "Epoch [177/400], Loss: 3.9789\n",
      "Epoch [178/400], Loss: 5.3400\n",
      "Epoch [179/400], Loss: 1.8339\n",
      "Epoch [180/400], Loss: 9.6634\n",
      "Epoch [181/400], Loss: 2.6374\n",
      "Epoch [182/400], Loss: 3.5601\n",
      "Epoch [183/400], Loss: 6.3478\n",
      "Epoch [184/400], Loss: 4.7772\n",
      "Epoch [185/400], Loss: 6.3461\n",
      "Epoch [186/400], Loss: 2.1246\n",
      "Epoch [187/400], Loss: 3.6511\n",
      "Epoch [188/400], Loss: 7.5172\n",
      "Epoch [189/400], Loss: 6.2514\n",
      "Epoch [190/400], Loss: 3.2104\n",
      "Epoch [191/400], Loss: 2.9200\n",
      "Epoch [192/400], Loss: 10.5317\n",
      "Epoch [193/400], Loss: 2.7817\n",
      "Epoch [194/400], Loss: 5.6375\n",
      "Epoch [195/400], Loss: 3.1650\n",
      "Epoch [196/400], Loss: 2.1434\n",
      "Epoch [197/400], Loss: 5.2521\n",
      "Epoch [198/400], Loss: 3.1596\n",
      "Epoch [199/400], Loss: 4.5721\n",
      "Epoch [200/400], Loss: 2.1295\n",
      "Epoch [201/400], Loss: 6.0892\n",
      "Epoch [202/400], Loss: 3.7183\n",
      "Epoch [203/400], Loss: 2.8581\n",
      "Epoch [204/400], Loss: 4.5592\n",
      "Epoch [205/400], Loss: 2.8717\n",
      "Epoch [206/400], Loss: 3.1931\n",
      "Epoch [207/400], Loss: 5.2107\n",
      "Epoch [208/400], Loss: 4.2888\n",
      "Epoch [209/400], Loss: 3.9014\n",
      "Epoch [210/400], Loss: 1.6049\n",
      "Epoch [211/400], Loss: 2.0307\n",
      "Epoch [212/400], Loss: 2.8966\n",
      "Epoch [213/400], Loss: 2.6463\n",
      "Epoch [214/400], Loss: 4.2994\n",
      "Epoch [215/400], Loss: 3.0902\n",
      "Epoch [216/400], Loss: 2.1587\n",
      "Epoch [217/400], Loss: 6.4553\n",
      "Epoch [218/400], Loss: 2.8452\n",
      "Epoch [219/400], Loss: 5.0585\n",
      "Epoch [220/400], Loss: 2.0598\n",
      "Epoch [221/400], Loss: 7.2489\n",
      "Epoch [222/400], Loss: 2.4183\n",
      "Epoch [223/400], Loss: 1.1013\n",
      "Epoch [224/400], Loss: 2.8374\n",
      "Epoch [225/400], Loss: 2.3228\n",
      "Epoch [226/400], Loss: 2.3329\n",
      "Epoch [227/400], Loss: 4.2222\n",
      "Epoch [228/400], Loss: 2.9170\n",
      "Epoch [229/400], Loss: 2.2158\n",
      "Epoch [230/400], Loss: 3.4028\n",
      "Epoch [231/400], Loss: 2.8632\n",
      "Epoch [232/400], Loss: 5.1005\n",
      "Epoch [233/400], Loss: 4.2833\n",
      "Epoch [234/400], Loss: 1.9146\n",
      "Epoch [235/400], Loss: 1.9291\n",
      "Epoch [236/400], Loss: 1.7828\n",
      "Epoch [237/400], Loss: 1.8239\n",
      "Epoch [238/400], Loss: 1.9334\n",
      "Epoch [239/400], Loss: 3.2466\n",
      "Epoch [240/400], Loss: 3.2473\n",
      "Epoch [241/400], Loss: 5.8523\n",
      "Epoch [242/400], Loss: 2.3720\n",
      "Epoch [243/400], Loss: 4.4899\n",
      "Epoch [244/400], Loss: 1.1314\n",
      "Epoch [245/400], Loss: 2.0872\n",
      "Epoch [246/400], Loss: 6.6981\n",
      "Epoch [247/400], Loss: 1.9773\n",
      "Epoch [248/400], Loss: 4.6271\n",
      "Epoch [249/400], Loss: 1.5627\n",
      "Epoch [250/400], Loss: 1.7949\n",
      "Epoch [251/400], Loss: 1.8134\n",
      "Epoch [252/400], Loss: 5.1506\n",
      "Epoch [253/400], Loss: 2.6985\n",
      "Epoch [254/400], Loss: 1.3591\n",
      "Epoch [255/400], Loss: 1.4119\n",
      "Epoch [256/400], Loss: 6.8355\n",
      "Epoch [257/400], Loss: 1.7661\n",
      "Epoch [258/400], Loss: 4.7457\n",
      "Epoch [259/400], Loss: 2.0814\n",
      "Epoch [260/400], Loss: 1.1882\n",
      "Epoch [261/400], Loss: 1.9189\n",
      "Epoch [262/400], Loss: 3.7121\n",
      "Epoch [263/400], Loss: 3.5306\n",
      "Epoch [264/400], Loss: 1.9523\n",
      "Epoch [265/400], Loss: 1.6970\n",
      "Epoch [266/400], Loss: 3.6146\n",
      "Epoch [267/400], Loss: 2.3976\n",
      "Epoch [268/400], Loss: 2.3850\n",
      "Epoch [269/400], Loss: 2.2513\n",
      "Epoch [270/400], Loss: 1.9197\n",
      "Epoch [271/400], Loss: 3.3896\n",
      "Epoch [272/400], Loss: 5.0566\n",
      "Epoch [273/400], Loss: 3.7941\n",
      "Epoch [274/400], Loss: 1.4830\n",
      "Epoch [275/400], Loss: 2.7747\n",
      "Epoch [276/400], Loss: 1.7998\n",
      "Epoch [277/400], Loss: 5.0510\n",
      "Epoch [278/400], Loss: 1.9425\n",
      "Epoch [279/400], Loss: 1.9905\n",
      "Epoch [280/400], Loss: 6.2641\n",
      "Epoch [281/400], Loss: 1.5308\n",
      "Epoch [282/400], Loss: 3.3059\n",
      "Epoch [283/400], Loss: 2.9191\n",
      "Epoch [284/400], Loss: 2.8147\n",
      "Epoch [285/400], Loss: 5.7082\n",
      "Epoch [286/400], Loss: 1.6708\n",
      "Epoch [287/400], Loss: 2.5240\n",
      "Epoch [288/400], Loss: 2.8585\n",
      "Epoch [289/400], Loss: 1.6209\n",
      "Epoch [290/400], Loss: 2.4043\n",
      "Epoch [291/400], Loss: 1.0393\n",
      "Epoch [292/400], Loss: 2.2784\n",
      "Epoch [293/400], Loss: 4.1449\n",
      "Epoch [294/400], Loss: 1.9205\n",
      "Epoch [295/400], Loss: 1.4293\n",
      "Epoch [296/400], Loss: 1.8436\n",
      "Epoch [297/400], Loss: 1.5839\n",
      "Epoch [298/400], Loss: 1.9860\n",
      "Epoch [299/400], Loss: 1.3196\n",
      "Epoch [300/400], Loss: 0.9256\n",
      "Epoch [301/400], Loss: 2.8690\n",
      "Epoch [302/400], Loss: 2.8643\n",
      "Epoch [303/400], Loss: 1.3617\n",
      "Epoch [304/400], Loss: 2.2461\n",
      "Epoch [305/400], Loss: 2.4282\n",
      "Epoch [306/400], Loss: 4.5423\n",
      "Epoch [307/400], Loss: 4.7372\n",
      "Epoch [308/400], Loss: 1.5339\n",
      "Epoch [309/400], Loss: 1.2479\n",
      "Epoch [310/400], Loss: 1.0341\n",
      "Epoch [311/400], Loss: 1.7313\n",
      "Epoch [312/400], Loss: 1.4783\n",
      "Epoch [313/400], Loss: 1.4894\n",
      "Epoch [314/400], Loss: 3.4239\n",
      "Epoch [315/400], Loss: 1.9357\n",
      "Epoch [316/400], Loss: 3.2019\n",
      "Epoch [317/400], Loss: 3.8760\n",
      "Epoch [318/400], Loss: 1.8028\n",
      "Epoch [319/400], Loss: 2.3891\n",
      "Epoch [320/400], Loss: 1.2714\n",
      "Epoch [321/400], Loss: 4.2180\n",
      "Epoch [322/400], Loss: 1.7841\n",
      "Epoch [323/400], Loss: 1.4546\n",
      "Epoch [324/400], Loss: 1.5622\n",
      "Epoch [325/400], Loss: 1.3534\n",
      "Epoch [326/400], Loss: 3.3864\n",
      "Epoch [327/400], Loss: 6.2350\n",
      "Epoch [328/400], Loss: 1.7707\n",
      "Epoch [329/400], Loss: 1.1458\n",
      "Epoch [330/400], Loss: 3.2786\n",
      "Epoch [331/400], Loss: 1.3581\n",
      "Epoch [332/400], Loss: 0.4217\n",
      "Epoch [333/400], Loss: 1.0973\n",
      "Epoch [334/400], Loss: 1.9730\n",
      "Epoch [335/400], Loss: 3.4716\n",
      "Epoch [336/400], Loss: 2.0359\n",
      "Epoch [337/400], Loss: 3.0744\n",
      "Epoch [338/400], Loss: 1.2128\n",
      "Epoch [339/400], Loss: 1.8517\n",
      "Epoch [340/400], Loss: 2.1981\n",
      "Epoch [341/400], Loss: 1.1998\n",
      "Epoch [342/400], Loss: 1.3158\n",
      "Epoch [343/400], Loss: 3.4233\n",
      "Epoch [344/400], Loss: 2.7973\n",
      "Epoch [345/400], Loss: 1.0470\n",
      "Epoch [346/400], Loss: 1.1025\n",
      "Epoch [347/400], Loss: 2.1962\n",
      "Epoch [348/400], Loss: 1.5381\n",
      "Epoch [349/400], Loss: 5.9269\n",
      "Epoch [350/400], Loss: 2.7128\n",
      "Epoch [351/400], Loss: 1.0129\n",
      "Epoch [352/400], Loss: 2.1550\n",
      "Epoch [353/400], Loss: 0.5562\n",
      "Epoch [354/400], Loss: 2.1336\n",
      "Epoch [355/400], Loss: 1.3485\n",
      "Epoch [356/400], Loss: 1.5270\n",
      "Epoch [357/400], Loss: 2.0904\n",
      "Epoch [358/400], Loss: 1.1156\n",
      "Epoch [359/400], Loss: 1.8193\n",
      "Epoch [360/400], Loss: 1.2367\n",
      "Epoch [361/400], Loss: 2.0893\n",
      "Epoch [362/400], Loss: 1.5087\n",
      "Epoch [363/400], Loss: 1.4010\n",
      "Epoch [364/400], Loss: 3.9673\n",
      "Epoch [365/400], Loss: 1.6680\n",
      "Epoch [366/400], Loss: 2.6207\n",
      "Epoch [367/400], Loss: 2.3910\n",
      "Epoch [368/400], Loss: 1.4179\n",
      "Epoch [369/400], Loss: 1.1323\n",
      "Epoch [370/400], Loss: 1.1763\n",
      "Epoch [371/400], Loss: 3.4513\n",
      "Epoch [372/400], Loss: 3.3314\n",
      "Epoch [373/400], Loss: 1.5718\n",
      "Epoch [374/400], Loss: 2.1616\n",
      "Epoch [375/400], Loss: 1.8688\n",
      "Epoch [376/400], Loss: 2.4428\n",
      "Epoch [377/400], Loss: 1.6513\n",
      "Epoch [378/400], Loss: 1.3538\n",
      "Epoch [379/400], Loss: 2.9921\n",
      "Epoch [380/400], Loss: 3.5829\n",
      "Epoch [381/400], Loss: 3.1774\n",
      "Epoch [382/400], Loss: 4.6542\n",
      "Epoch [383/400], Loss: 1.2925\n",
      "Epoch [384/400], Loss: 3.8231\n",
      "Epoch [385/400], Loss: 1.0396\n",
      "Epoch [386/400], Loss: 1.1272\n",
      "Epoch [387/400], Loss: 0.5375\n",
      "Epoch [388/400], Loss: 1.1723\n",
      "Epoch [389/400], Loss: 2.3967\n",
      "Epoch [390/400], Loss: 0.5629\n",
      "Epoch [391/400], Loss: 2.6605\n",
      "Epoch [392/400], Loss: 3.6398\n",
      "Epoch [393/400], Loss: 1.2993\n",
      "Epoch [394/400], Loss: 2.0339\n",
      "Epoch [395/400], Loss: 1.9151\n",
      "Epoch [396/400], Loss: 2.6238\n",
      "Epoch [397/400], Loss: 1.3778\n",
      "Epoch [398/400], Loss: 2.5045\n",
      "Epoch [399/400], Loss: 1.5585\n",
      "Epoch [400/400], Loss: 1.3694\n",
      "Test MSE: 15.6694, Test MAE: 2.3235\n"
     ]
    }
   ],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.optim as optim\n",
    "from torch.utils.data import DataLoader, TensorDataset, random_split\n",
    "from sklearn.preprocessing import StandardScaler\n",
    "import torch.nn.functional as F\n",
    "import pandas as pd\n",
    "import torch\n",
    "from torch.utils.data import DataLoader, TensorDataset\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.preprocessing import StandardScaler\n",
    "\n",
    "# 下载数据\n",
    "url = \"https://archive.ics.uci.edu/ml/machine-learning-databases/housing/housing.data\"\n",
    "columns = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV']\n",
    "data = pd.read_csv(url, delim_whitespace=True, names=columns)\n",
    "\n",
    "# 分割特征和标签\n",
    "X = data.iloc[:, :-1].values\n",
    "y = data.iloc[:, -1].values.reshape(-1, 1)\n",
    "\n",
    "# 数据标准化\n",
    "scaler = StandardScaler()\n",
    "X_scaled = scaler.fit_transform(X)\n",
    "\n",
    "# 转换为 PyTorch tensors\n",
    "X_tensor = torch.tensor(X_scaled, dtype=torch.float32)\n",
    "y_tensor = torch.tensor(y, dtype=torch.float32)\n",
    "\n",
    "# 创建数据集和数据加载器\n",
    "dataset = TensorDataset(X_tensor, y_tensor)\n",
    "train_dataset, test_dataset = train_test_split(dataset, test_size=0.2, random_state=42)\n",
    "train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)\n",
    "test_loader = DataLoader(test_dataset, batch_size=32)\n",
    "\n",
    "# 数据标准化\n",
    "scaler = StandardScaler()\n",
    "X_scaled = scaler.fit_transform(X)\n",
    "y = y.reshape(-1, 1)\n",
    "\n",
    "# 转换为 PyTorch tensors\n",
    "X_tensor = torch.tensor(X_scaled, dtype=torch.float32)\n",
    "y_tensor = torch.tensor(y, dtype=torch.float32)\n",
    "\n",
    "# 创建数据集和数据加载器\n",
    "dataset = TensorDataset(X_tensor, y_tensor)\n",
    "train_dataset, test_dataset = random_split(dataset, [int(0.8 * len(dataset)), len(dataset) - int(0.8 * len(dataset))])\n",
    "train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)\n",
    "test_loader = DataLoader(test_dataset, batch_size=32)\n",
    "\n",
    "class BostonModel(nn.Module):\n",
    "    def __init__(self):\n",
    "        super(BostonModel, self).__init__()\n",
    "        self.linear1 = nn.Linear(13, 128)\n",
    "        self.act1 = nn.ReLU()\n",
    "        self.linear2 = nn.Linear(128, 64)\n",
    "        self.act2 = nn.ReLU()\n",
    "        self.output = nn.Linear(64, 1)\n",
    "    \n",
    "    def forward(self, x):\n",
    "        x = self.linear1(x)\n",
    "        x = self.act1(x)\n",
    "        x = self.linear2(x)\n",
    "        x = self.act2(x)\n",
    "        x = self.output(x)\n",
    "        return x\n",
    "\n",
    "model = BostonModel()\n",
    "\n",
    "criterion = nn.MSELoss()\n",
    "optimizer = optim.Adam(model.parameters(), lr=0.001)\n",
    "\n",
    "num_epochs = 400\n",
    "for epoch in range(num_epochs):\n",
    "    model.train()\n",
    "    for inputs, targets in train_loader:\n",
    "        optimizer.zero_grad()\n",
    "        outputs = model(inputs)\n",
    "        loss = criterion(outputs, targets)\n",
    "        loss.backward()\n",
    "        optimizer.step()\n",
    "\n",
    "    # 打印每个epoch的损失情况\n",
    "    print(f'Epoch [{epoch+1}/{num_epochs}], Loss: {loss.item():.4f}')\n",
    "\n",
    "# 评估模型\n",
    "model.eval()\n",
    "total_mse = 0\n",
    "total_mae = 0\n",
    "total_count = 0\n",
    "\n",
    "with torch.no_grad():\n",
    "    for inputs, targets in test_loader:\n",
    "        outputs = model(inputs)\n",
    "        mse = F.mse_loss(outputs, targets, reduction='sum').item()\n",
    "        mae = F.l1_loss(outputs, targets, reduction='sum').item()\n",
    "        total_mse += mse\n",
    "        total_mae += mae\n",
    "        total_count += targets.size(0)\n",
    "\n",
    "average_mse = total_mse / total_count\n",
    "average_mae = total_mae / total_count\n",
    "\n",
    "print(f'Test MSE: {average_mse:.4f}, Test MAE: {average_mae:.4f}')\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6b4e4d07-13c1-4437-9ccc-245bb3586eec",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
