{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "import numpy as np\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.metrics import mean_squared_error, r2_score\n",
    "import matplotlib.pyplot as plt\n",
    "from scipy.optimize import differential_evolution\n",
    "from pyswarm import pso  # if missing: pip install pyswarm\n",
    "from geneticalgorithm import geneticalgorithm as ga  # if missing: pip install geneticalgorithm\n",
    "\n",
    "# Use SimHei (bold CJK font) so Chinese axis labels/titles render correctly\n",
    "plt.rcParams['font.sans-serif'] = ['SimHei']\n",
    "plt.rcParams['axes.unicode_minus'] = False  # keep the minus sign renderable with a CJK font\n",
    "\n",
    "# Load the measurement data from the local Excel file.\n",
    "# Columns used below: 温度 (temperature), 频率 (frequency), Bm (peak flux density), 磁芯损耗 (core loss)\n",
    "data = pd.read_excel('问题2.xlsx')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Global typography settings so figures render CJK text without mojibake\n",
    "plt.rcParams['figure.dpi'] = 300  # high resolution for every subsequent figure\n",
    "config = {\n",
    "    \"font.serif\": ['SimSun'],  # SimSun (宋体) must be installed on the system\n",
    "    \"font.family\": 'serif',\n",
    "    \"font.size\": 16,\n",
    "    \"mathtext.fontset\": 'stix',\n",
    "}\n",
    "plt.rcParams.update(config)  # apply the settings in one shot"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 选择需要的列\n",
    "X = data[['温度', '频率', 'Bm']]\n",
    "y = data['磁芯损耗']\n",
    "\n",
    "# 数据集划分\n",
    "X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\n",
    "\n",
    "# 定义原始斯坦麦茨方程\n",
    "def original_steinmetz_eq(frequency, flux_density, k, alpha, beta):\n",
    "    return k * (frequency ** alpha) * (flux_density ** beta)\n",
    "\n",
    "# 优化原始模型参数\n",
    "def objective_original(params, X, y):\n",
    "    k, alpha, beta = params\n",
    "    frequency = X['频率']\n",
    "    flux_density = X['Bm']\n",
    "    y_pred = original_steinmetz_eq(frequency, flux_density, k, alpha, beta)\n",
    "    return np.sqrt(np.mean((y - y_pred) ** 2))\n",
    "\n",
    "# 原始模型参数拟合\n",
    "initial_guess = [1, 2, 2]\n",
    "bounds = [(0.001, 5), (1.01, 2.99), (2.01, 2.99)]\n",
    "result_original = differential_evolution(objective_original, bounds, args=(X_train, y_train), maxiter=100)\n",
    "k_orig, alpha_orig, beta_orig = result_original.x\n",
    "print(f\"原始模型参数: k = {k_orig:.4f}, alpha = {alpha_orig:.4f}, beta = {beta_orig:.4f}\")\n",
    "\n",
    "# 计算原始模型的预测值\n",
    "y_pred_train_orig = original_steinmetz_eq(X_train['频率'], X_train['Bm'], k_orig, alpha_orig, beta_orig)\n",
    "y_pred_test_orig = original_steinmetz_eq(X_test['频率'], X_test['Bm'], k_orig, alpha_orig, beta_orig)\n",
    "mse_train_orig = mean_squared_error(y_train, y_pred_train_orig)\n",
    "r2_train_orig = r2_score(y_train, y_pred_train_orig)\n",
    "mse_test_orig = mean_squared_error(y_test, y_pred_test_orig)\n",
    "r2_test_orig = r2_score(y_test, y_pred_test_orig)\n",
    "print(f\"原始模型 训练集 RMSE: {np.sqrt(mse_train_orig):.4f}, R²: {r2_train_orig:.4f}\")\n",
    "print(f\"原始模型 测试集 RMSE: {np.sqrt(mse_test_orig):.4f}, R²: {r2_test_orig:.4f}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "def modified_steinmetz_eq(frequency, flux_density, temperature, k, alpha, beta, gamma):\n",
    "    \"\"\"Temperature-corrected Steinmetz equation: P = k * f**alpha * Bm**beta * T**gamma.\"\"\"\n",
    "    return k * (frequency ** alpha) * (flux_density ** beta) * (temperature ** gamma)\n",
    "\n",
    "def objective_modified(params, X, y):\n",
    "    \"\"\"RMSE of the corrected model for a parameter vector (k, alpha, beta, gamma).\"\"\"\n",
    "    k, alpha, beta, gamma = params\n",
    "    frequency = X['频率']\n",
    "    flux_density = X['Bm']\n",
    "    temperature = X['温度']\n",
    "    y_pred = modified_steinmetz_eq(frequency, flux_density, temperature, k, alpha, beta, gamma)\n",
    "    return np.sqrt(np.mean((y - y_pred) ** 2))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Fit the temperature-corrected Steinmetz equation with three metaheuristics.\n",
    "# The gamma bound spans (-5, 5), so the temperature exponent may be negative.\n",
    "bounds_modified = [(0.001, 5), (1.01, 2.99), (2.01, 2.99), (-5, 5)]\n",
    "\n",
    "# 1. Differential evolution (scipy)\n",
    "result_de = differential_evolution(objective_modified, bounds_modified, args=(X_train, y_train), maxiter=100)\n",
    "params_de = result_de.x\n",
    "print(f\"差分进化算法最佳参数: k = {params_de[0]:.4f}, alpha = {params_de[1]:.4f}, beta = {params_de[2]:.4f}, gamma = {params_de[3]:.4f}\")\n",
    "\n",
    "# 2. Particle swarm optimisation (pyswarm); lb/ub are per-parameter lower/upper bounds\n",
    "params_pso, _ = pso(objective_modified, [b[0] for b in bounds_modified], [b[1] for b in bounds_modified], args=(X_train, y_train))\n",
    "print(f\"粒子群优化算法最佳参数: k = {params_pso[0]:.4f}, alpha = {params_pso[1]:.4f}, beta = {params_pso[2]:.4f}, gamma = {params_pso[3]:.4f}\")\n",
    "\n",
    "# 3. Genetic algorithm (geneticalgorithm package, default hyper-parameters)\n",
    "model_ga = ga(function=lambda x: objective_modified(x, X_train, y_train), dimension=4, variable_type='real', variable_boundaries=np.array(bounds_modified))\n",
    "model_ga.run()\n",
    "params_ga = model_ga.output_dict['variable']\n",
    "print(f\"遗传算法最佳参数: k = {params_ga[0]:.4f}, alpha = {params_ga[1]:.4f}, beta = {params_ga[2]:.4f}, gamma = {params_ga[3]:.4f}\")\n",
    "\n",
    "# Keep the fitted parameter vectors keyed by algorithm name for the later cells\n",
    "results = {\n",
    "    '差分进化': params_de,\n",
    "    '粒子群优化': params_pso,\n",
    "    '遗传算法': params_ga\n",
    "}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Evaluate the original and the corrected equations on both splits.\n",
    "# Predictions of the original Steinmetz equation with the parameters fitted earlier\n",
    "y_pred_train_orig = original_steinmetz_eq(X_train['频率'], X_train['Bm'], k_orig, alpha_orig, beta_orig)\n",
    "y_pred_test_orig = original_steinmetz_eq(X_test['频率'], X_test['Bm'], k_orig, alpha_orig, beta_orig)\n",
    "\n",
    "# Error metrics for the original equation\n",
    "mse_train_orig = mean_squared_error(y_train, y_pred_train_orig)\n",
    "rmse_train_orig = np.sqrt(mse_train_orig)\n",
    "r2_train_orig = r2_score(y_train, y_pred_train_orig)\n",
    "\n",
    "mse_test_orig = mean_squared_error(y_test, y_pred_test_orig)\n",
    "rmse_test_orig = np.sqrt(mse_test_orig)\n",
    "r2_test_orig = r2_score(y_test, y_pred_test_orig)\n",
    "\n",
    "# Report the original equation\n",
    "print(f\"原始方程 训练集: RMSE = {rmse_train_orig:.4f}, MSE = {mse_train_orig:.4f}, R² = {r2_train_orig:.4f}\")\n",
    "print(f\"原始方程 测试集: RMSE = {rmse_test_orig:.4f}, MSE = {mse_test_orig:.4f}, R² = {r2_test_orig:.4f}\")\n",
    "\n",
    "# Same metrics for each optimisation algorithm's corrected equation\n",
    "for algo_name, best_params in results.items():\n",
    "    # Predictions with this algorithm's best-fit parameters\n",
    "    y_pred_train_mod = modified_steinmetz_eq(X_train['频率'], X_train['Bm'], X_train['温度'], *best_params)\n",
    "    y_pred_test_mod = modified_steinmetz_eq(X_test['频率'], X_test['Bm'], X_test['温度'], *best_params)\n",
    "\n",
    "    # Error metrics of the corrected equation\n",
    "    mse_train_mod = mean_squared_error(y_train, y_pred_train_mod)\n",
    "    rmse_train_mod = np.sqrt(mse_train_mod)\n",
    "    r2_train_mod = r2_score(y_train, y_pred_train_mod)\n",
    "\n",
    "    mse_test_mod = mean_squared_error(y_test, y_pred_test_mod)\n",
    "    rmse_test_mod = np.sqrt(mse_test_mod)\n",
    "    r2_test_mod = r2_score(y_test, y_pred_test_mod)\n",
    "\n",
    "    # Report this algorithm's corrected equation\n",
    "    print(f\"{algo_name} 修正方程 训练集: RMSE = {rmse_train_mod:.4f}, MSE = {mse_train_mod:.4f}, R² = {r2_train_mod:.4f}\")\n",
    "    print(f\"{algo_name} 修正方程 测试集: RMSE = {rmse_test_mod:.4f}, MSE = {mse_test_mod:.4f}, R² = {r2_test_mod:.4f}\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Radar (polar) comparison of RMSE and R² for the original model and the\n",
    "# three corrected models. (Redundant re-imports and the unused\n",
    "# `from math import pi` were removed — everything is imported in the first cell.)\n",
    "rmse_values_train = [rmse_train_orig]\n",
    "r2_values_train = [r2_train_orig]\n",
    "\n",
    "rmse_values_test = [rmse_test_orig]\n",
    "r2_values_test = [r2_test_orig]\n",
    "\n",
    "# Append the metrics of each optimisation algorithm's corrected model\n",
    "for algo_name, best_params in results.items():\n",
    "    y_pred_train_mod = modified_steinmetz_eq(X_train['频率'], X_train['Bm'], X_train['温度'], *best_params)\n",
    "    y_pred_test_mod = modified_steinmetz_eq(X_test['频率'], X_test['Bm'], X_test['温度'], *best_params)\n",
    "\n",
    "    rmse_values_train.append(np.sqrt(mean_squared_error(y_train, y_pred_train_mod)))\n",
    "    r2_values_train.append(r2_score(y_train, y_pred_train_mod))\n",
    "    rmse_values_test.append(np.sqrt(mean_squared_error(y_test, y_pred_test_mod)))\n",
    "    r2_values_test.append(r2_score(y_test, y_pred_test_mod))\n",
    "\n",
    "labels = ['原始方程', '差分进化', '粒子群优化', '遗传算法']\n",
    "\n",
    "angles = np.linspace(0, 2 * np.pi, len(labels), endpoint=False).tolist()\n",
    "angles += angles[:1]  # close the polygon\n",
    "\n",
    "# Build closed copies for plotting instead of mutating the metric lists in\n",
    "# place: later cells reuse the 4-element lists, and in-place closure left\n",
    "# them with 5 entries, breaking the bubble-chart cell's size match.\n",
    "rmse_train_closed = rmse_values_train + rmse_values_train[:1]\n",
    "r2_train_closed = r2_values_train + r2_values_train[:1]\n",
    "rmse_test_closed = rmse_values_test + rmse_values_test[:1]\n",
    "r2_test_closed = r2_values_test + r2_values_test[:1]\n",
    "\n",
    "fig, ax = plt.subplots(1, 2, figsize=(14, 6), subplot_kw=dict(polar=True))\n",
    "\n",
    "# Left panel: RMSE\n",
    "ax[0].fill(angles, rmse_train_closed, color='#FF6F61', alpha=0.25)\n",
    "ax[0].plot(angles, rmse_train_closed, color='#FF6F61', linewidth=2, label='训练集 RMSE')\n",
    "ax[0].fill(angles, rmse_test_closed, color='#6B5B95', alpha=0.25)\n",
    "ax[0].plot(angles, rmse_test_closed, color='#6B5B95', linewidth=2, label='测试集 RMSE')\n",
    "\n",
    "ax[0].set_xticks(angles[:-1])\n",
    "ax[0].set_xticklabels(labels)\n",
    "ax[0].set_yticks([10000, 20000, 30000, 40000, 50000])  # adjust to the data scale if needed\n",
    "ax[0].set_ylim(0, 50000)\n",
    "ax[0].set_title('RMSE 对比', size=14, color='black', weight='bold')\n",
    "\n",
    "# Right panel: R²\n",
    "ax[1].fill(angles, r2_train_closed, color='#88B04B', alpha=0.25)\n",
    "ax[1].plot(angles, r2_train_closed, color='#88B04B', linewidth=2, label='训练集 R²')\n",
    "ax[1].fill(angles, r2_test_closed, color='#F7CAC9', alpha=0.25)\n",
    "ax[1].plot(angles, r2_test_closed, color='#F7CAC9', linewidth=2, label='测试集 R²')\n",
    "\n",
    "ax[1].set_xticks(angles[:-1])\n",
    "ax[1].set_xticklabels(labels)\n",
    "ax[1].set_yticks([0.2, 0.4, 0.6, 0.8, 1])\n",
    "ax[1].set_ylim(0, 1)\n",
    "ax[1].set_title('R² 对比', size=14, color='black', weight='bold')\n",
    "\n",
    "# Legends outside the polar axes\n",
    "ax[0].legend(loc='upper right', bbox_to_anchor=(1.3, 1))\n",
    "ax[1].legend(loc='upper right', bbox_to_anchor=(1.3, 1))\n",
    "\n",
    "plt.tight_layout()\n",
    "plt.show()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Error-bar comparison of RMSE and R² across the four models.\n",
    "# NOTE(review): the standard deviations below are placeholder values, not\n",
    "# measured run-to-run variation — replace with real statistics when available.\n",
    "RMSE_STD_TRAIN = 1000\n",
    "RMSE_STD_TEST = 1200\n",
    "R2_STD_TRAIN = 0.02\n",
    "R2_STD_TEST = 0.03\n",
    "\n",
    "rmse_values_train = [rmse_train_orig]\n",
    "r2_values_train = [r2_train_orig]\n",
    "\n",
    "rmse_values_test = [rmse_test_orig]\n",
    "r2_values_test = [r2_test_orig]\n",
    "\n",
    "rmse_std_train = [RMSE_STD_TRAIN]\n",
    "r2_std_train = [R2_STD_TRAIN]\n",
    "\n",
    "rmse_std_test = [RMSE_STD_TEST]\n",
    "r2_std_test = [R2_STD_TEST]\n",
    "\n",
    "# Append metrics (and placeholder stds) for each optimisation algorithm\n",
    "for algo_name, best_params in results.items():\n",
    "    y_pred_train_mod = modified_steinmetz_eq(X_train['频率'], X_train['Bm'], X_train['温度'], *best_params)\n",
    "    y_pred_test_mod = modified_steinmetz_eq(X_test['频率'], X_test['Bm'], X_test['温度'], *best_params)\n",
    "\n",
    "    rmse_values_train.append(np.sqrt(mean_squared_error(y_train, y_pred_train_mod)))\n",
    "    r2_values_train.append(r2_score(y_train, y_pred_train_mod))\n",
    "\n",
    "    rmse_values_test.append(np.sqrt(mean_squared_error(y_test, y_pred_test_mod)))\n",
    "    r2_values_test.append(r2_score(y_test, y_pred_test_mod))\n",
    "\n",
    "    rmse_std_train.append(RMSE_STD_TRAIN)\n",
    "    r2_std_train.append(R2_STD_TRAIN)\n",
    "\n",
    "    rmse_std_test.append(RMSE_STD_TEST)\n",
    "    r2_std_test.append(R2_STD_TEST)\n",
    "\n",
    "labels = ['原始方程', '差分进化', '粒子群优化', '遗传算法']\n",
    "\n",
    "fig, ax = plt.subplots(1, 2, figsize=(12, 4))\n",
    "\n",
    "# RMSE with error bars\n",
    "ax[0].errorbar(labels, rmse_values_train, yerr=rmse_std_train, fmt='o-', label='训练集 RMSE', capsize=5, color='blue')\n",
    "ax[0].errorbar(labels, rmse_values_test, yerr=rmse_std_test, fmt='s-', label='测试集 RMSE', capsize=5, color='red')\n",
    "ax[0].set_title('RMSE 误差棒图')\n",
    "ax[0].set_ylabel('RMSE')\n",
    "ax[0].legend()\n",
    "\n",
    "# R² with error bars\n",
    "ax[1].errorbar(labels, r2_values_train, yerr=r2_std_train, fmt='o-', label='训练集 R²', capsize=5, color='green')\n",
    "ax[1].errorbar(labels, r2_values_test, yerr=r2_std_test, fmt='s-', label='测试集 R²', capsize=5, color='purple')\n",
    "ax[1].set_title('R² 误差棒图')\n",
    "ax[1].set_ylabel('R²')\n",
    "ax[1].legend()\n",
    "\n",
    "plt.tight_layout()\n",
    "plt.show()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Bubble chart of RMSE vs R²; bubble size encodes an assumed model-complexity\n",
    "# score (larger = more complex). Order matches the metric lists:\n",
    "# 原始方程, 差分进化, 粒子群优化, 遗传算法.\n",
    "model_complexity = [10, 30, 50, 40]\n",
    "\n",
    "# Use only the first len(model_complexity) metric entries: the radar-chart\n",
    "# cell may have appended a closing duplicate to these lists, which would\n",
    "# otherwise make scatter() fail on a size mismatch (5 points vs 4 sizes).\n",
    "n = len(model_complexity)\n",
    "sizes = [c * 10 for c in model_complexity]\n",
    "\n",
    "plt.figure(figsize=(10, 6))\n",
    "\n",
    "# Training-set and test-set bubbles\n",
    "plt.scatter(rmse_values_train[:n], r2_values_train[:n], s=sizes, alpha=0.5, c='blue', label='训练集')\n",
    "plt.scatter(rmse_values_test[:n], r2_values_test[:n], s=sizes, alpha=0.5, c='red', label='测试集')\n",
    "\n",
    "# Axis labels and title\n",
    "plt.xlabel('RMSE')\n",
    "plt.ylabel('R²')\n",
    "plt.title('RMSE 和 R² 气泡图')\n",
    "\n",
    "plt.legend()\n",
    "\n",
    "plt.show()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Colour scheme for the actual-vs-predicted comparison plots\n",
    "actual_color = '#1f77b4'  # muted blue: measured core loss\n",
    "original_color = '#7f7f7f'  # grey: original Steinmetz equation\n",
    "corrected_color = '#ff7f0e'  # muted orange: corrected equation\n",
    "\n",
    "# Plot every 5th sample so the markers stay readable\n",
    "sample_step = 5\n",
    "\n",
    "# One figure per optimisation algorithm. Relies on y_pred_train_orig /\n",
    "# y_pred_test_orig computed in the earlier original-model cell.\n",
    "for name, best_params in results.items():\n",
    "    # Corrected-model predictions on both splits\n",
    "    y_pred_train_mod = modified_steinmetz_eq(X_train['频率'], X_train['Bm'], X_train['温度'], *best_params)\n",
    "    y_pred_test_mod = modified_steinmetz_eq(X_test['频率'], X_test['Bm'], X_test['温度'], *best_params)\n",
    "\n",
    "    # Two panels: training set (left), test set (right)\n",
    "    fig, axes = plt.subplots(1, 2, figsize=(16, 5))\n",
    "\n",
    "    # Left panel: training set\n",
    "    axes[0].plot(range(0, len(y_train), sample_step), y_train[::sample_step], label='实际磁芯损耗',\n",
    "                 color=actual_color, linestyle='-', marker='o', markersize=6, alpha=0.8)\n",
    "    axes[0].plot(range(0, len(y_pred_train_orig), sample_step), y_pred_train_orig[::sample_step],\n",
    "                 label='原始斯坦麦茨方程', color=original_color, linestyle='--', marker='s', markersize=6, alpha=0.8)\n",
    "    axes[0].plot(range(0, len(y_pred_train_mod), sample_step), y_pred_train_mod[::sample_step],\n",
    "                 label=f'{name}修正斯坦麦茨方程', color=corrected_color, linestyle='-.', marker='^', markersize=6, alpha=0.8)\n",
    "    axes[0].set_xlabel('样本编号')\n",
    "    axes[0].set_ylabel('磁芯损耗')\n",
    "    axes[0].set_title(f'训练集上{name}修正模型与原始模型的对比')\n",
    "    axes[0].legend(loc='upper left', bbox_to_anchor=(0, 1), fontsize=14)\n",
    "    axes[0].grid(True)\n",
    "\n",
    "    # Right panel: test set\n",
    "    axes[1].plot(range(0, len(y_test), sample_step), y_test[::sample_step], label='实际磁芯损耗',\n",
    "                 color=actual_color, linestyle='-', marker='o', markersize=6, alpha=0.8)\n",
    "    axes[1].plot(range(0, len(y_pred_test_orig), sample_step), y_pred_test_orig[::sample_step],\n",
    "                 label='原始斯坦麦茨方程', color=original_color, linestyle='--', marker='s', markersize=6, alpha=0.8)\n",
    "    axes[1].plot(range(0, len(y_pred_test_mod), sample_step), y_pred_test_mod[::sample_step],\n",
    "                 label=f'{name}修正斯坦麦茨方程', color=corrected_color, linestyle='-.', marker='^', markersize=6, alpha=0.8)\n",
    "    axes[1].set_xlabel('样本编号')\n",
    "    axes[1].set_ylabel('磁芯损耗')\n",
    "    axes[1].set_title(f'测试集上{name}修正模型与原始模型的对比')\n",
    "    axes[1].legend(loc='upper left', bbox_to_anchor=(0, 1), fontsize=14)\n",
    "    axes[1].grid(True)\n",
    "\n",
    "    # Layout and display\n",
    "    plt.tight_layout()\n",
    "    plt.show()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sanity check: inspect the algorithm-name keys of the results dict\n",
    "print(results.keys())\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Colour scheme for the predicted-vs-actual scatter plot\n",
    "actual_color = '#1f77b4'  # muted blue (unused below; kept for consistency with other cells)\n",
    "original_color = '#ff7f0e'  # original Steinmetz equation\n",
    "de_color = '#7f7f7f'  # differential evolution (grey)\n",
    "pso_color = '#2ca02c'  # particle swarm optimisation (green)\n",
    "ga_color = '#d62728'  # genetic algorithm (red)\n",
    "\n",
    "# Scatter of test-set predictions against the ideal y = x line\n",
    "plt.figure(figsize=(10, 5))\n",
    "\n",
    "# Original Steinmetz equation\n",
    "plt.scatter(y_test, y_pred_test_orig, label='原始斯坦麦茨方程', color=original_color, alpha=0.6, edgecolors='w')\n",
    "\n",
    "# Differential-evolution corrected equation\n",
    "y_pred_test_de = modified_steinmetz_eq(X_test['频率'], X_test['Bm'], X_test['温度'], *results['差分进化'])\n",
    "plt.scatter(y_test, y_pred_test_de, label='差分进化修正斯坦麦茨方程', color=de_color, alpha=0.6, edgecolors='w')\n",
    "\n",
    "# Particle-swarm corrected equation\n",
    "y_pred_test_pso = modified_steinmetz_eq(X_test['频率'], X_test['Bm'], X_test['温度'], *results['粒子群优化'])\n",
    "plt.scatter(y_test, y_pred_test_pso, label='粒子群优化修正斯坦麦茨方程', color=pso_color, alpha=0.6, edgecolors='w')\n",
    "\n",
    "# Genetic-algorithm corrected equation\n",
    "y_pred_test_ga = modified_steinmetz_eq(X_test['频率'], X_test['Bm'], X_test['温度'], *results['遗传算法'])\n",
    "plt.scatter(y_test, y_pred_test_ga, label='遗传算法修正斯坦麦茨方程', color=ga_color, alpha=0.6, edgecolors='w')\n",
    "\n",
    "# Ideal y = x reference line spanning up to the largest observed/predicted value\n",
    "max_value = max(max(y_test), max(y_pred_test_orig), max(y_pred_test_de), max(y_pred_test_pso), max(y_pred_test_ga))\n",
    "plt.plot([0, max_value], [0, max_value], color='red', linestyle='--', label='理想直线 y=x')\n",
    "\n",
    "# Labels, title, legend\n",
    "plt.xlabel('实际值')\n",
    "plt.ylabel('预测值')\n",
    "plt.title('测试集预测值与实际值的对比')\n",
    "plt.legend()\n",
    "plt.grid(True)\n",
    "plt.show()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Residual histograms in a 2x2 grid: the original model plus the three\n",
    "# optimisation algorithms' corrected models.\n",
    "fig, axes = plt.subplots(2, 2, figsize=(12, 7))\n",
    "axes = axes.flatten()\n",
    "\n",
    "# Colour per panel: soft coral, purple, green, pink\n",
    "colors = ['#FF6F61', '#6B5B95', '#88B04B', '#F7CAC9']\n",
    "\n",
    "# Panel 0: residual distribution of the original model (recomputed here so\n",
    "# this cell does not depend on which cell last wrote y_pred_test_orig)\n",
    "y_pred_test_orig = original_steinmetz_eq(X_test['频率'], X_test['Bm'], k_orig, alpha_orig, beta_orig)\n",
    "residuals_orig = y_test - y_pred_test_orig\n",
    "axes[0].hist(residuals_orig, bins=30, color=colors[3], alpha=0.7, edgecolor='black', linewidth=1.2)\n",
    "axes[0].axvline(0, color='red', linestyle='--', linewidth=2)\n",
    "axes[0].grid(True, linestyle='--', alpha=0.6)\n",
    "axes[0].set_title('原始斯坦麦茨方程残差分布', fontsize=16, fontweight='bold')\n",
    "axes[0].set_xlabel('残差', fontsize=16)\n",
    "axes[0].set_ylabel('频数', fontsize=16)\n",
    "\n",
    "# Panels 1-3: residual distribution per optimisation algorithm\n",
    "for i, (name, params) in enumerate(results.items()):\n",
    "    # Test-set predictions of this algorithm's corrected model\n",
    "    y_pred_test_mod = modified_steinmetz_eq(X_test['频率'], X_test['Bm'], X_test['温度'], *params)\n",
    "    \n",
    "    # Residuals (actual minus predicted)\n",
    "    residuals = y_test - y_pred_test_mod\n",
    "\n",
    "    # Histogram of the residuals with a zero reference line\n",
    "    axes[i+1].hist(residuals, bins=30, color=colors[i], alpha=0.7, edgecolor='black', linewidth=1.2)\n",
    "    axes[i+1].axvline(0, color='red', linestyle='--', linewidth=2)\n",
    "    axes[i+1].grid(True, linestyle='--', alpha=0.6)\n",
    "    axes[i+1].set_title(f'{name} 修正模型残差分布', fontsize=16, fontweight='bold')\n",
    "    axes[i+1].set_xlabel('残差', fontsize=16)\n",
    "    axes[i+1].set_ylabel('频数', fontsize=16)\n",
    "\n",
    "# Layout and display\n",
    "plt.tight_layout()\n",
    "plt.show()\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "py38torch",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.19"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
