{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import matplotlib.pyplot as plt\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import seaborn as sns\n",
    "\n",
    "from target_assign_rl import IQLAgent, RuleAgent, TaskAllocationAEC, raw_env"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Environment configuration: 20 drones, four possible threat levels and\n",
    "# the sampling distribution over those levels.\n",
    "ENV_CONFIG = dict(\n",
    "    min_drones=20,\n",
    "    possible_level=[0, 0.1, 0.4, 0.8],\n",
    "    threat_dist=[0.15, 0.25, 0.35, 0.25],\n",
    ")\n",
    "\n",
    "env = raw_env(ENV_CONFIG)\n",
    "env.reset()\n",
    "\n",
    "# Network dimensions are inferred from the environment itself.\n",
    "state_dim = env.state().shape[0]\n",
    "action_dim = env.action_space(env.agents[0]).n\n",
    "\n",
    "# Baseline rule-based allocator and the trained IQL agent (A20 checkpoint).\n",
    "rule_agent = RuleAgent(num_threats=20)\n",
    "a20_agent = IQLAgent(state_dim, action_dim)\n",
    "a20_agent.load_checkpoint(\"checkpoint_A20.pth\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def simulate_drone_lost(\n",
    "    trained_agent, compare_agent=None, num_episodes=100, max_drone_lost=4, env=None\n",
    "):\n",
    "    \"\"\"Compare a full-fleet allocation against a reduced-fleet re-allocation.\n",
    "\n",
    "    Each episode runs ``trained_agent`` on ``env``; just before the last\n",
    "    drone acts, a random subset of the other drones is marked truncated\n",
    "    (simulating in-flight losses). A second environment with that many\n",
    "    fewer drones -- but an identical threat layout -- is then solved by\n",
    "    ``compare_agent``, and metrics from both runs are recorded.\n",
    "\n",
    "    Args:\n",
    "        trained_agent: Agent producing the original (full-fleet) allocation.\n",
    "        compare_agent: Agent used for the reduced fleet; defaults to\n",
    "            ``trained_agent``.\n",
    "        num_episodes: Number of episodes to simulate.\n",
    "        max_drone_lost: Inclusive upper bound on drones lost per episode.\n",
    "        env: Environment to run on; defaults to a 20-drone\n",
    "            ``TaskAllocationAEC``.\n",
    "\n",
    "    Returns:\n",
    "        A list of per-episode dicts holding rewards, coverage, kill/death\n",
    "        ratio and allocation snapshots for both runs.\n",
    "    \"\"\"\n",
    "    if env is None:\n",
    "        env = TaskAllocationAEC(dict(min_drones=20))\n",
    "\n",
    "    compare_agent = trained_agent if compare_agent is None else compare_agent\n",
    "    comparative_data = []\n",
    "\n",
    "    for episode in range(num_episodes):\n",
    "        env.reset()\n",
    "        # Fleet size of this episode. Previously hardcoded to 20, which\n",
    "        # miscounted whenever `env` was configured with another drone count.\n",
    "        num_total_drones = len(env.agents)\n",
    "        drone_lost = np.random.randint(0, max_drone_lost + 1)\n",
    "\n",
    "        for i, agent in enumerate(env.agents):\n",
    "            state = env.state()\n",
    "            action_mask = env.action_mask(agent)\n",
    "            action = trained_agent.predict(state, action_mask)\n",
    "            if drone_lost > 0 and i == len(env.agents) - 1:\n",
    "                # Resample until the currently acting drone is not among\n",
    "                # the losses; only the other drones may be truncated.\n",
    "                while True:\n",
    "                    lost_drones = np.random.choice(env.agents, drone_lost, replace=False)\n",
    "                    if agent not in lost_drones:\n",
    "                        break\n",
    "                for drone in lost_drones:\n",
    "                    env.truncations[drone] = True\n",
    "            env.step(action)\n",
    "\n",
    "        _, original_reward, _, __, original_info = env.last()\n",
    "\n",
    "        # Build a smaller environment with an identical threat layout so the\n",
    "        # reduced fleet faces exactly the scenario the original one did.\n",
    "        num_drones = num_total_drones - drone_lost\n",
    "        lost_env = TaskAllocationAEC(\n",
    "            dict(\n",
    "                min_drones=num_drones,\n",
    "                max_drones=num_drones,\n",
    "                attack_prob=env.attack_prob,\n",
    "                possible_level=env.possible_level,\n",
    "                threat_dist=env.threat_dist,\n",
    "            )\n",
    "        )\n",
    "        lost_env.reset()\n",
    "        lost_env.threat_levels = env.threat_levels.copy()\n",
    "        lost_env.actual_threats = env.actual_threats.copy()\n",
    "        lost_env.num_actual_threat = env.num_actual_threat\n",
    "        lost_env.pre_allocation = lost_env.calculate_pre_allocation()\n",
    "\n",
    "        for agent in lost_env.agents:\n",
    "            state = lost_env.state()\n",
    "            action_mask = lost_env.action_mask(agent)\n",
    "            action = compare_agent.predict(state, action_mask)\n",
    "            lost_env.step(action)\n",
    "\n",
    "        _, new_reward, _, __, new_info = lost_env.last()\n",
    "\n",
    "        episode_data = {\n",
    "            \"episode\": episode,\n",
    "            \"threat_levels\": env.threat_levels.copy(),\n",
    "            \"num_actual_threat\": env.num_actual_threat,\n",
    "            \"num_drones_lost\": drone_lost,\n",
    "            \"original_assignments\": env.actual_allocation.copy(),\n",
    "            \"original_reward\": original_reward,\n",
    "            \"original_info\": original_info,\n",
    "            \"original_coverage\": original_info[\"coverage\"],\n",
    "            \"original_threat_destroyed\": original_info[\"threat_destroyed\"],\n",
    "            \"original_drone_lost\": original_info[\"drone_lost\"],\n",
    "            \"original_kd_ratio\": original_info[\"kd_ratio\"],\n",
    "            \"original_remaining_threat\": original_info[\"num_remaining_threat\"],\n",
    "            \"new_assignments\": lost_env.actual_allocation.copy(),\n",
    "            \"new_reward\": new_reward,\n",
    "            \"new_info\": new_info,\n",
    "            \"new_coverage\": new_info[\"coverage\"],\n",
    "            \"new_threat_destroyed\": new_info[\"threat_destroyed\"],\n",
    "            \"new_drone_lost\": new_info[\"drone_lost\"],\n",
    "            \"new_kd_ratio\": new_info[\"kd_ratio\"],\n",
    "            \"new_remaining_threat\": new_info[\"num_remaining_threat\"],\n",
    "        }\n",
    "        comparative_data.append(episode_data)\n",
    "\n",
    "    return comparative_data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def analyze_compare_data(comparative_data):\n",
    "    \"\"\"Plot and print a comparison of original vs. re-allocated episodes.\n",
    "\n",
    "    Draws an eight-panel figure: six original-vs-new scatter panels with a\n",
    "    dashed y = x reference line, one panel relating drone losses to reward\n",
    "    improvement, and a heatmap of average threat levels and per-position\n",
    "    assignment counts. Finally prints the mean improvement of each metric.\n",
    "\n",
    "    Args:\n",
    "        comparative_data: Per-episode dicts as returned by\n",
    "            ``simulate_drone_lost``.\n",
    "    \"\"\"\n",
    "\n",
    "    def _identity_scatter(ax, x, y, xlabel, ylabel, title, limits=None):\n",
    "        \"\"\"Scatter ``y`` vs ``x`` with a dashed red y = x reference line.\"\"\"\n",
    "        ax.scatter(x, y)\n",
    "        lo, hi = limits if limits is not None else (x.min(), x.max())\n",
    "        ax.plot([lo, hi], [lo, hi], \"r--\")\n",
    "        ax.set_xlabel(xlabel)\n",
    "        ax.set_ylabel(ylabel)\n",
    "        ax.set_title(title)\n",
    "\n",
    "    df = pd.DataFrame(comparative_data)\n",
    "\n",
    "    fig, axs = plt.subplots(4, 2, figsize=(20, 30))\n",
    "    fig.suptitle(\"Comparison of Original and New Allocation Strategies\", fontsize=16)\n",
    "\n",
    "    # 1. Reward comparison\n",
    "    _identity_scatter(axs[0, 0], df[\"original_reward\"], df[\"new_reward\"],\n",
    "                      \"Original Reward\", \"New Reward\", \"Reward Comparison\")\n",
    "\n",
    "    # 2. Coverage comparison (coverage is a fraction, so the line spans [0, 1])\n",
    "    _identity_scatter(axs[0, 1], df[\"original_coverage\"], df[\"new_coverage\"],\n",
    "                      \"Original Coverage\", \"New Coverage\", \"Coverage Comparison\",\n",
    "                      limits=(0, 1))\n",
    "\n",
    "    # 3. Threat destruction comparison\n",
    "    _identity_scatter(axs[1, 0], df[\"original_threat_destroyed\"], df[\"new_threat_destroyed\"],\n",
    "                      \"Original Threats Destroyed\", \"New Threats Destroyed\",\n",
    "                      \"Threat Destruction Comparison\")\n",
    "\n",
    "    # 4. Drone loss comparison\n",
    "    _identity_scatter(axs[1, 1], df[\"original_drone_lost\"], df[\"new_drone_lost\"],\n",
    "                      \"Original Drones Lost\", \"New Drones Lost\", \"Drone Loss Comparison\")\n",
    "\n",
    "    # 5. Kill/death ratio comparison\n",
    "    _identity_scatter(axs[2, 0], df[\"original_kd_ratio\"], df[\"new_kd_ratio\"],\n",
    "                      \"Original K/D Ratio\", \"New K/D Ratio\", \"K/D Ratio Comparison\")\n",
    "\n",
    "    # 6. Remaining threat comparison\n",
    "    _identity_scatter(axs[2, 1], df[\"original_remaining_threat\"], df[\"new_remaining_threat\"],\n",
    "                      \"Original Remaining Threat\", \"New Remaining Threat\",\n",
    "                      \"Remaining Threat Comparison\")\n",
    "\n",
    "    # 7. Relation between the number of lost drones and reward improvement\n",
    "    improvement = df[\"new_reward\"] - df[\"original_reward\"]\n",
    "    axs[3, 0].scatter(df[\"num_drones_lost\"], improvement)\n",
    "    axs[3, 0].set_xlabel(\"Number of Drones Lost\")\n",
    "    axs[3, 0].set_ylabel(\"Reward Improvement\")\n",
    "    axs[3, 0].set_title(\"Drone Loss vs Performance Improvement\")\n",
    "\n",
    "    # 8. Heatmap of average threat levels and assignment counts. The number\n",
    "    # of threat positions is taken from the data rather than hardcoded to 20.\n",
    "    n_threats = len(comparative_data[0][\"threat_levels\"])\n",
    "    avg_original_assignments = np.mean(\n",
    "        [\n",
    "            np.bincount(episode[\"original_assignments\"], minlength=n_threats)\n",
    "            for episode in comparative_data\n",
    "        ],\n",
    "        axis=0,\n",
    "    )\n",
    "    avg_new_assignments = np.mean(\n",
    "        [\n",
    "            np.bincount(episode[\"new_assignments\"], minlength=n_threats)\n",
    "            for episode in comparative_data\n",
    "        ],\n",
    "        axis=0,\n",
    "    )\n",
    "    avg_threat_levels = np.mean(\n",
    "        [episode[\"threat_levels\"] for episode in comparative_data], axis=0\n",
    "    )\n",
    "\n",
    "    assignment_data = np.vstack(\n",
    "        (avg_threat_levels, avg_original_assignments, avg_new_assignments)\n",
    "    )\n",
    "    sns.heatmap(assignment_data, ax=axs[3, 1], cmap=\"YlOrRd\", annot=True, fmt=\".2f\")\n",
    "    axs[3, 1].set_title(\"Average Threat Levels and Assignments\")\n",
    "    axs[3, 1].set_ylabel(\"Threat Level | Original | New\")\n",
    "    axs[3, 1].set_xlabel(\"Threat Position\")\n",
    "\n",
    "    plt.tight_layout()\n",
    "    plt.show()\n",
    "\n",
    "    # Mean per-episode delta (new - original) for each tracked metric.\n",
    "    print(\"\\nAverage improvements:\")\n",
    "    for label, col in [\n",
    "        (\"Reward\", \"reward\"),\n",
    "        (\"Coverage\", \"coverage\"),\n",
    "        (\"Threats destroyed\", \"threat_destroyed\"),\n",
    "        (\"Drones lost\", \"drone_lost\"),\n",
    "        (\"K/D ratio\", \"kd_ratio\"),\n",
    "        (\"Remaining threat\", \"remaining_threat\"),\n",
    "    ]:\n",
    "        delta = (df[f\"new_{col}\"] - df[f\"original_{col}\"]).mean()\n",
    "        print(f\"{label}: {delta:.4f}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Baseline comparison: trained A20 agent vs. the rule-based agent on the\n",
    "# shared `env`, with no simulated drone losses (max_drone_lost=0).\n",
    "compare_data = simulate_drone_lost(a20_agent, rule_agent, num_episodes=1000, max_drone_lost=0, env=env)\n",
    "df = pd.DataFrame(compare_data)\n",
    "# Rich display of summary statistics over all numeric per-episode columns.\n",
    "df.describe()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Render the eight-panel comparison figure and print mean improvements.\n",
    "analyze_compare_data(compare_data)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "driving_gym",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.14"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
