#!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
@Project ：V2 
@File    ：DB6.py
@IDE     ：PyCharm 
@Author  ：郭星
@Date    ：2025/9/24 10:07 
'''
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import random
from collections import deque
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from typing import List, Dict, Tuple
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import random
from collections import deque
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from typing import List, Dict, Tuple

# -------------------------- Basic configuration --------------------------
# All networks and tensors are placed on this device (GPU when available).
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"使用设备: {device}")

# Matplotlib setup for rendering Chinese labels in the Gantt charts.
plt.rcParams["axes.unicode_minus"] = False  # render minus signs correctly with CJK fonts
# Fix: the font list previously repeated "SimHei" four times; one entry suffices.
plt.rcParams["font.family"] = ["SimHei"]


# -------------------------- 环境建模（MDP） --------------------------
class GasSchedulingEnv:
    """MDP environment for two-stage gas-cylinder scheduling.

    Every workpiece (cylinder) must first be inflated on one of the
    inflation machines, then have each of its gas components analyzed on a
    per-component analysis machine.  A single action schedules one
    workpiece end-to-end; the episode ends when all workpieces are done.
    """

    def __init__(self,
                 num_workpieces: int = 50,
                 num_components: int = 3,
                 num_inflation_eq: int = 10,
                 num_analysis_eq_per_component: int = 3):
        self.num_workpieces = num_workpieces
        self.num_components = num_components
        self.num_inflation_eq = num_inflation_eq
        self.num_analysis_eq = num_analysis_eq_per_component

        # Random processing times, fixed for the lifetime of this instance:
        # inflation in [5, 15), per-component analysis in [3, 10).
        self.inflation_time = np.random.randint(5, 15, size=num_workpieces)
        self.analysis_time = np.random.randint(3, 10, size=(num_workpieces, num_components))
        self.reset()

    def reset(self) -> np.ndarray:
        """Clear all schedules and return the initial encoded state."""
        # Per-machine "next free" timestamps.
        self.inflation_eq_available = np.zeros(self.num_inflation_eq, dtype=np.float32)
        self.analysis_eq_available = np.zeros((self.num_components, self.num_analysis_eq), dtype=np.float32)
        # Per-workpiece completion flags for each stage.
        self.workpiece_inflated = np.zeros(self.num_workpieces, dtype=bool)
        self.workpiece_analyzed = np.zeros(self.num_workpieces, dtype=bool)
        self.completed_workpieces = 0
        self.global_time = 0.0
        return self._encode_state()

    def _encode_state(self) -> np.ndarray:
        """Encode machine availability (scaled by a nominal horizon of 200)
        plus per-workpiece completion flags into one flat float32 vector."""
        state_components = [
            self.inflation_eq_available / 200.0,
            self.analysis_eq_available.flatten() / 200.0,
            self.workpiece_inflated.astype(int),
            self.workpiece_analyzed.astype(int)
        ]
        return np.concatenate(state_components, dtype=np.float32)

    def step(self, action: Tuple[int, int, List[Tuple[int, int]]]) -> Tuple[np.ndarray, float, bool, Dict]:
        """Apply one scheduling action.

        action: (workpiece id, inflation equipment id,
                 [(component id, analysis equipment id), ...]).

        Returns (next_state, reward, done, info).  The info dict always
        contains the "action_valid" key so callers can branch on it safely;
        invalid actions leave the schedule untouched and cost -10 reward.
        """
        wp_id, inflate_eq_id, analysis_assign = action

        # Action validity check.
        valid = True
        reason = ""
        if self.workpiece_inflated[wp_id]:
            valid = False
            reason = f"工件{wp_id}已完成充气"
        elif not (0 <= inflate_eq_id < self.num_inflation_eq):
            valid = False
            reason = f"充气设备ID{inflate_eq_id}无效"
        elif len(analysis_assign) != self.num_components:
            valid = False
            reason = f"分析分配长度不匹配（需{self.num_components}个，实际{len(analysis_assign)}个）"
        else:
            for comp_id, eq_id in analysis_assign:
                if not (0 <= comp_id < self.num_components) or not (0 <= eq_id < self.num_analysis_eq):
                    valid = False
                    reason = f"分析分配(组分{comp_id}, 设备{eq_id})无效"
                    break

        # Key point: the info dict carries "action_valid" whether or not the
        # action was valid, so callers never need a key-existence check.
        if not valid:
            return (
                self._encode_state(),
                -10.0,
                False,
                {
                    "info": reason,
                    "global_time": self.global_time,
                    "completed": self.completed_workpieces,
                    "action_valid": False  # invalid action explicitly flagged
                }
            )

        # Inflation stage: start when both the clock and the machine allow.
        inflate_start = max(self.global_time, self.inflation_eq_available[inflate_eq_id])
        inflate_end = inflate_start + self.inflation_time[wp_id]
        self.inflation_eq_available[inflate_eq_id] = inflate_end
        self.workpiece_inflated[wp_id] = True

        # Analysis stage: each component starts no earlier than inflation end
        # and no earlier than its assigned machine becomes free.
        analysis_start = inflate_end
        max_analysis_end = analysis_start
        for comp_id, eq_id in analysis_assign:
            comp_start = max(analysis_start, self.analysis_eq_available[comp_id, eq_id])
            comp_end = comp_start + self.analysis_time[wp_id, comp_id]
            self.analysis_eq_available[comp_id, eq_id] = comp_end
            max_analysis_end = max(max_analysis_end, comp_end)

        # Advance the global clock to the end of the latest operation.
        self.global_time = max(inflate_end, max_analysis_end)
        self.workpiece_analyzed[wp_id] = True
        self.completed_workpieces += 1

        # Reward: dense negative time increment per step; a terminal bonus
        # inversely proportional to the final makespan.
        if self.completed_workpieces == self.num_workpieces:
            reward = 1000.0 / self.global_time
        else:
            time_increment = self.global_time - inflate_start
            reward = -time_increment

        done = (self.completed_workpieces == self.num_workpieces)
        return self._encode_state(), reward, done, {
            "global_time": round(self.global_time, 2),
            "completed_workpieces": self.completed_workpieces,
            "current_workpiece": wp_id,
            "action_valid": True  # valid action explicitly flagged
        }

    def render(self, schedule_log: List[Dict]) -> None:
        """Draw a two-panel Gantt chart (inflation / analysis stages) from
        the caller-built schedule_log entries and show it with matplotlib."""
        fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(14, 10))
        fig.suptitle(f"DQN气体调度结果（总时间：{self.global_time:.2f}）", fontsize=16)

        # One color per workpiece (tab10 colors repeat beyond 10 workpieces).
        colors = plt.cm.tab10(np.linspace(0, 1, self.num_workpieces))

        # Inflation-stage panel: one row per inflation machine.
        ax1.set_title("充气阶段", fontsize=12)
        ax1.set_xlabel("时间")
        ax1.set_ylabel("充气设备")
        y_ticks_inflate = []
        y_labels_inflate = []

        for eq_id in range(self.num_inflation_eq):
            y_pos = eq_id + 1
            y_ticks_inflate.append(y_pos)
            y_labels_inflate.append(f"充气设备{eq_id}")

            for log in schedule_log:
                if log["stage"] == "inflate" and log["eq_id"] == eq_id:
                    wp_id = log["wp_id"]
                    start = log["start"]
                    end = log["end"]
                    ax1.barh(y_pos, end - start, left=start, height=0.6,
                             color=colors[wp_id], edgecolor="black", alpha=0.8)
                    ax1.text(start + (end - start) / 2, y_pos, f"瓶{wp_id}",
                             ha="center", va="center", color="white", fontweight="bold")

        ax1.set_yticks(y_ticks_inflate)
        ax1.set_yticklabels(y_labels_inflate)
        ax1.grid(axis="x", linestyle="--", alpha=0.5)

        # Analysis-stage panel: one row per (component, machine) pair.
        ax2.set_title("分析阶段", fontsize=12)
        ax2.set_xlabel("时间")
        ax2.set_ylabel("分析设备（组分-设备）")
        y_ticks_analysis = []
        y_labels_analysis = []
        current_y = 1

        for comp_id in range(self.num_components):
            for eq_id in range(self.num_analysis_eq):
                y_pos = current_y
                y_ticks_analysis.append(y_pos)
                y_labels_analysis.append(f"组分{comp_id}-设备{eq_id}")

                for log in schedule_log:
                    if log["stage"] == "analysis" and log["comp_id"] == comp_id and log["eq_id"] == eq_id:
                        wp_id = log["wp_id"]
                        start = log["start"]
                        end = log["end"]
                        ax2.barh(y_pos, end - start, left=start, height=0.6,
                                 color=colors[wp_id], edgecolor="black", alpha=0.8)
                        label = f"瓶{wp_id}\n({log['time']:.0f})"
                        ax2.text(start + (end - start) / 2, y_pos, label,
                                 ha="center", va="center", color="white", fontsize=8, fontweight="bold")

                current_y += 1

        ax2.set_yticks(y_ticks_analysis)
        ax2.set_yticklabels(y_labels_analysis, fontsize=10)
        ax2.grid(axis="x", linestyle="--", alpha=0.5)

        # Shared legend mapping colors to workpieces.
        legend_elements = [mpatches.Patch(color=colors[i], label=f"气瓶{i}")
                           for i in range(self.num_workpieces)]
        fig.legend(handles=legend_elements, loc="lower center", ncol=self.num_workpieces,
                   bbox_to_anchor=(0.5, 0.02))

        plt.tight_layout(rect=[0, 0.05, 1, 0.95])
        plt.show()


# -------------------------- DQN网络模型 --------------------------
class DQNNetwork(nn.Module):
    """Three-layer MLP mapping a state vector to one Q-value per action."""

    def __init__(self, state_dim: int, action_dim: int, hidden_dim: int = 256):
        super().__init__()
        # Same layer order/keys as before (network.0 .. network.4), so saved
        # checkpoints remain loadable.
        layers = [
            nn.Linear(state_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, action_dim),
        ]
        self.network = nn.Sequential(*layers)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Return Q-values with shape (batch, action_dim)."""
        return self.network(x)


# -------------------------- DQN智能体 --------------------------
class DQNAgent:
    """Standard DQN agent: epsilon-greedy policy network, periodically synced
    target network, uniform FIFO replay buffer and MSE TD-error loss."""

    def __init__(self, state_dim: int, action_dim: int,
                 gamma: float = 0.99,
                 epsilon: float = 1.0,
                 epsilon_decay: float = 0.995,
                 epsilon_min: float = 0.01,
                 lr: float = 1e-3,
                 batch_size: int = 64,
                 memory_capacity: int = 10000):
        self.state_dim = state_dim
        self.action_dim = action_dim
        self.gamma = gamma
        self.epsilon = epsilon
        self.epsilon_decay = epsilon_decay
        self.epsilon_min = epsilon_min
        self.batch_size = batch_size

        # Replay buffer; oldest transitions are evicted once capacity is hit.
        self.memory = deque(maxlen=memory_capacity)
        self.policy_net = DQNNetwork(state_dim, action_dim).to(device)
        self.target_net = DQNNetwork(state_dim, action_dim).to(device)
        self.target_net.load_state_dict(self.policy_net.state_dict())
        self.target_net.eval()

        self.optimizer = optim.Adam(self.policy_net.parameters(), lr=lr)
        self.loss_fn = nn.MSELoss()

    def store_experience(self, s: np.ndarray, a: int, r: float, s_prime: np.ndarray, done: bool) -> None:
        """Append one (s, a, r, s', done) transition to the replay buffer."""
        self.memory.append((s, a, r, s_prime, done))

    def select_action(self, s: np.ndarray, train_mode: bool = True) -> int:
        """Epsilon-greedy action selection; purely greedy when train_mode is False."""
        if train_mode and np.random.rand() < self.epsilon:
            return random.randint(0, self.action_dim - 1)
        else:
            s_tensor = torch.tensor(s, dtype=torch.float32).unsqueeze(0).to(device)
            with torch.no_grad():
                q_values = self.policy_net(s_tensor)
            return torch.argmax(q_values).item()

    def train_step(self) -> float:
        """Sample one minibatch and perform a single gradient step.

        Returns the scalar loss (0.0 while the buffer is still smaller than
        the batch size).  Also decays epsilon once per successful call.
        """
        if len(self.memory) < self.batch_size:
            return 0.0

        batch = random.sample(self.memory, self.batch_size)

        # Build each tensor from a single numpy array first: creating a
        # tensor from a list of ndarrays is slow and warns on recent PyTorch.
        s_batch = torch.tensor(
            np.array([exp[0] for exp in batch]),  # stack into one ndarray first
            dtype=torch.float32
        ).to(device)

        a_batch = torch.tensor(
            [exp[1] for exp in batch],
            dtype=torch.long
        ).to(device)

        r_batch = torch.tensor(
            [exp[2] for exp in batch],
            dtype=torch.float32
        ).to(device)

        s_prime_batch = torch.tensor(
            np.array([exp[3] for exp in batch]),  # stack into one ndarray first
            dtype=torch.float32
        ).to(device)

        done_batch = torch.tensor(
            [exp[4] for exp in batch],
            dtype=torch.bool
        ).to(device)

        # Standard DQN target: r + gamma * max_a' Q_target(s', a'), with the
        # bootstrap term zeroed on terminal transitions via (~done_batch).
        q_current = self.policy_net(s_batch).gather(1, a_batch.unsqueeze(1)).squeeze(1)
        with torch.no_grad():
            q_prime_max = self.target_net(s_prime_batch).max(1)[0]
            q_target = r_batch + self.gamma * q_prime_max * (~done_batch)

        loss = self.loss_fn(q_current, q_target)
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

        if self.epsilon > self.epsilon_min:
            self.epsilon *= self.epsilon_decay

        return loss.item()

    def update_target_net(self) -> None:
        """Copy the policy network weights into the target network."""
        self.target_net.load_state_dict(self.policy_net.state_dict())

    def save_model(self, path: str = "dqn_gas_scheduling.pth") -> None:
        """Save the policy network's state_dict to `path`."""
        torch.save(self.policy_net.state_dict(), path)
        print(f"模型已保存到: {path}")

    def load_model(self, path: str = "dqn_gas_scheduling.pth") -> None:
        """Load weights from `path` into both policy and target networks."""
        self.policy_net.load_state_dict(torch.load(path, map_location=device))
        self.target_net.load_state_dict(self.policy_net.state_dict())
        self.target_net.eval()
        print(f"模型已从: {path} 加载")


# -------------------------- 动作空间映射 --------------------------
def action_idx_to_schedule(action_idx: int, env: GasSchedulingEnv) -> Tuple[int, int, List[Tuple[int, int]]]:
    """Decode a flat action index into a structured scheduling action.

    The flat space is a mixed-radix number: workpiece id (most significant),
    then inflation machine id, then one base-`num_analysis_eq` digit per
    component.  Returns (workpiece id, inflation equipment id,
    [(component id, analysis equipment id), ...]).
    """
    per_inflate = env.num_analysis_eq ** env.num_components
    per_workpiece = env.num_inflation_eq * per_inflate

    wp_idx, remainder = divmod(action_idx, per_workpiece)
    inflate_eq_idx, assign_code = divmod(remainder, per_inflate)

    # Peel off one base-num_analysis_eq digit per component, least
    # significant digit first (component 0 first).
    analysis_assign = []
    for comp_id in range(env.num_components):
        assign_code, eq_id = divmod(assign_code, env.num_analysis_eq)
        analysis_assign.append((comp_id, eq_id))

    # Defensive wrap-around keeps out-of-range indices decodable.
    return (wp_idx % env.num_workpieces,
            inflate_eq_idx % env.num_inflation_eq,
            analysis_assign)


# -------------------------- 主训练流程 --------------------------
def train_dqn_gas_scheduling():
    """Train a DQN agent on the gas scheduling environment.

    Side effects: prints training progress, saves the trained model to disk,
    renders the last episode's Gantt chart and the training curves.
    """
    env_params = {
        "num_workpieces": 10,
        "num_components": 3,
        "num_inflation_eq": 5,
        "num_analysis_eq_per_component": 2
    }
    env = GasSchedulingEnv(**env_params)

    # Flat action space: workpiece x inflation machine x (analysis machine
    # choice per component).
    num_components = env.num_components
    num_analysis_eq = env.num_analysis_eq
    analysis_action_base = num_analysis_eq ** num_components
    action_dim = env.num_workpieces * env.num_inflation_eq * analysis_action_base
    state_dim = len(env.reset())
    print(f"状态维度: {state_dim}, 动作空间大小: {action_dim}")

    agent = DQNAgent(
        state_dim=state_dim,
        action_dim=action_dim,
        gamma=0.99,
        epsilon=1.0,
        epsilon_decay=0.995,
        lr=1e-3,
        batch_size=64,
        memory_capacity=50000
    )

    num_episodes = 12000
    target_update_freq = 50
    log_freq = 100
    total_rewards = []
    total_times = []

    print("\n开始训练DQN气体调度模型...")
    for episode in range(1, num_episodes + 1):
        s = env.reset()
        episode_reward = 0.0
        done = False
        step_count = 0
        schedule_log = []

        while not done:
            action_idx = agent.select_action(s, train_mode=True)
            action = action_idx_to_schedule(action_idx, env)
            wp_id, inflate_eq_id, analysis_assign = action

            # Snapshot the pre-step timing state: env.step() mutates it, and
            # we need it to reconstruct the exact operation intervals below.
            time_before = env.global_time
            inflate_eq_free = env.inflation_eq_available[inflate_eq_id]
            analysis_eq_free = {
                (comp_id, eq_id): env.analysis_eq_available[comp_id, eq_id]
                for comp_id, eq_id in analysis_assign
            }

            s_prime, r, done, info = env.step(action)

            if info["action_valid"]:
                # Bug fix: start/end times were previously derived from the
                # post-step global_time (which already includes the analysis
                # stage), so both stages were logged with wrong intervals.
                # Recompute them from the pre-step snapshot using the same
                # rules env.step() applies.
                inflate_start = max(time_before, inflate_eq_free)
                inflate_end = inflate_start + env.inflation_time[wp_id]
                schedule_log.append({
                    "stage": "inflate",
                    "wp_id": wp_id,
                    "eq_id": inflate_eq_id,
                    "start": inflate_start,
                    "end": inflate_end,
                    "time": env.inflation_time[wp_id]
                })
                for comp_id, eq_id in analysis_assign:
                    comp_time = env.analysis_time[wp_id, comp_id]
                    # Each component waits for inflation AND its machine.
                    comp_start = max(inflate_end, analysis_eq_free[(comp_id, eq_id)])
                    schedule_log.append({
                        "stage": "analysis",
                        "wp_id": wp_id,
                        "comp_id": comp_id,
                        "eq_id": eq_id,
                        "start": comp_start,
                        "end": comp_start + comp_time,
                        "time": comp_time
                    })

            agent.store_experience(s, action_idx, r, s_prime, done)
            agent.train_step()
            episode_reward += r
            s = s_prime
            step_count += 1

        total_rewards.append(episode_reward)
        total_times.append(env.global_time)

        if episode % target_update_freq == 0:
            agent.update_target_net()

        if episode % log_freq == 0:
            avg_reward = np.mean(total_rewards[-log_freq:])
            avg_time = np.mean(total_times[-log_freq:])
            print(f"Episode {episode:4d} | "
                  f"Avg Reward: {avg_reward:6.2f} | "
                  f"Avg Time: {avg_time:6.2f} | "
                  f"Epsilon: {agent.epsilon:.3f} | "
                  f"Steps: {step_count:2d}")

    agent.save_model()
    print(f"\n训练结束！平均总时间: {np.mean(total_times[-100:]):.2f}")

    print("\n渲染最后一轮调度结果甘特图...")
    env.render(schedule_log)

    # Training curves: raw per-episode values plus a 10-episode moving average.
    fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(12, 8))
    fig.suptitle("DQN训练曲线", fontsize=16)

    ax1.plot(range(1, num_episodes + 1), total_rewards, color="blue", alpha=0.7)
    if num_episodes >= 10:
        smooth_rewards = np.convolve(total_rewards, np.ones(10) / 10, mode="valid")
        ax1.plot(range(10, num_episodes + 1), smooth_rewards, color="red", linewidth=2, label="平滑奖励")
    ax1.set_xlabel("训练轮次")
    ax1.set_ylabel("每轮总奖励")
    ax1.legend()
    ax1.grid(linestyle="--", alpha=0.5)

    ax2.plot(range(1, num_episodes + 1), total_times, color="green", alpha=0.7)
    if num_episodes >= 10:
        smooth_times = np.convolve(total_times, np.ones(10) / 10, mode="valid")
        ax2.plot(range(10, num_episodes + 1), smooth_times, color="orange", linewidth=2, label="平滑总时间")
    ax2.set_xlabel("训练轮次")
    ax2.set_ylabel("每轮总调度时间")
    ax2.legend()
    ax2.grid(linestyle="--", alpha=0.5)

    plt.tight_layout(rect=[0, 0, 1, 0.95])
    plt.show()


# Entry point: run the full training / rendering pipeline as a script.
if __name__ == "__main__":
    train_dqn_gas_scheduling()
