#!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
@Project ：V2 
@File    ：DB8.py
@IDE     ：PyCharm 
@Author  ：郭星
@Date    ：2025/9/26 21:51 
'''
# !/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
基于深度强化学习(PPO)的气体调度优化方案 
'''
import random
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import torch
import torch.nn as nn
import torch.optim as optim
from torch.distributions import Categorical
from typing import List, Tuple, Dict, Any

# Matplotlib configuration for Chinese text rendering.
# Fix: the original list repeated "SimHei" three times; one entry suffices.
plt.rcParams["font.family"] = ["SimHei"]
plt.rcParams["axes.unicode_minus"] = False  # render minus signs correctly with CJK fonts

# Prefer the GPU when one is available.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


class GasSchedulingEnv:
    """Gas-scheduling environment used for reinforcement-learning training.

    Models a two-stage process: each workpiece (gas cylinder) is inflated on
    one of several inflation machines, then its gas components are analyzed
    group-by-group on per-component analysis machines; machines within a
    group run in series and share a common start/end time.
    """

    def __init__(self,
                 num_workpieces: int,
                 num_components: int,
                 num_inflation_eq: int,
                 num_analysis_eq_per_component: int):
        """Initialize environment parameters.

        Args:
            num_workpieces: number of workpieces (gas cylinders).
            num_components: number of gas components per workpiece.
            num_inflation_eq: number of inflation machines.
            num_analysis_eq_per_component: analysis machines per component.
        """
        self.num_workpieces = num_workpieces
        self.num_components = num_components
        self.num_inflation_eq = num_inflation_eq
        self.num_analysis_eq_per_component = num_analysis_eq_per_component

        # Randomly generated processing times (replace with real data in
        # actual applications).
        self.inflation_time = np.random.randint(5, 15, size=num_workpieces)
        self.analysis_time = np.random.randint(3, 10, size=(num_workpieces, num_components))

        # Put the environment into its initial state.
        self.reset()

    def reset(self) -> np.ndarray:
        """Reset the environment to its initial state and return the state."""
        # Inflation machines: the time at which each machine becomes free.
        self.inflation_eq_available = np.zeros(self.num_inflation_eq)
        # Analysis machines: free time per (component, machine) pair.
        self.analysis_eq_available = np.zeros((self.num_components, self.num_analysis_eq_per_component))

        # Workpiece status: 0-unprocessed, 1-inflating, 2-inflated,
        # 3-under analysis, 4-finished.
        self.workpiece_status = np.zeros(self.num_workpieces, dtype=int)
        # Inflation completion time of every workpiece.
        self.workpiece_inflation_end = np.zeros(self.num_workpieces)

        # Workpieces that finished inflation and await analysis.
        self.inflated_workpieces = []
        # Workpieces whose analysis has completed.
        self.analyzed_workpieces = []

        # Simulation clock.
        self.current_time = 0

        # Re-randomize the component grouping: for each workpiece, component
        # i is assigned group id groups[i] in [0, max_groups).
        self.component_groups = []
        for _ in range(self.num_workpieces):
            max_groups = random.randint(1, self.num_components)
            groups = []
            for i in range(self.num_components):
                if i < max_groups:
                    groups.append(i)
                else:
                    groups.append(random.randint(0, max_groups - 1))
            self.component_groups.append(groups)

        # Return the initial observation.
        return self.get_state()

    def get_state(self) -> np.ndarray:
        """Return the current environment state as a flat feature vector.

        The vector concatenates:
          1. each inflation machine's free time (normalized),
          2. each analysis machine's free time (normalized, flattened),
          3. each workpiece's status (scaled by the max status value, 4),
          4. each workpiece's inflation completion time (normalized).
        """

        # Upper bound on total processing time, used only for normalization.
        max_possible_time = np.sum(self.inflation_time) + np.sum(self.analysis_time)

        # Inflation machine availability (normalized).
        inflation_state = self.inflation_eq_available / max_possible_time if max_possible_time > 0 else self.inflation_eq_available

        # Analysis machine availability (normalized and flattened).
        analysis_state = self.analysis_eq_available.flatten() / max_possible_time if max_possible_time > 0 else self.analysis_eq_available.flatten()

        # Workpiece status, scaled into [0, 1].
        workpiece_state = self.workpiece_status / 4  # 4 is the maximum status value

        # Inflation completion times (normalized).
        inflation_end_state = self.workpiece_inflation_end / max_possible_time if max_possible_time > 0 else self.workpiece_inflation_end

        # Concatenate all parts into a single vector.
        state = np.concatenate([
            inflation_state,
            analysis_state,
            workpiece_state,
            inflation_end_state
        ])

        return state

    def step(self, action: Tuple[int, int, List[List[int]]]) -> Tuple[np.ndarray, float, bool, Dict]:
        """
        Execute one scheduling action.

        Args:
            action: 3-tuple of
                - index of the workpiece to inflate,
                - index of the inflation machine to use,
                - analysis machine assignment (one machine list per group).

        Returns:
            (next_state, reward, done, info). Invalid actions return a
            negative reward without completing the operation.
        """
        workpiece_idx, inflation_eq_idx, analysis_eq_assignment = action

        # Validate the action.
        if (workpiece_idx < 0 or workpiece_idx >= self.num_workpieces or
                self.workpiece_status[workpiece_idx] != 0 or  # only unprocessed workpieces may be chosen
                inflation_eq_idx < 0 or inflation_eq_idx >= self.num_inflation_eq):
            # Penalize invalid actions.
            return self.get_state(), -100.0, False, {"invalid_action": True}

        # Remember the clock so the reward can be based on elapsed time.
        prev_time = self.current_time

        # Perform the inflation operation.
        start_time = max(self.current_time, self.inflation_eq_available[inflation_eq_idx])
        end_time = start_time + self.inflation_time[workpiece_idx]

        # Update the inflation machine's availability.
        self.inflation_eq_available[inflation_eq_idx] = end_time
        # Update the workpiece status.
        self.workpiece_status[workpiece_idx] = 2  # mark as inflation-complete
        self.workpiece_inflation_end[workpiece_idx] = end_time
        self.inflated_workpieces.append(workpiece_idx)

        # Advance the clock to the earliest machine-available time.
        # NOTE(review): this minimum includes machines that were never used
        # and are still at 0, so the clock can remain 0 — confirm intended.
        self.current_time = min(self.inflation_eq_available)

        # Run analysis for every workpiece that has finished inflation.
        analysis_reward = 0
        while self.inflated_workpieces:
            # Pick the workpiece whose inflation finished earliest.
            earliest_idx = np.argmin([self.workpiece_inflation_end[i] for i in self.inflated_workpieces])
            wp = self.inflated_workpieces.pop(earliest_idx)

            self.workpiece_status[wp] = 3  # mark as under analysis

            # Grouping of this workpiece's components.
            groups = self.component_groups[wp]
            unique_groups = list(sorted(set(groups)))

            # Analysis may start only after inflation has completed.
            start_analysis_time = self.workpiece_inflation_end[wp]

            # Process the groups one after another.
            # NOTE(review): the early returns below abandon this workpiece in
            # status 3 (already popped from inflated_workpieces) — confirm
            # that is acceptable for invalid-assignment episodes.
            for group_idx, group_id in enumerate(unique_groups):
                # Components belonging to this group.
                components_in_group = [j for j, g in enumerate(groups) if g == group_id]

                # Validate the machine assignment for this group.
                if group_idx >= len(analysis_eq_assignment):
                    return self.get_state(), -50.0, False, {"invalid_analysis_assignment": True}

                eq_for_group = analysis_eq_assignment[group_idx]
                if len(eq_for_group) != len(components_in_group):
                    return self.get_state(), -50.0, False, {"invalid_analysis_assignment_length": True}

                # The group can start only once every assigned machine is free.
                group_ready_time = start_analysis_time
                for comp_idx, component in enumerate(components_in_group):
                    eq = eq_for_group[comp_idx]
                    if eq < 0 or eq >= self.num_analysis_eq_per_component:
                        return self.get_state(), -50.0, False, {"invalid_analysis_eq": True}
                    group_ready_time = max(group_ready_time, self.analysis_eq_available[component][eq])

                # Serial-group duration: the longest single test in the group.
                max_analysis_time = max(
                    self.analysis_time[wp][component]
                    for component in components_in_group
                ) if components_in_group else 0

                # All machines in the serial group finish at the same time.
                group_end_time = group_ready_time + max_analysis_time

                # Update every machine used by this serial group.
                for comp_idx, component in enumerate(components_in_group):
                    eq = eq_for_group[comp_idx]
                    self.analysis_eq_available[component][eq] = group_end_time

                # The group's end time bounds the next group's start time.
                start_analysis_time = group_end_time

            # Analysis done for this workpiece.
            self.workpiece_status[wp] = 4  # mark as finished
            self.analyzed_workpieces.append(wp)

            # Advance the clock.
            self.current_time = max(self.current_time, start_analysis_time)

        # Reward: the less time elapsed, the higher the reward.
        time_taken = self.current_time - prev_time
        reward = 100.0 / (1.0 + time_taken)  # shorter elapsed time -> larger reward

        # Done when every workpiece has reached the finished status.
        done = np.all(self.workpiece_status == 4)

        # Completion bonus, larger for shorter makespans.
        if done:
            reward += 1000.0 / (1.0 + self.current_time)  # shorter total time -> larger bonus

        return self.get_state(), reward, done, {"time_taken": time_taken, "current_time": self.current_time}

    def get_schedule_details(self) -> Tuple[Dict, Dict]:
        """Return per-machine schedule records for Gantt-chart plotting.

        NOTE(review): this is currently a stub — it builds and returns empty
        record structures without logging any actual assignments.
        """
        # Simplified placeholder: empty per-machine job/time records.
        inflation_details = {i: [] for i in range(self.num_inflation_eq)}
        analysis_details = {i: {j: [] for j in range(self.num_analysis_eq_per_component)}
                            for i in range(self.num_components)}

        return inflation_details, analysis_details


class ActorCritic(nn.Module):
    """Actor-Critic network used by the PPO algorithm.

    A shared MLP trunk feeds four actor heads (workpiece choice, inflation
    machine choice, number of component groups, analysis-machine choices)
    and one critic head that estimates the state value.
    """

    def __init__(self, state_dim: int, action_dims: Dict[str, int]):
        """
        Args:
            state_dim: length of the flat state vector.
            action_dims: discrete action-space sizes; expected keys:
                'workpiece', 'inflation_eq', 'analysis_eq', 'max_groups',
                'max_components'.
        """
        super(ActorCritic, self).__init__()

        self.state_dim = state_dim
        self.action_dims = action_dims  # sizes of the individual action spaces

        # Shared feature-extraction trunk.
        self.shared_net = nn.Sequential(
            nn.Linear(state_dim, 256),
            nn.Tanh(),
            nn.Linear(256, 128),
            nn.Tanh(),
            nn.Linear(128, 64),
            nn.Tanh()
        )

        # Actor heads: produce action logits.
        self.actor_workpiece = nn.Linear(64, action_dims['workpiece'])
        self.actor_inflation_eq = nn.Linear(64, action_dims['inflation_eq'])

        # Policy heads for the analysis-machine assignment (simplified).
        self.actor_analysis_base = nn.Linear(64, 64)
        self.actor_analysis_groups = nn.Linear(64, action_dims['max_groups'])
        self.actor_analysis_eq = nn.Linear(64, action_dims['analysis_eq'] * action_dims['max_components'])

        # Critic head: state-value estimate.
        self.critic = nn.Linear(64, 1)

    def forward(self, state: torch.Tensor) -> Tuple[Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor], torch.Tensor]:
        """Forward pass returning action probabilities and the state value.

        Returns:
            ((workpiece_probs, inflation_eq_probs, group_probs,
              analysis_eq_probs), value)
        """
        x = self.shared_net(state)

        # Workpiece selection probabilities.
        workpiece_logits = self.actor_workpiece(x)
        workpiece_probs = nn.Softmax(dim=-1)(workpiece_logits)

        # Inflation machine selection probabilities.
        inflation_eq_logits = self.actor_inflation_eq(x)
        inflation_eq_probs = nn.Softmax(dim=-1)(inflation_eq_logits)

        # Analysis-machine assignment probabilities (simplified).
        analysis_x = nn.Tanh()(self.actor_analysis_base(x))
        group_logits = self.actor_analysis_groups(analysis_x)
        group_probs = nn.Softmax(dim=-1)(group_logits)

        analysis_eq_logits = self.actor_analysis_eq(analysis_x)
        # NOTE(review): the softmax is taken over the whole flattened
        # (analysis_eq * max_components) vector rather than per component
        # slot; get_action later re-slices this vector — confirm intended.
        analysis_eq_probs = nn.Softmax(dim=-1)(analysis_eq_logits)

        # State-value estimate.
        value = self.critic(x)

        return (workpiece_probs, inflation_eq_probs, group_probs, analysis_eq_probs), value

    def get_action(self, state: torch.Tensor) -> Tuple[Tuple[int, int, List[List[int]]], torch.Tensor, torch.Tensor]:
        """Sample an action for `state`; return (action, log_prob, value).

        The action is (workpiece index, inflation machine index, analysis
        machine assignment as one machine list per group). Group sizes are
        chosen with `random` and therefore bypass the policy gradient.
        """
        (workpiece_probs, inflation_eq_probs, group_probs, analysis_eq_probs), value = self.forward(state)

        # Sample from the categorical heads.
        workpiece_dist = Categorical(workpiece_probs)
        inflation_eq_dist = Categorical(inflation_eq_probs)
        group_dist = Categorical(group_probs)

        workpiece_idx = workpiece_dist.sample()
        inflation_eq_idx = inflation_eq_dist.sample()
        num_groups = group_dist.sample() + 1  # at least one group
        num_groups = min(num_groups, self.action_dims['max_components'])  # cap at the number of components

        # Build the analysis-machine assignment.
        analysis_eq_assignment = []
        components_per_group = []

        # Randomly distribute the components over the groups.
        remaining_components = self.action_dims['max_components']
        for i in range(num_groups):
            if i == num_groups - 1:
                # The last group takes all remaining components.
                components = remaining_components
            else:
                # Take between 1 and (remaining minus groups still to fill).
                min_components = 1
                max_possible = remaining_components - (num_groups - i - 1)
                max_components = max_possible if max_possible > min_components else min_components
                components = random.randint(min_components, max_components)

            components_per_group.append(components)
            remaining_components -= components
            if remaining_components <= 0:
                break

        # Assign a machine to every component slot and record log-probs.
        eq_idx = 0
        group_eqs_list = []  # machine list per group
        group_log_probs = []  # log-probability of every machine choice

        for components in components_per_group:
            group_eqs = []
            for _ in range(components):
                # Guard against running past the flat probability vector.
                # NOTE(review): wrapping back to 0 reuses the first slot's
                # probabilities for later components — confirm intended.
                if eq_idx + self.action_dims['analysis_eq'] > len(analysis_eq_probs):
                    # Reset the index to avoid an out-of-range slice.
                    eq_idx = 0

                eq_dist = Categorical(analysis_eq_probs[eq_idx:eq_idx + self.action_dims['analysis_eq']])
                eq = eq_dist.sample()
                group_eqs.append(eq.item())
                group_log_probs.append(eq_dist.log_prob(eq))
                eq_idx += self.action_dims['analysis_eq']

            group_eqs_list.append(group_eqs)

        analysis_eq_assignment = group_eqs_list

        # Joint log-probability of the sampled action.
        log_prob = (workpiece_dist.log_prob(workpiece_idx) +
                    inflation_eq_dist.log_prob(inflation_eq_idx) +
                    group_dist.log_prob(num_groups - 1))

        # Add the log-probabilities of the machine choices.
        for eq_log_prob in group_log_probs:
            log_prob += eq_log_prob

        action = (workpiece_idx.item(), inflation_eq_idx.item(), analysis_eq_assignment)
        return action, log_prob, value


class PPO:
    """PPO trainer (clipped surrogate objective) for the scheduling policy."""

    def __init__(self, state_dim: int, action_dims: Dict[str, int],
                 lr: float = 3e-4, gamma: float = 0.99,
                 gae_lambda: float = 0.95, clip_epsilon: float = 0.2,
                 K_epochs: int = 10):
        """
        Args:
            state_dim: length of the state vector.
            action_dims: action-space sizes (see ActorCritic).
            lr: Adam learning rate.
            gamma: discount factor.
            gae_lambda: GAE smoothing factor.
            clip_epsilon: PPO clipping range.
            K_epochs: policy-update epochs per rollout batch.
        """
        self.gamma = gamma
        self.gae_lambda = gae_lambda
        self.clip_epsilon = clip_epsilon
        self.K_epochs = K_epochs

        # Current (trainable) Actor-Critic network.
        self.policy = ActorCritic(state_dim, action_dims).to(device)
        self.optimizer = optim.Adam(self.policy.parameters(), lr=lr)

        # Frozen copy of the policy, used to compute probability ratios.
        self.old_policy = ActorCritic(state_dim, action_dims).to(device)
        self.old_policy.load_state_dict(self.policy.state_dict())

        # Value-function loss.
        self.value_loss_fn = nn.MSELoss()

        # Rollout buffer.
        self.memory = []

    def select_action(self, state: np.ndarray) -> Tuple[Tuple[int, int, List[List[int]]], torch.Tensor, torch.Tensor]:
        """Sample an action for `state` from the frozen old policy."""
        state = torch.FloatTensor(state).to(device)
        with torch.no_grad():
            action, log_prob, value = self.old_policy.get_action(state)
        return action, log_prob, value

    def store_transition(self, state: np.ndarray, action: Tuple[int, int, List[List[int]]],
                         log_prob: torch.Tensor, value: torch.Tensor, reward: float, done: bool):
        """Append one transition sample to the rollout buffer."""
        self.memory.append((state, action, log_prob, value, reward, done))

    def update(self):
        """Run K_epochs of PPO updates on the buffered rollout, then sync
        the frozen policy and clear the buffer."""
        if not self.memory:  # skip updates on an empty buffer
            return

        # Unpack the rollout buffer.
        states, actions, old_log_probs, values, rewards, dones = zip(*self.memory)

        # To tensors, detached from the old computation graph.
        old_log_probs = torch.stack(old_log_probs).to(device).detach()
        values = torch.stack(values).to(device).detach()

        # Generalized Advantage Estimation (backward pass over the rollout).
        returns = []
        advantages = []
        gae = 0

        for i in reversed(range(len(rewards))):
            if i + 1 < len(rewards):
                delta = rewards[i] + self.gamma * (1 - dones[i]) * values[i + 1] - values[i]
                gae = delta + self.gamma * self.gae_lambda * (1 - dones[i]) * gae
            else:
                # Last step: no bootstrap value available.
                delta = rewards[i] - values[i]
                gae = delta

            advantages.insert(0, gae)
            returns.insert(0, gae + values[i])

        advantages = torch.tensor(advantages).to(device)
        returns = torch.tensor(returns).to(device)

        # Normalize the advantages.
        advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-5)

        # Several optimization epochs over the same batch.
        for _ in range(self.K_epochs):
            # Batch of states as one tensor.
            states_tensor = torch.FloatTensor(np.array(states)).to(device)

            # Fresh value estimates from the current policy.
            _, new_values = self.policy(states_tensor)

            # Log-probabilities under the current policy.
            # NOTE(review): get_action SAMPLES a fresh action per state, so
            # these are log-probs of newly sampled actions, not of the
            # stored rollout actions — this deviates from standard PPO,
            # which evaluates the stored actions; confirm intended.
            new_log_probs = []
            for i in range(len(states)):
                _, log_prob, _ = self.policy.get_action(states_tensor[i])
                new_log_probs.append(log_prob)
            new_log_probs = torch.stack(new_log_probs).to(device)

            # Probability ratio r = pi_new / pi_old.
            ratio = torch.exp(new_log_probs - old_log_probs)

            # Clipped surrogate objective.
            surr1 = ratio * advantages
            surr2 = torch.clamp(ratio, 1 - self.clip_epsilon, 1 + self.clip_epsilon) * advantages
            policy_loss = -torch.min(surr1, surr2).mean()

            # Value-function loss against the GAE returns.
            value_loss = self.value_loss_fn(new_values.squeeze(), returns)

            # Combined loss.
            total_loss = policy_loss + 0.5 * value_loss

            # Backpropagate and step the optimizer.
            self.optimizer.zero_grad()
            total_loss.backward()
            self.optimizer.step()

        # Sync the frozen policy with the updated one.
        self.old_policy.load_state_dict(self.policy.state_dict())

        # Clear the rollout buffer.
        self.memory = []


def train_ppo(env: "GasSchedulingEnv", ppo: "PPO", episodes: int, max_steps: int = 1000) -> List[float]:
    """Train the PPO agent on the scheduling environment.

    Args:
        env: scheduling environment (provides reset() / step()).
        ppo: PPO agent (provides select_action / store_transition / update).
        episodes: number of training episodes.
        max_steps: per-episode step cap; guards against non-terminating
            episodes (e.g. repeated invalid actions).

    Returns:
        The total reward collected in each episode.
    """
    rewards_history = []

    for episode in range(episodes):
        state = env.reset()
        total_reward = 0
        done = False
        step = 0

        while not done and step < max_steps:
            try:
                # Sample an action from the current (old) policy.
                action, log_prob, value = ppo.select_action(state)

                # Apply it to the environment.
                next_state, reward, done, info = env.step(action)

                # Log invalid actions. The env reports them under keys such
                # as "invalid_action" / "invalid_analysis_assignment", so
                # match by prefix (the old exact-key check `"invalid" in
                # info` never fired).
                if any(key.startswith("invalid") for key in info):
                    print(f"Episode {episode}, Step {step}: {info}")

                # Buffer the transition for the next policy update.
                ppo.store_transition(state, action, log_prob, value, reward, done)

                # Advance.
                state = next_state
                total_reward += reward
                step += 1
            except Exception as e:
                # Best-effort recovery: penalize and keep going.
                print(f"Episode {episode}, Step {step} error: {str(e)}")
                reward = -200  # large penalty
                total_reward += reward
                step += 1
                if step >= max_steps:
                    done = True

        # One PPO update per episode.
        ppo.update()

        # Track the episode return.
        rewards_history.append(total_reward)

        # Periodic progress report.
        if episode % 10 == 0:
            print(f"Episode {episode}, Total Reward: {total_reward:.2f}, Steps: {step}")

    return rewards_history


def plot_results(rewards_history: List[float], env: GasSchedulingEnv, ppo: PPO):
    """Visualize training progress and report a final rollout.

    Draws the per-episode reward curve, then replays the trained agent on a
    freshly reset environment and prints the resulting makespan.
    """
    # Reward curve over training episodes.
    plt.figure(figsize=(10, 6))
    plt.plot(rewards_history)
    plt.xlabel('训练回合')
    plt.ylabel('总奖励')
    plt.title('PPO训练奖励曲线')
    plt.grid(True, linestyle='--', alpha=0.7)
    plt.tight_layout()
    plt.show()

    # Replay the trained agent to produce the final schedule.
    state, done = env.reset(), False
    max_steps, step = 1000, 0

    while not done and step < max_steps:
        action, _, _ = ppo.select_action(state)
        state, _, done, _ = env.step(action)
        step += 1

    # Per-machine schedule records (currently empty stubs from the env).
    inflation_details, analysis_details = env.get_schedule_details()

    print(f"调度完成，总时间: {env.current_time}, 步数: {step}")


# Example entry point
if __name__ == "__main__":
    # --- problem configuration ---
    num_workpieces = 5    # gas cylinders
    num_components = 3    # gas components per cylinder
    num_inflation_eq = 2  # inflation machines
    num_analysis_eq = 2   # analysis machines per component
    episodes = 100        # training episodes

    # Build the scheduling environment.
    env = GasSchedulingEnv(
        num_workpieces=num_workpieces,
        num_components=num_components,
        num_inflation_eq=num_inflation_eq,
        num_analysis_eq_per_component=num_analysis_eq
    )

    # State-vector length: inflation machines + flattened analysis machines
    # + one status entry and one inflation-end entry per workpiece.
    state_dim = num_inflation_eq + num_components * num_analysis_eq + 2 * num_workpieces

    # Discrete action-space sizes consumed by the ActorCritic heads.
    action_dims = {
        'workpiece': num_workpieces,
        'inflation_eq': num_inflation_eq,
        'analysis_eq': num_analysis_eq,
        'max_groups': num_components,
        'max_components': num_components
    }

    # Build the PPO agent.
    ppo = PPO(state_dim, action_dims, lr=3e-4, gamma=0.99,
              gae_lambda=0.95, clip_epsilon=0.2, K_epochs=10)

    # Train, then visualize the results.
    print("开始训练PPO智能体...")
    rewards_history = train_ppo(env, ppo, episodes)

    plot_results(rewards_history, env, ppo)
