#!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
@Project ：V2 
@File    ：T5.py
@IDE     ：PyCharm 
@Author  ：郭星
@Date    ：2025/10/9 21:42 
'''
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.distributions import Categorical
import matplotlib.pyplot as plt
import random
from collections import deque
import os
import json
from typing import List, Dict, Tuple, Optional

# Configure matplotlib for Chinese text: SimHei provides CJK glyphs for the
# plot labels/titles used below, and disabling the unicode minus keeps
# negative axis labels renderable with that font.
plt.rcParams["font.family"] = ["SimHei"]  # fix: list previously held the same font three times
plt.rcParams["axes.unicode_minus"] = False


class GasSchedulingEnv:
    """Reinforcement-learning environment for the gas scheduling problem.

    Each workpiece (gas cylinder) is first inflated on one of several
    inflation machines and afterwards has each of its gas components
    analyzed.  Components can be bundled into serial test groups: all
    components of a group start together on per-component analysis
    machines and the group occupies those machines for the longest single
    analysis time in the group; successive groups run one after another.
    The reward encourages completing work quickly and, at the end of an
    episode, subtracts the final makespan.
    """

    def __init__(self, instance: Dict):
        """Initialize the environment from an instance dictionary.

        Args:
            instance: dict with keys 'parameters' (problem sizes) and
                'processing_times' (inflation/analysis durations), as
                produced by InstanceGenerator.generate_instance.
        """
        # Problem-size parameters taken from the instance.
        params = instance['parameters']
        processing_times = instance['processing_times']

        self.num_workpieces = params['num_workpieces']
        self.num_components = params['num_components']
        self.num_inflation_eq = params['num_inflation_eq']
        self.num_analysis_eq_per_component = params['num_analysis_eq_per_component']

        # Processing times: inflation_time[w] and analysis_time[w][c].
        self.inflation_time = np.array(processing_times['inflation_time'])
        self.analysis_time = np.array(processing_times['analysis_time'])

        # Initialize all mutable scheduling state.
        self.reset()

    def reset(self) -> np.ndarray:
        """Reset the environment and return the initial state vector."""
        # Which workpieces have finished inflation / analysis.
        self.completed_inflation = [False] * self.num_workpieces
        self.completed_analysis = [False] * self.num_workpieces

        # Equipment state: the time at which each machine becomes free.
        self.inflation_eq_free_time = [0] * self.num_inflation_eq
        self.analysis_eq_free_time = [
            [0 for _ in range(self.num_analysis_eq_per_component)]
            for _ in range(self.num_components)
        ]

        # Inflation finish time per workpiece (analysis cannot start earlier).
        self.workpiece_inflation_end = [0] * self.num_workpieces

        # Simulation clock.
        self.current_time = 0

        # Recorded scheduling decisions, kept for later visualization.
        self.inflation_schedule = []  # entries: (workpiece, machine, start, end)
        self.analysis_schedule = []  # entries: (workpiece, component, machine, start, end, group id)

        return self._get_state()

    def _get_state(self) -> np.ndarray:
        """Encode the environment state as a flat float32 vector.

        Layout:
          1. per-workpiece inflation-done flags (0/1)
          2. per-workpiece analysis-done flags (0/1)
          3. inflation machine free times (normalized)
          4. analysis machine free times (normalized)
        """
        state = []

        # Inflation-done flags (binary).
        state.extend([1 if done else 0 for done in self.completed_inflation])

        # Analysis-done flags (binary).
        state.extend([1 if done else 0 for done in self.completed_analysis])

        # Machine free times normalized by an upper bound on the makespan
        # (sum of every inflation and analysis duration).
        max_possible_time = np.sum(self.inflation_time) + np.sum(self.analysis_time)
        state.extend([t / max_possible_time for t in self.inflation_eq_free_time])

        # Analysis machine free times, component by component.
        for component in range(self.num_components):
            state.extend([t / max_possible_time for t in self.analysis_eq_free_time[component]])

        return np.array(state, dtype=np.float32)

    def step(self, action: Tuple[int, int, List[int], List[List[int]]]) -> Tuple[np.ndarray, float, bool, Dict]:
        """Apply one scheduling action and advance the simulation.

        Args:
            action: 4-tuple of
                - index of the workpiece to schedule
                - index of the inflation machine to use
                - component grouping (group id for each component)
                - per-group analysis machine assignment

        Returns:
            (next_state, reward, done, info).  Invalid actions are rejected
            with a negative reward and leave the schedule unchanged.
        """
        # Unpack the composite action.
        workpiece, inflate_eq, component_groups, analysis_eq_assignment = action

        # Reject actions on already-inflated workpieces or bad machine ids.
        if (self.completed_inflation[workpiece] or
                inflate_eq < 0 or inflate_eq >= self.num_inflation_eq):
            return self._get_state(), -100, False, {"error": "无效动作"}

        # Inflation starts when both the clock and the machine allow it.
        start_time = max(self.current_time, self.inflation_eq_free_time[inflate_eq])
        end_time = start_time + self.inflation_time[workpiece]

        # Update inflation bookkeeping.
        self.inflation_eq_free_time[inflate_eq] = end_time
        self.completed_inflation[workpiece] = True
        self.workpiece_inflation_end[workpiece] = end_time

        # Record the inflation operation.
        self.inflation_schedule.append((workpiece, inflate_eq, start_time, end_time))

        # Schedule analysis, group by group (groups run serially).
        group_ids = list(set(component_groups))
        group_start_time = end_time  # analysis may only start after inflation

        for group_id in group_ids:
            # Components belonging to this group.
            components_in_group = [i for i, g in enumerate(component_groups) if g == group_id]
            if not components_in_group:
                continue

            # Machines assigned to this group.
            eq_for_group = analysis_eq_assignment[group_id]
            if len(eq_for_group) != len(components_in_group):
                return self._get_state(), -50, False, {"error": "设备分配与组分数不匹配"}

            # The group can start once every assigned machine is free.
            group_ready_time = group_start_time
            for i, component in enumerate(components_in_group):
                eq = eq_for_group[i]
                if eq < 0 or eq >= self.num_analysis_eq_per_component:
                    return self._get_state(), -50, False, {"error": "无效的分析设备"}
                group_ready_time = max(group_ready_time, self.analysis_eq_free_time[component][eq])

            # The whole group occupies its machines for the longest single analysis.
            max_analysis_time = max([self.analysis_time[workpiece][c] for c in components_in_group])
            group_end_time = group_ready_time + max_analysis_time

            # Update the assigned analysis machines and record the operations.
            for i, component in enumerate(components_in_group):
                eq = eq_for_group[i]
                self.analysis_eq_free_time[component][eq] = group_end_time
                # Record the analysis operation.
                self.analysis_schedule.append((
                    workpiece, component, eq, group_ready_time, group_end_time, group_id
                ))

            # The next group must wait for the current one to finish.
            group_start_time = group_end_time

        # The workpiece's analysis is now fully scheduled.
        self.completed_analysis[workpiece] = True

        # Advance the clock (never backwards).
        self.current_time = max(self.current_time, end_time)

        # Dense shaping reward.
        reward = self._calculate_reward()

        # Episode ends once every workpiece has been analyzed.
        done = all(self.completed_analysis)

        # Terminal term: subtract the final makespan.
        if done:
            total_time = max(max(self.inflation_eq_free_time),
                             max(max(times) for times in self.analysis_eq_free_time))
            reward -= total_time  # shorter schedules yield higher total reward

        return self._get_state(), reward, done, {"total_time": self.current_time}

    def _calculate_reward(self) -> float:
        """Compute the dense per-step shaping reward."""
        reward = 0

        # 1. Reward for inflated workpieces.
        # NOTE(review): despite the name, this counts ALL inflations completed
        # so far (cumulative), not just newly completed ones — confirm intent.
        newly_completed_inflation = sum(self.completed_inflation)
        reward += newly_completed_inflation * 10

        # 2. Reward for analyzed workpieces (cumulative as well).
        newly_completed_analysis = sum(self.completed_analysis)
        reward += newly_completed_analysis * 20

        # 3. Equipment-utilization term; the +1 avoids division by zero at t=0.
        inflation_util = sum(self.inflation_eq_free_time) / (self.num_inflation_eq * (self.current_time + 1))
        reward -= inflation_util * 5  # larger busy-horizon ratio -> larger penalty

        # 4. Penalize workpieces left waiting between inflation and analysis.
        for i in range(self.num_workpieces):
            if self.completed_inflation[i] and not self.completed_analysis[i]:
                # Time the workpiece has sat inflated but unanalyzed.
                delay = self.current_time - self.workpiece_inflation_end[i]
                reward -= delay * 0.1

        return reward

    def get_schedule(self) -> Dict:
        """Return the complete recorded schedule plus its makespan."""
        total_time = max(max(self.inflation_eq_free_time),
                         max(max(times) for times in self.analysis_eq_free_time))

        return {
            "inflation_schedule": self.inflation_schedule,
            "analysis_schedule": self.analysis_schedule,
            "total_time": total_time
        }

    def render(self):
        """Visualize the recorded schedule as two Gantt-style charts
        (inflation stage on top, analysis stage below)."""
        schedule = self.get_schedule()
        total_time = schedule["total_time"]

        # Figure with one subplot per stage.
        fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(12, 10))
        fig.suptitle(f'气体调度结果 (总时间: {total_time:.2f})', fontsize=16)

        # One color per workpiece.
        colors = plt.cm.tab10(np.linspace(0, 1, self.num_workpieces))

        # --- Inflation stage ---
        y_ticks = list(range(1, self.num_inflation_eq + 1))
        y_labels = [f'充气设备 {i}' for i in range(self.num_inflation_eq)]

        for workpiece, eq, start, end in schedule["inflation_schedule"]:
            ax1.barh(eq + 1, end - start, left=start, height=0.6,
                     color=colors[workpiece], edgecolor='black')
            ax1.text(start + (end - start) / 2, eq + 1, f'瓶{workpiece}',
                     ha='center', va='center', color='white', fontweight='bold')

        ax1.set_yticks(y_ticks)
        ax1.set_yticklabels(y_labels)
        ax1.set_xlabel('时间')
        ax1.set_title('充气阶段')
        ax1.set_xlim(0, total_time * 1.1)
        ax1.grid(True, axis='x', linestyle='--', alpha=0.7)

        # --- Analysis stage ---
        y_pos = 1
        y_ticks = []
        y_labels = []
        eq_pos_map = {}  # maps (component, machine) -> y position on the chart

        for component in range(self.num_components):
            for eq in range(self.num_analysis_eq_per_component):
                eq_pos_map[(component, eq)] = y_pos
                y_ticks.append(y_pos)
                y_labels.append(f'组分{component}分析设备{eq}')
                y_pos += 1
            y_pos += 0.5  # gap between different components

        # Group analysis tasks by (workpiece, group id) to draw connectors.
        groups = {}
        for item in schedule["analysis_schedule"]:
            workpiece, component, eq, start, end, group_id = item
            key = (workpiece, group_id)
            if key not in groups:
                groups[key] = []
            groups[key].append((component, eq, start, end))

        # Draw the analysis bars.
        for item in schedule["analysis_schedule"]:
            workpiece, component, eq, start, end, group_id = item
            y = eq_pos_map[(component, eq)]
            ax2.barh(y, end - start, left=start, height=0.6,
                     color=colors[workpiece], edgecolor='black')
            ax2.text(start + (end - start) / 2, y, f'瓶{workpiece}',
                     ha='center', va='center', color='white', fontweight='bold', fontsize=8)

        # Dashed vertical connectors mark serial test groups.
        for (workpiece, group_id), tasks in groups.items():
            if len(tasks) > 1:  # only groups with several components
                # Midpoint of the first task's time span.
                mid_time = (tasks[0][2] + tasks[0][3]) / 2
                # Vertical extent covered by the group's tasks.
                y_min = min(eq_pos_map[(t[0], t[1])] for t in tasks)
                y_max = max(eq_pos_map[(t[0], t[1])] for t in tasks)
                # Dashed connector line.
                ax2.plot([mid_time, mid_time], [y_min - 0.3, y_max + 0.3],
                         'k--', linewidth=1.5, alpha=0.7)

        ax2.set_yticks(y_ticks)
        ax2.set_yticklabels(y_labels)
        ax2.set_xlabel('时间')
        ax2.set_title('分析阶段（虚线表示串联测试组）')
        ax2.set_xlim(0, total_time * 1.1)
        ax2.grid(True, axis='x', linestyle='--', alpha=0.7)

        # Legend (at most the first 10 workpieces).
        legend_items = [plt.Rectangle((0, 0), 1, 1, facecolor=colors[i], edgecolor='black', label=f'气瓶 {i}')
                        for i in range(min(10, self.num_workpieces))]
        plt.legend(handles=legend_items, loc='lower center', ncol=5, bbox_to_anchor=(0.5, -0.1))

        plt.tight_layout()
        plt.subplots_adjust(bottom=0.2)
        plt.show()


class PPOActor(nn.Module):
    """Policy network for PPO.

    Maps a flat state vector through a shared trunk to four independent
    categorical heads: workpiece choice, inflation-machine choice,
    component-grouping code, and analysis-machine-assignment code.
    forward() returns a dict of softmax probability tensors, one per head.
    """

    def __init__(self, state_dim, action_dims):
        super(PPOActor, self).__init__()
        # Shared trunk.
        self.fc1 = nn.Linear(state_dim, 128)
        self.fc2 = nn.Linear(128, 128)

        # Two direct heads on the trunk output.
        self.workpiece_head = nn.Linear(128, action_dims['workpiece'])
        self.inflate_eq_head = nn.Linear(128, action_dims['inflate_eq'])

        # Grouping and machine-assignment heads each get an extra hidden layer.
        self.grouping_base = nn.Linear(128, 64)
        self.group_head = nn.Linear(64, action_dims['grouping'])

        self.analysis_eq_base = nn.Linear(128, 64)
        self.analysis_eq_head = nn.Linear(64, action_dims['analysis_eq'])

        self.activation = nn.Tanh()

    def forward(self, x):
        act = self.activation

        # Shared trunk: two tanh layers.
        trunk = act(self.fc2(act(self.fc1(x))))

        # Intermediate features for the two deeper heads.
        group_feat = act(self.grouping_base(trunk))
        analysis_feat = act(self.analysis_eq_base(trunk))

        # Softmax over every head's logits.
        return {
            'workpiece': torch.softmax(self.workpiece_head(trunk), dim=-1),
            'inflate_eq': torch.softmax(self.inflate_eq_head(trunk), dim=-1),
            'grouping': torch.softmax(self.group_head(group_feat), dim=-1),
            'analysis_eq': torch.softmax(self.analysis_eq_head(analysis_feat), dim=-1),
        }


class PPOCritic(nn.Module):
    """Value network for PPO: maps a state vector to a scalar state value."""

    def __init__(self, state_dim):
        super(PPOCritic, self).__init__()
        self.fc1 = nn.Linear(state_dim, 128)
        self.fc2 = nn.Linear(128, 128)
        self.fc3 = nn.Linear(128, 1)
        self.activation = nn.Tanh()

    def forward(self, x):
        # Two tanh hidden layers, then a linear scalar output.
        hidden = x
        for layer in (self.fc1, self.fc2):
            hidden = self.activation(layer(hidden))
        return self.fc3(hidden)


class PPOAgent:
    """PPO智能体"""

    def __init__(self, state_dim, action_dims, lr_actor=3e-4, lr_critic=3e-4,
                 gamma=0.99, lambda_gae=0.95, clip_epsilon=0.2, K_epochs=10):
        self.gamma = gamma
        self.lambda_gae = lambda_gae
        self.clip_epsilon = clip_epsilon
        self.K_epochs = K_epochs

        # 初始化组分数量和分析设备数量属性
        self._num_components = 0
        self._num_analysis_eq_per_component = 0

        # 网络
        self.actor = PPOActor(state_dim, action_dims)
        self.critic = PPOCritic(state_dim)

        # 优化器
        self.optimizer_actor = optim.Adam(self.actor.parameters(), lr=lr_actor)
        self.optimizer_critic = optim.Adam(self.critic.parameters(), lr=lr_critic)

        # 用于存储轨迹
        self.memory = {
            'states': [],
            'actions': [],
            'rewards': [],
            'old_log_probs': [],
            'dones': []
        }

    def select_action(self, state):
        """根据当前状态选择动作"""
        state = torch.FloatTensor(state).unsqueeze(0)

        with torch.no_grad():
            probs = self.actor(state)

            # 采样动作
            workpiece_dist = Categorical(probs['workpiece'])
            workpiece = workpiece_dist.sample()
            workpiece_log_prob = workpiece_dist.log_prob(workpiece)

            inflate_eq_dist = Categorical(probs['inflate_eq'])
            inflate_eq = inflate_eq_dist.sample()
            inflate_eq_log_prob = inflate_eq_dist.log_prob(inflate_eq)

            group_dist = Categorical(probs['grouping'])
            group_code = group_dist.sample()
            group_log_prob = group_dist.log_prob(group_code)

            analysis_eq_dist = Categorical(probs['analysis_eq'])
            analysis_eq_code = analysis_eq_dist.sample()
            analysis_eq_log_prob = analysis_eq_dist.log_prob(analysis_eq_code)

        # 计算总对数概率
        total_log_prob = workpiece_log_prob + inflate_eq_log_prob + group_log_prob + analysis_eq_log_prob

        # 将编码转换为实际动作
        grouping = self._decode_grouping(group_code.item())
        analysis_eq_assignment = self._decode_analysis_eq(analysis_eq_code.item())

        action = (
            workpiece.item(),
            inflate_eq.item(),
            grouping,
            analysis_eq_assignment
        )

        return action, total_log_prob.item()

    def _decode_grouping(self, code: int) -> List[int]:
        """将分组编码转换为实际的分组方案"""
        # 简单编码：将整数转换为组分分组方案
        groups = []
        for i in range(self._num_components):
            groups.append(code % 3)  # 最多3个组
            code = code // 3
        return groups[:self._num_components]

    def _decode_analysis_eq(self, code: int) -> List[List[int]]:
        """将分析设备分配编码转换为实际的分配方案"""
        # 简化的解码方式
        assignment = []
        for i in range(3):  # 最多3个组
            group_assignment = []
            for j in range(self._num_components):
                group_assignment.append(code % self._num_analysis_eq_per_component)
                code = code // self._num_analysis_eq_per_component
            assignment.append(group_assignment[:self._num_components])
        return assignment

    def store_transition(self, state, action, reward, log_prob, done):
        """存储轨迹信息"""
        self.memory['states'].append(state)
        self.memory['actions'].append(action)
        self.memory['rewards'].append(reward)
        self.memory['old_log_probs'].append(log_prob)
        self.memory['dones'].append(done)

    def clear_memory(self):
        """清空轨迹信息"""
        self.memory = {
            'states': [],
            'actions': [],
            'rewards': [],
            'old_log_probs': [],
            'dones': []
        }

    def compute_gae(self, rewards, values, dones):
        """计算广义优势估计"""
        advantages = []
        advantage = 0
        for i in reversed(range(len(rewards))):
            delta = rewards[i] + self.gamma * values[i + 1] * (1 - dones[i]) - values[i]
            advantage = delta + self.gamma * self.lambda_gae * advantage * (1 - dones[i])
            advantages.insert(0, advantage)

        returns = [a + v for a, v in zip(advantages, values[:-1])]
        return advantages, returns

    def update(self):
        """更新网络参数"""
        # 转换为张量
        states = torch.FloatTensor(self.memory['states'])
        old_log_probs = torch.FloatTensor(self.memory['old_log_probs'])
        rewards = self.memory['rewards']
        dones = self.memory['dones']

        # 计算价值
        with torch.no_grad():
            values = self.critic(states)
            values = values.squeeze().tolist()
            # 添加终止状态的价值
            values.append(0 if dones[-1] else self.critic(states[-1:]).item())

        # 计算优势和回报
        advantages, returns = self.compute_gae(rewards, values, dones)
        advantages = torch.FloatTensor(advantages)
        returns = torch.FloatTensor(returns)

        # 标准化优势
        advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)

        # 多次更新
        for _ in range(self.K_epochs):
            # 计算当前策略的对数概率
            probs = self.actor(states)

            # 为了简化示例，这里重新计算动作的对数概率
            current_log_probs = torch.zeros_like(old_log_probs)

            # 计算价值
            current_values = self.critic(states).squeeze()

            # 计算优势比
            ratios = torch.exp(current_log_probs - old_log_probs)

            # 计算actor损失
            surr1 = ratios * advantages
            surr2 = torch.clamp(ratios, 1 - self.clip_epsilon, 1 + self.clip_epsilon) * advantages
            actor_loss = -torch.min(surr1, surr2).mean()

            # 计算critic损失
            critic_loss = nn.MSELoss()(current_values, returns)

            # 总损失
            total_loss = actor_loss + 0.5 * critic_loss

            # 优化
            self.optimizer_actor.zero_grad()
            self.optimizer_critic.zero_grad()
            total_loss.backward()
            self.optimizer_actor.step()
            self.optimizer_critic.step()


class InstanceGenerator:
    """Generates random instances of the gas scheduling problem."""

    @staticmethod
    def generate_instance(num_workpieces: int,
                          num_components: int,
                          num_inflation_eq: int,
                          num_analysis_eq_per_component: int,
                          min_inflation_time: int = 5,
                          max_inflation_time: int = 15,
                          min_analysis_time: int = 3,
                          max_analysis_time: int = 10,
                          save_path: Optional[str] = None,
                          seed: Optional[int] = None) -> Dict:
        """Build a random problem instance.

        Processing times are drawn uniformly from the closed integer
        ranges [min_inflation_time, max_inflation_time] and
        [min_analysis_time, max_analysis_time].

        Args:
            num_workpieces: number of gas cylinders.
            num_components: gas components per cylinder.
            num_inflation_eq: number of inflation machines.
            num_analysis_eq_per_component: analysis machines per component.
            min/max_inflation_time, min/max_analysis_time: duration bounds.
            save_path: if given, the instance is also written as JSON here.
            seed: optional seed for a reproducible instance; when None the
                global NumPy RNG is used (original behavior).

        Returns:
            The instance dict with 'problem_type', 'parameters' and
            'processing_times' keys.
        """
        # Local RandomState when a seed is given, so reproducibility does
        # not disturb the global RNG state.
        rng = np.random if seed is None else np.random.RandomState(seed)

        inflation_time = rng.randint(min_inflation_time, max_inflation_time + 1,
                                     size=num_workpieces)
        analysis_time = rng.randint(min_analysis_time, max_analysis_time + 1,
                                    size=(num_workpieces, num_components))

        instance = {
            'problem_type': 'gas_scheduling',
            'parameters': {
                'num_workpieces': num_workpieces,
                'num_components': num_components,
                'num_inflation_eq': num_inflation_eq,
                'num_analysis_eq_per_component': num_analysis_eq_per_component
            },
            'processing_times': {
                'inflation_time': inflation_time.tolist(),
                'analysis_time': analysis_time.tolist()
            }
        }

        if save_path:
            # os.makedirs('') raises FileNotFoundError, so only create
            # directories when the path actually contains one
            # (fix: bare filenames previously crashed here).
            directory = os.path.dirname(save_path)
            if directory:
                os.makedirs(directory, exist_ok=True)
            with open(save_path, 'w') as f:
                json.dump(instance, f, indent=2)

        return instance


def train_ppo_agent(env, agent, episodes=1000, max_steps=100):
    """Train a PPO agent on the given environment.

    Runs `episodes` rollouts of at most `max_steps` steps each, performs one
    PPO update after every episode, checkpoints the best-scoring networks to
    'best_actor.pth' / 'best_critic.pth', and finally plots the reward curve.

    Returns the trained (in-place) agent.
    """
    episode_returns = []
    running_means = []
    best_return = -np.inf

    for episode in range(episodes):
        obs = env.reset()
        ep_return = 0.0
        finished = False
        steps_taken = 0

        # Roll out one episode, storing every transition for the update.
        while not finished and steps_taken < max_steps:
            act, logp = agent.select_action(obs)
            nxt, rew, finished, _ = env.step(act)
            agent.store_transition(obs, act, rew, logp, finished)
            obs = nxt
            ep_return += rew
            steps_taken += 1

        # One PPO update per episode, then drop the rollout.
        agent.update()
        agent.clear_memory()

        # Track raw returns and a 100-episode moving average.
        episode_returns.append(ep_return)
        avg = float(np.mean(episode_returns[-100:]))
        running_means.append(avg)

        # Checkpoint whenever a new best episode return appears.
        if ep_return > best_return:
            best_return = ep_return
            torch.save(agent.actor.state_dict(), 'best_actor.pth')
            torch.save(agent.critic.state_dict(), 'best_critic.pth')

        # Periodic progress report.
        if episode % 10 == 0:
            print(f"Episode {episode}/{episodes}, Score: {ep_return:.2f}, Moving Avg: {avg:.2f}")

    # Plot the training curve.
    plt.figure(figsize=(10, 6))
    plt.plot(episode_returns, label='每轮奖励')
    plt.plot(running_means, label='移动平均奖励 (窗口=100)')
    plt.xlabel('训练轮次')
    plt.ylabel('奖励值')
    plt.title('PPO算法训练曲线')
    plt.legend()
    plt.grid(True)
    plt.show()

    return agent


def main():
    """End-to-end demo: build a small instance, train a PPO agent on it,
    then roll out and visualize the trained policy's schedule."""
    # 1. A small instance keeps the combinatorial action space tractable.
    instance = InstanceGenerator.generate_instance(
        num_workpieces=5,  # 5 workpieces
        num_components=2,  # 2 gas components per workpiece
        num_inflation_eq=2,  # 2 inflation machines
        num_analysis_eq_per_component=2  # 2 analysis machines per component
    )

    params = instance['parameters']
    n_components = params['num_components']
    n_analysis_eq = params['num_analysis_eq_per_component']

    # 2. Environment built from the instance.
    env = GasSchedulingEnv(instance)

    # 3. Derive state and action dimensions from the environment/instance.
    state_dim = env._get_state().shape[0]
    action_dims = {
        'workpiece': params['num_workpieces'],
        'inflate_eq': params['num_inflation_eq'],
        'grouping': 3 ** n_components,  # at most 3 serial groups
        'analysis_eq': (n_analysis_eq ** n_components) * 3  # at most 3 groups
    }

    # 4. PPO agent.
    agent = PPOAgent(
        state_dim=state_dim,
        action_dims=action_dims,
        lr_actor=3e-4,
        lr_critic=3e-4,
        gamma=0.99,
        lambda_gae=0.95,
        clip_epsilon=0.2,
        K_epochs=10
    )

    # The decode helpers need the problem sizes.
    agent._num_components = n_components
    agent._num_analysis_eq_per_component = n_analysis_eq

    # 5. Train.
    print("开始训练PPO智能体...")
    trained_agent = train_ppo_agent(env, agent, episodes=50000, max_steps=20)

    # 6. Roll out the trained policy once.
    print("测试训练好的智能体...")
    obs = env.reset()
    finished = False
    while not finished:
        act, _ = trained_agent.select_action(obs)
        obs, _, finished, _ = env.step(act)

    # 7. Report and draw the resulting schedule.
    print(f"最终调度总时间: {env.get_schedule()['total_time']:.2f}")
    env.render()

# Script entry point: run the full training/visualization demo only when
# executed directly, not when imported as a module.
if __name__ == "__main__":
    main()
