#!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
@Project ：V2 
@File    ：DP3.py
@IDE     ：PyCharm 
@Author  ：郭星
@Date    ：2025/9/29 20:11

用DQN  PPO
'''
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import matplotlib.pyplot as plt
from collections import deque, namedtuple
import random
import time

# Use a CJK-capable font so Chinese axis labels/titles render correctly.
# The original list repeated "SimHei" three times, which provided no real
# fallback; give distinct fallbacks instead.
plt.rcParams["font.family"] = ["SimHei", "Microsoft YaHei", "Arial Unicode MS"]
# CJK fonts often lack the Unicode minus glyph; render '-' as ASCII hyphen.
plt.rcParams["axes.unicode_minus"] = False


# --------------------------
# 柔性作业车间环境定义
# --------------------------
class FJSPEnv:
    """Flexible job-shop scheduling (FJSP) environment.

    Each of ``num_jobs`` jobs consists of 3-5 sequential operations; every
    operation can run on a random subset of machines, each with a random
    processing time in [5, 20].  An action assigns the next unscheduled
    operation of a job to one of its eligible machines.
    """

    def __init__(self, num_jobs=5, num_machines=3):
        self.num_jobs = num_jobs          # number of jobs
        self.num_machines = num_machines  # number of machines

        # processes[job][op] = [(machine, time), ...] — eligible machines
        self.processes = []
        for _ in range(num_jobs):
            job_processes = []
            num_ops = random.randint(3, 5)  # 3-5 operations per job
            for _ in range(num_ops):
                # each operation can run on 1-3 machines
                possible_machines = random.sample(range(num_machines),
                                                  random.randint(1, min(3, num_machines)))
                times = [random.randint(5, 20) for _ in possible_machines]
                job_processes.append(list(zip(possible_machines, times)))
            self.processes.append(job_processes)

        self.reset()

    def reset(self):
        """Clear the schedule and return the initial state vector."""
        self.job_progress = [0] * self.num_jobs       # finished ops per job
        self.machine_times = [0] * self.num_machines  # machine ready times
        # End time of each job's most recently scheduled operation.  Fixes the
        # previous max(machine_times)*0.3 approximation, which did not enforce
        # the job-precedence constraint correctly.
        self.job_end_times = [0] * self.num_jobs
        self.completed_jobs = 0
        self.current_makespan = 0  # current maximum completion time
        return self._get_state()

    def _get_state(self):
        """Encode the environment state as a flat float32 vector.

        Layout: normalized machine times, job-progress fractions, then one
        eligible-machine 0/1 mask per job's next operation.
        """
        state = []
        # machine completion times normalized by the worst-case total time
        max_possible_time = sum(max(t for _, t in op) for job in self.processes for op in job)
        state.extend([t / max_possible_time for t in self.machine_times])

        # job progress as a fraction of the longest job
        max_ops = max(len(job) for job in self.processes)
        state.extend([p / max_ops for p in self.job_progress])

        # eligible-machine mask for each job's next operation (zeros if done)
        for job_idx in range(self.num_jobs):
            if self.job_progress[job_idx] < len(self.processes[job_idx]):
                possible_machines = {m for m, _ in self.processes[job_idx][self.job_progress[job_idx]]}
                state.extend(1.0 if m in possible_machines else 0.0
                             for m in range(self.num_machines))
            else:
                state.extend([0.0] * self.num_machines)

        return np.array(state, dtype=np.float32)

    def step(self, action):
        """Execute ``action = (job_idx, machine_idx)``.

        Returns ``(next_state, reward, done)``.  Invalid actions are
        penalized with -100 and leave the schedule unchanged (done stays
        False).
        """
        job_idx, machine_idx = action
        done = False

        op_idx = self.job_progress[job_idx]
        # validity: job not finished and machine eligible for its next op
        if (op_idx >= len(self.processes[job_idx]) or
                machine_idx not in [m for m, _ in self.processes[job_idx][op_idx]]):
            return self._get_state(), -100, done

        process_time = next(t for m, t in self.processes[job_idx][op_idx] if m == machine_idx)

        # the operation starts only when both the machine is free and the
        # job's previous operation has finished (precedence constraint)
        start_time = max(self.machine_times[machine_idx], self._get_job_last_end_time(job_idx))
        end_time = start_time + process_time
        self.machine_times[machine_idx] = end_time
        self.job_end_times[job_idx] = end_time

        self.job_progress[job_idx] += 1
        if self.job_progress[job_idx] == len(self.processes[job_idx]):
            self.completed_jobs += 1

        # dense reward: negative makespan increase for this step
        new_makespan = max(self.machine_times)
        reward = self.current_makespan - new_makespan
        self.current_makespan = new_makespan

        if self.completed_jobs == self.num_jobs:
            done = True
            reward += 1000 / new_makespan  # terminal bonus, larger for shorter schedules

        return self._get_state(), reward, done

    def _get_job_last_end_time(self, job_idx):
        """End time of the job's previously scheduled operation (0 if none)."""
        return self.job_end_times[job_idx]

    def get_valid_actions(self):
        """Return every currently valid (job_idx, machine_idx) action."""
        valid_actions = []
        for job_idx in range(self.num_jobs):
            if self.job_progress[job_idx] < len(self.processes[job_idx]):
                op_idx = self.job_progress[job_idx]
                valid_actions.extend((job_idx, m) for m, _ in self.processes[job_idx][op_idx])
        return valid_actions


# --------------------------
# DQN算法实现
# --------------------------
class DQNNetwork(nn.Module):
    """MLP mapping a state vector to one Q-value per discrete action."""

    def __init__(self, state_dim, action_dim):
        super(DQNNetwork, self).__init__()
        # two hidden ReLU layers of width 128, then a linear Q-value head
        self.fc1 = nn.Linear(state_dim, 128)
        self.fc2 = nn.Linear(128, 128)
        self.fc3 = nn.Linear(128, action_dim)
        self.relu = nn.ReLU()

    def forward(self, x):
        hidden = self.relu(self.fc1(x))
        hidden = self.relu(self.fc2(hidden))
        return self.fc3(hidden)


Experience = namedtuple('Experience', ('state', 'action', 'reward', 'next_state', 'done'))


class DQNAgent:
    """DQN agent with experience replay, a target network, and
    epsilon-greedy exploration restricted to the environment's valid
    actions."""

    def __init__(self, state_dim, action_space, lr=1e-3, gamma=0.99, epsilon=1.0, batch_size=64):
        self.state_dim = state_dim
        self.action_space = action_space  # list of every possible action
        self.action_dim = len(action_space)
        self.action_to_idx = {a: i for i, a in enumerate(action_space)}

        self.gamma = gamma      # discount factor
        self.epsilon = epsilon  # exploration rate
        self.epsilon_min = 0.01
        self.epsilon_decay = 0.995
        self.batch_size = batch_size

        # online (policy) network and a slowly-synced target network
        self.policy_net = DQNNetwork(state_dim, self.action_dim)
        self.target_net = DQNNetwork(state_dim, self.action_dim)
        self.target_net.load_state_dict(self.policy_net.state_dict())
        self.target_net.eval()

        self.optimizer = optim.Adam(self.policy_net.parameters(), lr=lr)
        self.memory = deque(maxlen=10000)  # experience replay buffer
        self.loss_fn = nn.MSELoss()

    def select_action(self, state, valid_actions):
        """Epsilon-greedy selection over the *valid* actions only."""
        if random.random() < self.epsilon:
            # explore: uniform random valid action
            return random.choice(valid_actions)
        # exploit: valid action with the highest Q-value
        with torch.no_grad():
            state_tensor = torch.tensor(state).unsqueeze(0)
            q_values = self.policy_net(state_tensor)

            valid_indices = [self.action_to_idx[a] for a in valid_actions]
            best_idx = valid_indices[torch.argmax(q_values[0, valid_indices]).item()]
            return self.action_space[best_idx]

    def store_experience(self, *args):
        """Append a (state, action, reward, next_state, done) transition."""
        self.memory.append(Experience(*args))

    def update_policy(self):
        """Sample a batch and take one gradient step.

        Returns the scalar loss, or 0 when the buffer is still smaller
        than one batch.
        """
        if len(self.memory) < self.batch_size:
            return 0

        experiences = random.sample(self.memory, self.batch_size)
        # np.stack + from_numpy avoids torch's slow (and warned-about)
        # tensor-from-list-of-ndarrays construction path
        states = torch.from_numpy(np.stack([e.state for e in experiences]))
        actions = torch.tensor([self.action_to_idx[e.action] for e in experiences])
        rewards = torch.tensor([e.reward for e in experiences], dtype=torch.float32)
        next_states = torch.from_numpy(np.stack([e.next_state for e in experiences]))
        dones = torch.tensor([e.done for e in experiences], dtype=torch.float32)

        # Q(s, a) for the actions actually taken
        current_q = self.policy_net(states).gather(1, actions.unsqueeze(1)).squeeze(1)

        with torch.no_grad():
            # NOTE(review): the max is taken over *all* actions, including
            # ones invalid in next_state; storing a validity mask per
            # transition would tighten the TD target — confirm intended.
            next_q = self.target_net(next_states).max(1)[0]
            target_q = rewards + (1 - dones) * self.gamma * next_q

        loss = self.loss_fn(current_q, target_q)
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

        # decay exploration after every gradient step
        if self.epsilon > self.epsilon_min:
            self.epsilon *= self.epsilon_decay

        return loss.item()

    def update_target_network(self):
        """Copy the policy-network weights into the target network."""
        self.target_net.load_state_dict(self.policy_net.state_dict())


# --------------------------
# PPO算法实现
# --------------------------
class PPONetwork(nn.Module):
    """Actor-critic network with a shared Tanh MLP trunk."""

    def __init__(self, state_dim, action_dim):
        super(PPONetwork, self).__init__()
        self.shared = nn.Sequential(
            nn.Linear(state_dim, 128),
            nn.Tanh(),
            nn.Linear(128, 128),
            nn.Tanh(),
        )
        self.actor = nn.Linear(128, action_dim)  # policy head (logits)
        self.critic = nn.Linear(128, 1)          # state-value head

    def forward(self, x):
        features = self.shared(x)
        return self.actor(features), self.critic(features)


class PPOAgent:
    """PPO agent with a clipped surrogate objective, GAE, and
    valid-action masking at sampling time."""

    def __init__(self, state_dim, action_space, lr=3e-4, gamma=0.99, clip_epsilon=0.2,
                 K_epochs=10, batch_size=64, gae_lambda=0.95):
        self.state_dim = state_dim
        self.action_space = action_space
        self.action_dim = len(action_space)
        self.action_to_idx = {a: i for i, a in enumerate(action_space)}

        self.gamma = gamma                # discount factor
        self.gae_lambda = gae_lambda      # GAE smoothing factor (was hard-coded 0.95)
        self.clip_epsilon = clip_epsilon  # PPO clip range
        self.K_epochs = K_epochs          # update epochs per trajectory batch
        self.batch_size = batch_size

        self.network = PPONetwork(state_dim, self.action_dim)
        self.optimizer = optim.Adam(self.network.parameters(), lr=lr)
        self.mse_loss = nn.MSELoss()

    def select_action(self, state, valid_actions):
        """Sample a valid action; returns (action, log_prob, state_value)."""
        state_tensor = torch.tensor(state).unsqueeze(0)
        logits, value = self.network(state_tensor)

        # mask out invalid actions by pushing their logits toward -inf
        valid_indices = [self.action_to_idx[a] for a in valid_actions]
        mask = torch.zeros(self.action_dim)
        mask[valid_indices] = 1.0
        masked_logits = logits + (mask - 1) * 1e10

        dist = torch.distributions.Categorical(logits=masked_logits)
        action_idx = dist.sample()
        action_prob = dist.log_prob(action_idx).item()
        action = self.action_space[action_idx.item()]

        return action, action_prob, value.item()

    def compute_gae(self, rewards, values, dones, next_value):
        """Generalized Advantage Estimation.

        Returns (advantages, returns), both lists aligned with ``rewards``.
        """
        advantages = []
        last_advantage = 0
        last_value = next_value

        for t in reversed(range(len(rewards))):
            delta = rewards[t] + self.gamma * last_value * (1 - dones[t]) - values[t]
            last_advantage = delta + self.gamma * self.gae_lambda * (1 - dones[t]) * last_advantage
            advantages.insert(0, last_advantage)
            last_value = values[t]

        returns = [a + v for a, v in zip(advantages, values)]
        return advantages, returns

    def update_policy(self, trajectories):
        """Run K_epochs of minibatch PPO updates; returns the mean loss."""
        # np.stack + from_numpy avoids torch's slow list-of-ndarrays path
        states = torch.from_numpy(np.stack([s for traj in trajectories for s in traj['states']]))
        actions = torch.tensor([self.action_to_idx[a] for traj in trajectories for a in traj['actions']])
        old_probs = torch.tensor([p for traj in trajectories for p in traj['probs']])
        advantages = torch.tensor([a for traj in trajectories for a in traj['advantages']])
        returns = torch.tensor([r for traj in trajectories for r in traj['returns']])

        # normalize advantages for stable updates
        advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)

        # actual number of minibatches per epoch (ceil division); the old
        # ``len // batch_size + 1`` over-counted when len divides evenly
        num_batches = (len(states) + self.batch_size - 1) // self.batch_size

        total_loss = 0
        for _ in range(self.K_epochs):
            indices = torch.randperm(len(states))
            for start in range(0, len(states), self.batch_size):
                batch_indices = indices[start:start + self.batch_size]

                # NOTE(review): logits here are *unmasked*, unlike at
                # sampling time; storing the valid-action masks would make
                # the update consistent with the behavior policy — confirm.
                logits, values = self.network(states[batch_indices])
                dist = torch.distributions.Categorical(logits=logits)
                new_probs = dist.log_prob(actions[batch_indices])

                # importance ratio between new and behavior policy
                ratio = torch.exp(new_probs - old_probs[batch_indices])

                # clipped surrogate objective
                surr1 = ratio * advantages[batch_indices]
                surr2 = torch.clamp(ratio, 1 - self.clip_epsilon, 1 + self.clip_epsilon) * advantages[batch_indices]
                actor_loss = -torch.min(surr1, surr2).mean()
                # squeeze(-1), not squeeze(): a 1-sample final batch must
                # keep shape (1,) or MSELoss silently broadcasts
                critic_loss = self.mse_loss(values.squeeze(-1), returns[batch_indices])
                loss = actor_loss + 0.5 * critic_loss

                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()

                total_loss += loss.item()

        # average over the actual number of gradient steps taken
        return total_loss / (self.K_epochs * num_batches)


# --------------------------
# 训练与评估函数
# --------------------------
def train_dqn(env, episodes=300):
    """Train a DQN agent on the given FJSP environment.

    Returns (agent, history); history records per-episode makespan, total
    reward and mean loss.
    """
    # enumerate every (job, machine) pair that can ever be scheduled
    all_possible_actions = []
    for job in range(env.num_jobs):
        for op in env.processes[job]:
            all_possible_actions.extend((job, m) for m, _ in op)
    all_possible_actions = list(set(all_possible_actions))  # de-duplicate

    state_dim = len(env.reset())
    agent = DQNAgent(state_dim, all_possible_actions)

    history = {'makespan': [], 'reward': [], 'loss': []}

    print("开始DQN训练...")
    start_time = time.time()

    for episode in range(episodes):
        state = env.reset()
        total_reward = 0
        total_loss = 0
        done = False
        steps = 0

        while not done:
            action = agent.select_action(state, env.get_valid_actions())
            next_state, reward, done = env.step(action)

            agent.store_experience(state, action, reward, next_state, done)
            total_loss += agent.update_policy()

            state = next_state
            total_reward += reward
            steps += 1

            # safety cap against non-terminating episodes
            if steps > 1000:
                done = True

        # periodically sync the target network with the policy network
        if episode % 10 == 0:
            agent.update_target_network()

        history['makespan'].append(env.current_makespan)
        history['reward'].append(total_reward)
        history['loss'].append(total_loss / steps if steps > 0 else 0)

        # progress report every 20 episodes
        if (episode + 1) % 20 == 0:
            avg_makespan = np.mean(history['makespan'][-20:])
            print(f"episode {episode + 1}/{episodes}, 平均makespan: {avg_makespan:.2f}, "
                  f"平均奖励: {np.mean(history['reward'][-20:]):.2f}, 探索率: {agent.epsilon:.3f}")

    print(f"DQN训练完成，耗时: {time.time() - start_time:.2f}秒")
    return agent, history


def train_ppo(env, episodes=300, traj_per_update=4):
    """Train a PPO agent on the given FJSP environment.

    Collects ``traj_per_update`` complete trajectories per policy update.
    Returns (agent, history).
    """
    # enumerate every (job, machine) pair that can ever be scheduled
    all_possible_actions = []
    for job in range(env.num_jobs):
        for op in env.processes[job]:
            all_possible_actions.extend((job, m) for m, _ in op)
    all_possible_actions = list(set(all_possible_actions))  # de-duplicate

    state_dim = len(env.reset())
    agent = PPOAgent(state_dim, all_possible_actions)

    history = {'makespan': [], 'reward': [], 'loss': []}

    print("\n开始PPO训练...")
    start_time = time.time()

    for episode in range(episodes):
        trajectories = []

        # roll out several complete episodes before each update
        for _ in range(traj_per_update):
            state = env.reset()
            states, actions, probs, rewards, values, dones = [], [], [], [], [], []
            done = False
            steps = 0

            while not done:
                action, action_prob, value = agent.select_action(state, env.get_valid_actions())
                next_state, reward, done = env.step(action)

                states.append(state)
                actions.append(action)
                probs.append(action_prob)
                rewards.append(reward)
                values.append(value)
                dones.append(done)

                state = next_state
                steps += 1

                # safety cap against non-terminating episodes
                if steps > 1000:
                    done = True

            # bootstrap value for the state the episode ended in
            with torch.no_grad():
                _, last_value = agent.network(torch.tensor(state).unsqueeze(0))
                last_value = last_value.item()

            advantages, returns = agent.compute_gae(rewards, values, dones, last_value)

            trajectories.append({
                'states': states,
                'actions': actions,
                'probs': probs,
                'advantages': advantages,
                'returns': returns,
            })

            # record per-trajectory outcome
            history['makespan'].append(env.current_makespan)
            history['reward'].append(sum(rewards))

        loss = agent.update_policy(trajectories)
        history['loss'].append(loss)

        # progress report every 20 episodes
        if (episode + 1) % 20 == 0:
            avg_makespan = np.mean(history['makespan'][-20 * traj_per_update:])
            print(f"episode {episode + 1}/{episodes}, 平均makespan: {avg_makespan:.2f}, "
                  f"平均奖励: {np.mean(history['reward'][-20 * traj_per_update:]):.2f}, 损失: {loss:.4f}")

    print(f"PPO训练完成，耗时: {time.time() - start_time:.2f}秒")
    return agent, history


def plot_training_history(dqn_history, ppo_history):
    """Plot DQN vs. PPO training curves and save them to a PNG file."""
    fig, axes = plt.subplots(3, 1, figsize=(12, 18))

    # (history key, panel title, y-axis label) for the three stacked panels
    panels = [
        ('makespan', '训练过程中的Makespan变化', '最大完工时间'),
        ('reward', '训练过程中的奖励变化', '总奖励'),
        ('loss', '训练过程中的损失变化', '损失值'),
    ]
    for ax, (key, title, ylabel) in zip(axes, panels):
        ax.plot(dqn_history[key], label='DQN', alpha=0.7)
        ax.plot(ppo_history[key], label='PPO', alpha=0.7)
        ax.set_title(title)
        ax.set_xlabel('训练步数')
        ax.set_ylabel(ylabel)
        ax.legend()

    plt.tight_layout()
    plt.savefig('fjsp_drl_training_history.png')
    print("训练历史图表已保存为 fjsp_drl_training_history.png")
    plt.show()


# --------------------------
# Entry point: train both agents and compare results
# --------------------------
if __name__ == "__main__":
    # environment with 5 jobs and 3 machines
    env = FJSPEnv(num_jobs=5, num_machines=3)

    # train both agents on the same environment instance
    dqn_agent, dqn_history = train_dqn(env, episodes=300)
    ppo_agent, ppo_history = train_ppo(env, episodes=300)

    # training-curve comparison
    plot_training_history(dqn_history, ppo_history)

    # final evaluation: 10 rollouts per agent
    print("\n最终评估:")
    env.reset()
    dqn_makespans = []
    for _ in range(10):
        state = env.reset()
        done = False
        while not done:
            action = dqn_agent.select_action(state, env.get_valid_actions())
            state, _, done = env.step(action)
        dqn_makespans.append(env.current_makespan)

    ppo_makespans = []
    for _ in range(10):
        state = env.reset()
        done = False
        while not done:
            action, _, _ = ppo_agent.select_action(state, env.get_valid_actions())
            state, _, done = env.step(action)
        ppo_makespans.append(env.current_makespan)

    print(f"DQN平均Makespan: {np.mean(dqn_makespans):.2f}")
    print(f"PPO平均Makespan: {np.mean(ppo_makespans):.2f}")
    print(f"PPO相对DQN的改进: {(1 - np.mean(ppo_makespans) / np.mean(dqn_makespans)) * 100:.2f}%")