#!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
@Project ：V2 
@File    ：DP5.py
@IDE     ：PyCharm 
@Author  ：郭星
@Date    ：2025/9/29 22:13


DQN和PPO有甘特图
'''
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import matplotlib.pyplot as plt
from collections import deque, namedtuple
import random
import time
from matplotlib.patches import Patch

# Ensure Chinese glyphs render correctly in plot labels.
# Fix: the fallback list was ["SimHei", "SimHei", "SimHei"] — duplicates are
# redundant in a matplotlib font fallback list; one entry suffices.
plt.rcParams["font.family"] = ["SimHei"]
plt.rcParams["axes.unicode_minus"] = False


# --------------------------
# 柔性作业车间环境
# --------------------------
class FJSPEnv:
    """Flexible job-shop scheduling (FJSP) environment.

    Each job is a random sequence of 3-5 operations; each operation may run
    on a random subset of machines with machine-dependent processing times.
    An action is a ``(job_idx, machine_idx)`` pair that schedules the job's
    next pending operation on that machine.
    """

    def __init__(self, num_jobs=5, num_machines=3):
        self.num_jobs = num_jobs
        self.num_machines = num_machines

        # processes[job][op] -> list of (machine, processing_time) alternatives.
        self.processes = []
        for job_idx in range(num_jobs):
            job_processes = []
            num_ops = random.randint(3, 5)
            for op_idx in range(num_ops):
                possible_machines = random.sample(range(num_machines),
                                                  random.randint(1, min(3, num_machines)))
                times = [random.randint(5, 20) for _ in possible_machines]
                job_processes.append(list(zip(possible_machines, times)))
            self.processes.append(job_processes)

        # Normalization constants for _get_state. The instance data is fixed
        # after construction, so hoist these out of the per-step state build
        # (they were recomputed over every op of every job on each call).
        self._max_total_time = sum(max(t for _, t in op)
                                   for job in self.processes for op in job)
        self._max_ops = max(len(job) for job in self.processes)

        self.reset()

    def reset(self):
        """Reset all scheduling state and return the initial observation."""
        self.job_progress = [0] * self.num_jobs       # index of each job's next op
        self.machine_times = [0] * self.num_machines  # release time of each machine
        self.completed_jobs = 0
        self.current_makespan = 0
        self.schedule_details = []                    # one record per scheduled op
        return self._get_state()

    def _get_state(self):
        """Build the observation vector.

        Concatenates: normalized machine release times, normalized job
        progress, and a per-job machine-eligibility mask for the next op.
        """
        state = []
        state.extend([t / self._max_total_time for t in self.machine_times])
        state.extend([p / self._max_ops for p in self.job_progress])

        for job_idx in range(self.num_jobs):
            if self.job_progress[job_idx] < len(self.processes[job_idx]):
                possible_machines = [m for m, _ in self.processes[job_idx][self.job_progress[job_idx]]]
                state.extend([1.0 if m in possible_machines else 0.0 for m in range(self.num_machines)])
            else:
                # Job finished: no machine is eligible.
                state.extend([0.0] * self.num_machines)
        return np.array(state, dtype=np.float32)

    def step(self, action):
        """Schedule one operation and return ``(next_state, reward, done)``.

        An invalid action (job already finished, or machine not eligible for
        the job's next op) is rejected with a -100 penalty and no state
        change. The dense reward is the negative makespan increase; episode
        completion adds a bonus inversely proportional to the final makespan.
        """
        job_idx, machine_idx = action
        done = False

        op_idx = self.job_progress[job_idx]
        if (op_idx >= len(self.processes[job_idx]) or
                machine_idx not in [m for m, _ in self.processes[job_idx][op_idx]]):
            return self._get_state(), -100, done

        process_time = next(t for m, t in self.processes[job_idx][op_idx] if m == machine_idx)
        # The op starts when both the machine and the job's previous op are free.
        start_time = max(self.machine_times[machine_idx], self._get_job_last_end_time(job_idx))
        end_time = start_time + process_time

        self.machine_times[machine_idx] = end_time
        self.job_progress[job_idx] += 1
        if self.job_progress[job_idx] == len(self.processes[job_idx]):
            self.completed_jobs += 1

        self.schedule_details.append({
            "job_id": job_idx,
            "op_id": op_idx,
            "machine_id": machine_idx,
            "start_time": start_time,
            "end_time": end_time
        })

        new_makespan = max(self.machine_times)
        reward = self.current_makespan - new_makespan
        self.current_makespan = new_makespan

        if self.completed_jobs == self.num_jobs:
            done = True
            reward += 1000 / new_makespan

        return self._get_state(), reward, done

    def _get_job_last_end_time(self, job_idx):
        """End time of the job's most recently scheduled op (0 if none yet)."""
        if self.job_progress[job_idx] == 0:
            return 0
        last_op_details = [d for d in self.schedule_details
                           if d["job_id"] == job_idx and d["op_id"] == self.job_progress[job_idx] - 1]
        return last_op_details[0]["end_time"] if last_op_details else 0

    def get_valid_actions(self):
        """Return every currently legal ``(job_idx, machine_idx)`` pair."""
        valid_actions = []
        for job_idx in range(self.num_jobs):
            op_idx = self.job_progress[job_idx]
            if op_idx < len(self.processes[job_idx]):
                valid_actions.extend([(job_idx, m) for m, _ in self.processes[job_idx][op_idx]])
        return valid_actions


# --------------------------
# DQN算法
# --------------------------
class DQNNetwork(nn.Module):
    """Three-layer MLP mapping a state vector to one Q-value per action."""

    def __init__(self, state_dim, action_dim):
        super().__init__()
        hidden = 128
        self.fc1 = nn.Linear(state_dim, hidden)
        self.fc2 = nn.Linear(hidden, hidden)
        self.fc3 = nn.Linear(hidden, action_dim)
        self.relu = nn.ReLU()

    def forward(self, x):
        hidden = self.relu(self.fc2(self.relu(self.fc1(x))))
        return self.fc3(hidden)


# One transition record stored in the DQN replay buffer.
Experience = namedtuple('Experience', ('state', 'action', 'reward', 'next_state', 'done'))


class DQNAgent:
    """Epsilon-greedy DQN agent with a target network and replay buffer."""

    def __init__(self, state_dim, action_space, lr=1e-3, gamma=0.99, epsilon=1.0, batch_size=64):
        self.state_dim = state_dim
        self.action_space = action_space
        self.action_dim = len(action_space)
        # Map each (job, machine) action to its output index in the network.
        self.action_to_idx = {action: idx for idx, action in enumerate(action_space)}

        self.gamma = gamma
        self.epsilon = epsilon
        self.epsilon_min = 0.01
        self.epsilon_decay = 0.995
        self.batch_size = batch_size

        self.policy_net = DQNNetwork(state_dim, self.action_dim)
        self.target_net = DQNNetwork(state_dim, self.action_dim)
        self.target_net.load_state_dict(self.policy_net.state_dict())
        self.optimizer = optim.Adam(self.policy_net.parameters(), lr=lr)
        self.memory = deque(maxlen=10000)
        self.loss_fn = nn.MSELoss()

    def select_action(self, state, valid_actions, greedy=True):
        """Pick a valid action; explores with probability epsilon unless greedy."""
        explore = (not greedy) and random.random() < self.epsilon
        if explore:
            return random.choice(valid_actions)
        with torch.no_grad():
            q_values = self.policy_net(torch.tensor(state).unsqueeze(0))
            candidates = [self.action_to_idx[a] for a in valid_actions]
            # Argmax restricted to the valid actions only.
            best = candidates[torch.argmax(q_values[0, candidates]).item()]
            return self.action_space[best]

    def store_experience(self, *args):
        """Append one Experience(state, action, reward, next_state, done)."""
        self.memory.append(Experience(*args))

    def update_policy(self):
        """Run one gradient step on a random minibatch; returns the loss."""
        if len(self.memory) < self.batch_size:
            return 0
        batch = random.sample(self.memory, self.batch_size)
        states = torch.tensor(np.array([e.state for e in batch]))
        actions = torch.tensor([self.action_to_idx[e.action] for e in batch])
        rewards = torch.tensor([e.reward for e in batch])
        next_states = torch.tensor(np.array([e.next_state for e in batch]))
        dones = torch.tensor([e.done for e in batch], dtype=torch.float32)

        predicted = self.policy_net(states).gather(1, actions.unsqueeze(1)).squeeze(1)
        with torch.no_grad():
            best_next = self.target_net(next_states).max(1)[0]
            # Terminal transitions get no bootstrapped value.
            targets = rewards + (1 - dones) * self.gamma * best_next

        loss = self.loss_fn(predicted, targets)
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

        # Anneal exploration after every learning step.
        if self.epsilon > self.epsilon_min:
            self.epsilon *= self.epsilon_decay
        return loss.item()

    def update_target_network(self):
        """Copy the policy-network weights into the target network."""
        self.target_net.load_state_dict(self.policy_net.state_dict())


# --------------------------
# PPO算法（修复log_prob输入类型错误）
# --------------------------
class PPONetwork(nn.Module):
    """Actor-critic network with a shared tanh-MLP trunk."""

    def __init__(self, state_dim, action_dim):
        super().__init__()
        self.shared = nn.Sequential(
            nn.Linear(state_dim, 128),
            nn.Tanh(),
            nn.Linear(128, 128),
            nn.Tanh(),
        )
        self.actor = nn.Linear(128, action_dim)  # policy logits head
        self.critic = nn.Linear(128, 1)          # state-value head

    def forward(self, x):
        features = self.shared(x)
        return self.actor(features), self.critic(features)


class PPOAgent:
    """Clipped-PPO agent with a validity mask over (job, machine) actions."""

    def __init__(self, state_dim, action_space, lr=3e-4, gamma=0.99, clip_epsilon=0.2, K_epochs=10, batch_size=64):
        self.state_dim = state_dim
        self.action_space = action_space
        self.action_dim = len(action_space)
        self.action_to_idx = {a: i for i, a in enumerate(action_space)}

        self.gamma = gamma
        self.clip_epsilon = clip_epsilon
        self.K_epochs = K_epochs
        self.batch_size = batch_size

        self.network = PPONetwork(state_dim, self.action_dim)
        self.optimizer = optim.Adam(self.network.parameters(), lr=lr)
        self.mse_loss = nn.MSELoss()

    def select_action(self, state, valid_actions, greedy=True):
        """Return a valid action.

        Greedy: argmax over masked logits. Otherwise: sample from the masked
        categorical distribution.
        """
        state_tensor = torch.tensor(state).unsqueeze(0)
        logits, value = self.network(state_tensor)

        # Push invalid actions' logits to -1e10 so they get ~zero probability.
        valid_indices = [self.action_to_idx[a] for a in valid_actions]
        mask = torch.zeros(self.action_dim)
        mask[valid_indices] = 1.0
        masked_logits = logits + (mask - 1) * 1e10

        if greedy:
            action_idx = torch.argmax(masked_logits, dim=1).item()
        else:
            dist = torch.distributions.Categorical(logits=masked_logits)
            action_idx = dist.sample().item()
        # Fix: the original had a second, unreachable `return action` here.
        return self.action_space[action_idx]

    def compute_gae(self, rewards, values, dones, next_value):
        """Generalized advantage estimation (lambda = 0.95).

        Returns (advantages, returns), where returns[t] = advantages[t] + values[t].
        """
        advantages = []
        last_adv = 0
        last_val = next_value
        for t in reversed(range(len(rewards))):
            delta = rewards[t] + self.gamma * last_val * (1 - dones[t]) - values[t]
            last_adv = delta + self.gamma * 0.95 * (1 - dones[t]) * last_adv
            advantages.insert(0, last_adv)
            last_val = values[t]
        return advantages, [a + v for a, v in zip(advantages, values)]

    def update_policy(self, trajectories):
        """Run K_epochs of clipped-PPO minibatch updates over the collected
        trajectories; returns the average minibatch loss."""
        states = torch.tensor(np.array([s for traj in trajectories for s in traj['states']]))
        actions = torch.tensor([self.action_to_idx[a] for traj in trajectories for a in traj['actions']])
        old_probs = torch.tensor([p for traj in trajectories for p in traj['probs']])
        advantages = torch.tensor([a for traj in trajectories for a in traj['advantages']])
        returns = torch.tensor([r for traj in trajectories for r in traj['returns']])

        # Normalize advantages for stability.
        advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)
        total_loss = 0

        for _ in range(self.K_epochs):
            indices = torch.randperm(len(states))
            for start in range(0, len(states), self.batch_size):
                end = start + self.batch_size
                batch_idx = indices[start:end]

                # NOTE(review): new log-probs use UNMASKED logits while rollout
                # probs were computed with the validity mask — confirm whether
                # the mask should be reapplied here (left unchanged on purpose).
                logits, values = self.network(states[batch_idx])
                dist = torch.distributions.Categorical(logits=logits)
                new_probs = dist.log_prob(actions[batch_idx])

                ratio = torch.exp(new_probs - old_probs[batch_idx])
                surr1 = ratio * advantages[batch_idx]
                surr2 = torch.clamp(ratio, 1 - self.clip_epsilon, 1 + self.clip_epsilon) * advantages[batch_idx]
                actor_loss = -torch.min(surr1, surr2).mean()
                critic_loss = self.mse_loss(values.squeeze(), returns[batch_idx])
                loss = actor_loss + 0.5 * critic_loss

                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()
                total_loss += loss.item()

        return total_loss / (self.K_epochs * (len(states) // self.batch_size + 1))


# --------------------------
# 训练函数（修复关键错误）
# --------------------------
def train_dqn(env, episodes=300):
    """Train a DQN agent on the given FJSP environment.

    Returns the trained agent and a history dict holding per-episode
    makespan, total reward, and mean loss.
    """
    # Enumerate every distinct (job, machine) pair appearing in any operation.
    all_actions = list({(job, machine)
                        for job in range(env.num_jobs)
                        for op in env.processes[job]
                        for machine, _ in op})
    agent = DQNAgent(len(env.reset()), all_actions)
    history = {"makespan": [], "reward": [], "loss": []}

    print("开始DQN训练...")
    start_time = time.time()
    for episode in range(episodes):
        state = env.reset()
        total_reward = 0
        total_loss = 0
        steps = 0
        done = False
        while not done and steps < 1000:  # step cap guards against stalls
            action = agent.select_action(state, env.get_valid_actions(), greedy=False)
            next_state, reward, done = env.step(action)
            agent.store_experience(state, action, reward, next_state, done)
            total_loss += agent.update_policy()
            state = next_state
            total_reward += reward
            steps += 1

        # Sync the target network every 10 episodes.
        if episode % 10 == 0:
            agent.update_target_network()
        history["makespan"].append(env.current_makespan)
        history["reward"].append(total_reward)
        history["loss"].append(total_loss / steps if steps > 0 else 0)

        if (episode + 1) % 20 == 0:
            avg_makespan = np.mean(history["makespan"][-20:])
            print(
                f"DQN Episode {episode + 1}/{episodes} | 平均Makespan: {avg_makespan:.2f} | 探索率: {agent.epsilon:.3f}")

    print(f"DQN训练完成，耗时: {time.time() - start_time:.2f}秒")
    return agent, history


def train_ppo(env, episodes=300, traj_per_update=4):
    """Train a PPO agent on the given FJSP environment.

    Collects ``traj_per_update`` complete episodes per policy update, scores
    them with GAE, then performs one clipped-PPO update over the batch.
    Returns the trained agent and a history dict (makespan/reward recorded
    per trajectory, loss recorded per update).
    """
    all_actions = list(set([(j, m) for j in range(env.num_jobs) for op in env.processes[j] for m, _ in op]))
    state_dim = len(env.reset())
    agent = PPOAgent(state_dim, all_actions)
    history = {"makespan": [], "reward": [], "loss": []}

    print("\n开始PPO训练...")
    start_time = time.time()
    for episode in range(episodes):
        trajectories = []
        for _ in range(traj_per_update):
            state = env.reset()
            states, actions, probs, rewards, values, dones = [], [], [], [], [], []
            done, steps = False, 0
            while not done and steps < 1000:
                valid_actions = env.get_valid_actions()
                # Sample from the masked policy; keep the sampled index as a
                # tensor so dist.log_prob receives a tensor, not a Python int.
                with torch.no_grad():
                    logits, val = agent.network(torch.tensor(state).unsqueeze(0))
                    valid_idx = [agent.action_to_idx[a] for a in valid_actions]
                    mask = torch.zeros(agent.action_dim)
                    mask[valid_idx] = 1.0
                    masked_logits = logits + (mask - 1) * 1e10  # invalid logits -> -1e10
                    dist = torch.distributions.Categorical(logits=masked_logits)
                    action_idx = dist.sample()  # kept as a tensor for log_prob
                    prob = dist.log_prob(action_idx).item()  # log-prob of the sampled action
                    action = agent.action_space[action_idx.item()]  # scalar only for indexing

                next_state, reward, done = env.step(action)
                states.append(state)
                actions.append(action)
                probs.append(prob)
                rewards.append(reward)
                values.append(val.item())
                dones.append(done)
                state, steps = next_state, steps + 1

            # Bootstrap the final state's value for GAE.
            with torch.no_grad():
                _, last_val = agent.network(torch.tensor(state).unsqueeze(0))
            advantages, returns = agent.compute_gae(rewards, values, dones, last_val.item())
            trajectories.append(
                {"states": states, "actions": actions, "probs": probs, "advantages": advantages, "returns": returns})

            history["makespan"].append(env.current_makespan)
            history["reward"].append(sum(rewards))

        loss = agent.update_policy(trajectories)
        history["loss"].append(loss)

        if (episode + 1) % 20 == 0:
            avg_makespan = np.mean(history["makespan"][-20 * traj_per_update:])
            print(f"PPO Episode {episode + 1}/{episodes} | 平均Makespan: {avg_makespan:.2f} | 损失: {loss:.4f}")

    print(f"PPO训练完成，耗时: {time.time() - start_time:.2f}秒")
    return agent, history


# --------------------------
# 最优调度与甘特图绘制
# --------------------------
def get_best_schedule(agent, env, num_trials=20, is_ppo=True):
    """Greedily roll out the trained agent several times and keep the best.

    Args:
        agent: trained agent exposing select_action(state, valid_actions, greedy).
        env: FJSP environment instance.
        num_trials: number of greedy rollouts to attempt.
        is_ppo: kept for backward compatibility only — the original's two
            branches were byte-identical, so this flag never changed behavior.

    Returns:
        (schedule_details, makespan) of the best rollout found.
    """
    best_makespan = float('inf')
    best_details = None

    for _ in range(num_trials):
        env.reset()
        done = False
        while not done:
            state = env._get_state()
            valid_actions = env.get_valid_actions()
            # DQN and PPO agents share the same greedy interface; the dead
            # is_ppo branch (two identical calls) is collapsed into one.
            action = agent.select_action(state, valid_actions, greedy=True)
            _, _, done = env.step(action)

        if env.current_makespan < best_makespan:
            best_makespan = env.current_makespan
            best_details = env.schedule_details.copy()

    print(f"\n最优调度结果：Makespan = {best_makespan}")
    return best_details, best_makespan


def plot_gantt_chart(schedule_details, makespan, num_machines):
    """Draw a Gantt chart of the schedule, save it as a PNG, and show it.

    One horizontal bar per scheduled operation, grouped into machine rows
    and colored by job, with a job-color legend.
    """
    job_ids = list(set(d["job_id"] for d in schedule_details))
    palette = plt.cm.hsv(np.linspace(0, 0.8, len(job_ids)))
    job_color_map = dict(zip(job_ids, palette))

    fig, ax = plt.subplots(figsize=(12, 6))

    for record in schedule_details:
        job = record["job_id"]
        machine = record["machine_id"]
        begin = record["start_time"]
        span = record["end_time"] - begin

        # Bar for the operation, colored by its job.
        ax.barh(y=machine, left=begin, width=span, height=0.6,
                color=job_color_map[job], edgecolor='black', alpha=0.8)
        # Centered "J<job>-O<op>" label (ids shown 1-based).
        ax.text(x=begin + span / 2, y=machine,
                s=f"J{job + 1}-O{record['op_id'] + 1}",
                ha='center', va='center', fontsize=8, fontweight='bold')

    ax.set_yticks(range(num_machines))
    ax.set_yticklabels([f"机器{idx + 1}" for idx in range(num_machines)], fontsize=10)
    ax.set_xlabel("时间", fontsize=12, fontweight='bold')
    ax.set_ylabel("机器", fontsize=12, fontweight='bold')
    ax.set_title(f"柔性作业车间最优调度甘特图（Makespan = {makespan}）", fontsize=14, fontweight='bold', pad=20)

    # Small right margin, light vertical grid behind the bars.
    ax.set_xlim(0, makespan * 1.05)
    ax.grid(axis='x', alpha=0.3, linestyle='--')
    ax.set_axisbelow(True)

    legend_elements = [Patch(facecolor=job_color_map[jid], edgecolor='black', label=f"作业{jid + 1}")
                       for jid in job_ids]
    ax.legend(handles=legend_elements, loc='upper right', title="作业颜色对应", title_fontsize=10, fontsize=9)

    plt.tight_layout()
    plt.savefig("fjsp_best_schedule_gantt.png", dpi=300, bbox_inches='tight')
    print("甘特图已保存为：fjsp_best_schedule_gantt.png")
    plt.show()


# --------------------------
# 主函数
# --------------------------
if __name__ == "__main__":
    # Build one random FJSP instance, train a PPO agent on it, then extract
    # and plot the best greedy schedule found by the trained policy.
    env = FJSPEnv(num_jobs=5, num_machines=3)
    ppo_agent, _ = train_ppo(env, episodes=300)
    best_schedule, best_makespan = get_best_schedule(ppo_agent, env, num_trials=20, is_ppo=True)
    plot_gantt_chart(best_schedule, best_makespan, env.num_machines)
