#!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
@Project ：V2 
@File    ：T2.py
@IDE     ：PyCharm 
@Author  ：郭星
@Date    ：2025/9/9 18:33
多个单一调度规则作为动作集合的深度强化学习
'''
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Flexible Job Shop Scheduling (FJSP) with Rule-as-Action DQN
- 动作集合 = 多个经典派工规则（SPT, LPT, FIFO, MWKR, EDD, CR, MOPNR, LWKR）
- 智能体每步选择一个规则；环境用该规则从全部可派工候选中挑选实际的(作业-工序-机器)进行调度
- 奖励 = makespan 增量的负值 + 轻微步惩罚（越小越好）
- 含简单甘特图可视化

Usage:
  pip install torch numpy matplotlib
  python fjsp_rule_actions_dqn.py
"""

import random
from dataclasses import dataclass
from typing import Dict, List, Tuple
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from collections import deque
import matplotlib.pyplot as plt

# =============== 基本问题定义 ===============

@dataclass
class Operation:
    """One operation of a job; it may run on any of several candidate machines."""
    # Candidate machines and their processing times: machine_id -> processing_time
    machine_time: Dict[int, int]

@dataclass
class Job:
    """A job: its operations (processed in list order) plus a due date."""
    ops: List[Operation]
    due: float  # due date (used by the EDD/CR dispatching rules)

@dataclass
class Problem:
    """An FJSP instance: the list of jobs and the number of machines."""
    jobs: List[Job]
    m: int  # machine count

    @property
    def n_jobs(self):
        """Number of jobs in the instance."""
        return len(self.jobs)

    @property
    def n_ops_total(self):
        """Total number of operations across every job."""
        return sum(map(len, (job.ops for job in self.jobs)))

    def max_ops_per_job(self):
        """Largest operation count held by any single job."""
        return max(len(job.ops) for job in self.jobs)

def build_demo_problem(n_jobs: int = 12, m: int = 5, ops_per_job: int = 4, seed: int = 7) -> Problem:
    """
    Generate a random FJSP instance.

    Each operation can run on 1-2 machines with processing times in 1..9;
    each job receives a rough due date (estimated total work plus random slack).
    """
    rng = random.Random(seed)
    alt_machines = 2 if m >= 2 else 1
    jobs: List[Job] = []
    for _ in range(n_jobs):
        ops: List[Operation] = []
        est_work = 0
        for _ in range(ops_per_job):
            chosen = sorted(rng.sample(range(m), k=alt_machines))
            times = {mc: rng.randint(1, 9) for mc in chosen}
            ops.append(Operation(times))
            est_work += min(times.values())
        # simple due date: estimated processing time plus random slack
        jobs.append(Job(ops=ops, due=est_work + rng.randint(5, 20)))
    return Problem(jobs=jobs, m=m)

# =============== 环境（规则动作 -> 实际调度） ===============

# Dispatching rules that form the discrete action set (one action per rule).
RULES = [
    "SPT",   # Shortest Processing Time
    "LPT",   # Longest Processing Time
    "FIFO",  # First In, First Out (by job ready time)
    "MWKR",  # Most Work Remaining
    "LWKR",  # Least Work Remaining
    "EDD",   # Earliest Due Date
    "CR",    # Critical Ratio: (due - now) / remaining_work
    "MOPNR", # Most Operations Remaining
]

class FJSPRuleEnv:
    """
    FJSP environment whose actions are dispatching rules.

    State : machine ready times, job ready times, per-job next-op index,
            per-job remaining-work estimate, per-job due date (normalized).
    Action: an index into RULES.
    Step  : score every dispatchable (job, op, machine) candidate with the
            chosen rule, pick the best candidate, and schedule it at its
            earliest feasible start time.
    Done  : every operation has been scheduled.
    Reward: -(makespan increase) - 0.01 per-step penalty.
    """
    def __init__(self, pb: Problem):
        self.pb = pb
        self.n_jobs = pb.n_jobs
        self.m = pb.m
        self.max_ops = pb.max_ops_per_job()
        self.action_dim = len(RULES)
        self.reset()

    def reset(self):
        """Clear the schedule and return the initial state vector."""
        self.next_op = np.zeros(self.n_jobs, dtype=np.int32)
        self.job_ready = np.zeros(self.n_jobs, dtype=np.float32)
        self.machine_ready = np.zeros(self.m, dtype=np.float32)
        # Remaining-work estimate per job: the sum of each pending op's
        # shortest possible processing time.
        self.rem_work = np.array([sum(min(op.machine_time.values()) for op in job.ops)
                                  for job in self.pb.jobs], dtype=np.float32)
        self.makespan = 0.0
        self.done_ops = 0
        # Schedule record: (job, op_index) -> (machine, start, end)
        self.assign: Dict[Tuple[int, int], Tuple[int, float, float]] = {}
        return self._state()

    def _state(self) -> np.ndarray:
        """Concatenate the (roughly normalized) feature groups into one vector."""
        mr_max = float(np.max(self.machine_ready)) if self.m > 0 else 1.0
        jr_max = float(np.max(self.job_ready)) if self.n_jobs > 0 else 1.0
        rw_max = float(np.max(self.rem_work)) if self.n_jobs > 0 else 1.0
        # Machine ready, job ready, next-op index, remaining work, due dates.
        mr = self.machine_ready / (1.0 + max(mr_max, 1.0))
        jr = self.job_ready / (1.0 + max(jr_max, 1.0))
        noi = self.next_op.astype(np.float32) / max(float(self.max_ops), 1.0)
        rw = self.rem_work / (1.0 + max(rw_max, 1.0))
        due = np.array([job.due for job in self.pb.jobs], dtype=np.float32)
        dd_max = float(np.max(due)) if due.size else 1.0
        due = due / (1.0 + max(dd_max, 1.0))
        return np.concatenate([mr, jr, noi, rw, due], axis=0).astype(np.float32)

    def _candidates(self):
        """All dispatchable combinations: each job's next op on each feasible machine.

        Returns a list of tuples (j, k, m, pt, start, end).
        """
        cands = []
        for j in range(self.n_jobs):
            k = int(self.next_op[j])
            if k >= len(self.pb.jobs[j].ops):
                continue  # job finished
            op = self.pb.jobs[j].ops[k]
            for mm, pt in op.machine_time.items():
                # Earliest feasible start: both the machine and the job must be free.
                start = max(float(self.machine_ready[mm]), float(self.job_ready[j]))
                end = start + float(pt)
                cands.append((j, k, mm, float(pt), start, end))
        return cands

    def _score(self, rule: str, cand, now_for_job: float, rem_work_j: float, due_j: float):
        """Score one candidate under `rule`; LOWER is better for every rule.

        cand = (j, k, mm, pt, start, end).  Maximizing rules are negated so
        that a single minimization covers all of them.  `rem_work_j` is the
        shortest-time estimate over ALL of job j's pending ops (including
        this one).
        """
        pt = cand[3]; start = cand[4]; end = cand[5]
        if rule == "SPT":
            return pt
        if rule == "LPT":
            return -pt  # minimize the negation == maximize pt
        if rule == "FIFO":
            return now_for_job  # earlier-ready job wins
        if rule == "MWKR":
            return -rem_work_j   # more remaining work wins
        if rule == "LWKR":
            return rem_work_j    # less remaining work wins
        if rule == "EDD":
            return due_j         # earlier due date wins
        if rule == "CR":
            # Critical ratio = (due - now) / remaining_work; smaller == more
            # urgent.  Guard against division by ~0 remaining work.
            denom = max(rem_work_j, 1e-6)
            return (due_j - now_for_job) / denom
        if rule == "MOPNR":
            # Most operations remaining wins.  Count against the job's OWN
            # op list (the original used the global max ops per job, which
            # mis-ranks jobs of unequal length; identical when all jobs have
            # the same number of operations).
            return -(len(self.pb.jobs[cand[0]].ops) - (cand[1] + 1))
        # Default: fall back to SPT.
        return pt

    def step(self, action_rule_id: int):
        """Apply one dispatching rule and schedule the chosen candidate.

        Returns (state, reward, done, info).
        """
        rule = RULES[action_rule_id]
        cands = self._candidates()
        if not cands:
            # No candidates (should already be terminal); small penalty.
            return self._state(), -0.5, True, {}

        # Score every candidate with the rule and keep the minimum.
        # Maximizing rules were converted to minimization in _score.
        best = None
        best_score = float("inf")
        for (j, k, mm, pt, start, end) in cands:
            now_j = float(self.job_ready[j])
            rem_j = float(self.rem_work[j])
            due_j = float(self.pb.jobs[j].due)
            s = self._score(rule, (j, k, mm, pt, start, end), now_j, rem_j, due_j)
            take = False
            if best is None or s < best_score:
                take = True
            elif abs(s - best_score) < 1e-9:
                # Tie-break: earlier end time, then earlier start time.
                if end < best[5] or (abs(end - best[5]) < 1e-9 and start < best[4]):
                    take = True
            if take:
                best = (j, k, mm, pt, start, end)
                best_score = s

        j, k, mm, pt, start, end = best
        # Commit the chosen candidate to the schedule.
        self.machine_ready[mm] = end
        self.job_ready[j] = end
        self.assign[(j, k)] = (mm, start, end)
        old_makespan = self.makespan
        self.makespan = max(self.makespan, end)
        self.next_op[j] += 1
        # Shrink the job's remaining-work estimate by this op's shortest
        # possible time (matches how rem_work was built in reset()).
        min_time_this_op = min(self.pb.jobs[j].ops[k].machine_time.values())
        self.rem_work[j] = max(0.0, self.rem_work[j] - float(min_time_this_op))
        self.done_ops += 1

        reward = -(self.makespan - old_makespan) - 0.01
        done = self.done_ops == self.pb.n_ops_total
        return self._state(), reward, done, {}

    def available_actions_mask(self) -> np.ndarray:
        """All rules are always selectable; kept for API generality."""
        return np.ones(self.action_dim, dtype=np.float32)

# =============== DQN 智能体（动作是规则） ===============

class QNet(nn.Module):
    """Two-hidden-layer MLP mapping a state vector to one Q-value per rule."""

    def __init__(self, state_dim: int, action_dim: int):
        super().__init__()
        hidden = 256
        self.net = nn.Sequential(
            nn.Linear(state_dim, hidden),
            nn.ReLU(),
            nn.Linear(hidden, hidden),
            nn.ReLU(),
            nn.Linear(hidden, action_dim),
        )

    def forward(self, x):
        """Return the Q-value estimates for input batch `x`."""
        return self.net(x)

class DQNAgent:
    """Vanilla DQN over the discrete rule set with epsilon-greedy exploration."""

    def __init__(self, state_dim: int, action_dim: int,
                 lr: float = 1e-3, gamma: float = 0.99,
                 eps_start: float = 1.0, eps_end: float = 0.05, eps_decay: float = 0.997,
                 buffer_size: int = 100_000, batch_size: int = 256, target_tau: float = 1.0):
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        # Online and target networks start with identical weights.
        self.q = QNet(state_dim, action_dim).to(self.device)
        self.q_target = QNet(state_dim, action_dim).to(self.device)
        self.q_target.load_state_dict(self.q.state_dict())
        self.opt = optim.Adam(self.q.parameters(), lr=lr)
        self.gamma = gamma
        self.eps = eps_start
        self.eps_end = eps_end
        self.eps_decay = eps_decay
        self.batch_size = batch_size
        self.target_tau = target_tau
        self.memory = deque(maxlen=buffer_size)

    def act(self, state: np.ndarray, valid_mask: np.ndarray) -> int:
        """Epsilon-greedy action: random valid rule w.p. eps, else argmax Q."""
        if np.random.rand() < self.eps:
            candidates = np.nonzero(valid_mask)[0]
            return int(np.random.choice(candidates))
        with torch.no_grad():
            st = torch.tensor(state, dtype=torch.float32, device=self.device).unsqueeze(0)
            scores = self.q(st).squeeze(0).cpu().numpy()
        # Every rule is always valid here; the mask is kept for generality.
        scores[valid_mask < 0.5] = -1e9
        return int(np.argmax(scores))

    def remember(self, s, a, r, s2, d):
        """Append one (state, action, reward, next_state, done) transition."""
        self.memory.append((s, a, r, s2, d))

    def train_step(self):
        """One gradient step on a sampled minibatch; returns the TD loss."""
        if len(self.memory) < self.batch_size:
            return 0.0
        sampled = random.sample(self.memory, self.batch_size)
        states, actions, rewards, next_states, dones = zip(*sampled)
        dev = self.device
        s = torch.tensor(np.stack(states), dtype=torch.float32, device=dev)
        a = torch.tensor(actions, dtype=torch.int64, device=dev).unsqueeze(1)
        r = torch.tensor(rewards, dtype=torch.float32, device=dev).unsqueeze(1)
        s2 = torch.tensor(np.stack(next_states), dtype=torch.float32, device=dev)
        d = torch.tensor(dones, dtype=torch.float32, device=dev).unsqueeze(1)

        q_chosen = self.q(s).gather(1, a)
        with torch.no_grad():
            next_best = self.q_target(s2).max(dim=1, keepdim=True).values
            td_target = r + (1.0 - d) * self.gamma * next_best

        loss = nn.MSELoss()(q_chosen, td_target)
        self.opt.zero_grad()
        loss.backward()
        nn.utils.clip_grad_norm_(self.q.parameters(), 1.0)
        self.opt.step()

        # Target network: hard copy when tau >= 1, Polyak soft update otherwise.
        if self.target_tau >= 1.0:
            self.q_target.load_state_dict(self.q.state_dict())
        else:
            tau = self.target_tau
            for tp, p in zip(self.q_target.parameters(), self.q.parameters()):
                tp.data.copy_(tp.data * (1.0 - tau) + p.data * tau)

        self.eps = max(self.eps_end, self.eps * self.eps_decay)
        return float(loss.item())

# =============== 训练 & 评估 & 可视化 ===============

def train_dqn(env: FJSPRuleEnv, episodes: int = 1200):
    """Train a DQN agent on `env` for `episodes` episodes.

    Returns (agent, history) where history holds per-episode
    (episode, total reward, total loss, makespan) tuples.
    """
    state_dim = env._state().size
    agent = DQNAgent(state_dim, env.action_dim,
                     lr=1e-3, gamma=0.99,
                     eps_start=1.0, eps_end=0.05, eps_decay=0.997,
                     buffer_size=100000, batch_size=256, target_tau=1.0)

    history = []
    for ep in range(1, episodes + 1):
        state = env.reset()
        mask = env.available_actions_mask()
        total_reward = 0.0
        total_loss = 0.0
        n_steps = 0
        done = False
        while not done:
            action = agent.act(state, mask)
            nxt, reward, done, _ = env.step(action)
            agent.remember(state, action, reward, nxt, float(done))
            total_loss += agent.train_step()
            total_reward += reward
            n_steps += 1
            state = nxt
        history.append((ep, total_reward, total_loss, env.makespan))
        if ep == 1 or ep % 50 == 0:
            print(f"Episode {ep:4d} | steps={n_steps:3d} | reward={total_reward:8.3f} | loss={total_loss:8.3f} | makespan={env.makespan:7.2f} | eps={agent.eps:5.3f}")
    return agent, history

def evaluate_policy(env: FJSPRuleEnv, agent: DQNAgent, greedy: bool = True):
    """Roll out one episode (greedy if requested) and return (assignment, makespan).

    The agent's exploration rate is restored afterwards.
    """
    saved_eps = agent.eps
    if greedy:
        agent.eps = 0.0
    state = env.reset()
    mask = env.available_actions_mask()
    done = False
    while not done:
        state, _, done, _ = env.step(agent.act(state, mask))
    agent.eps = saved_eps
    return env.assign, env.makespan

def plot_gantt(pb: Problem, assign: Dict[Tuple[int, int], Tuple[int, float, float]], title: str = "Schedule"):
    """Draw a simple Gantt chart of the schedule, one row per machine."""
    per_machine = {mm: [] for mm in range(pb.m)}
    for (j, k), (mm, s, e) in assign.items():
        per_machine[mm].append((s, e - s, f"J{j}-O{k}"))
    fig, ax = plt.subplots(figsize=(12, 5))
    yticks, ylabels = [], []
    for row, mm in enumerate(sorted(per_machine)):
        for start, dur, label in sorted(per_machine[mm], key=lambda bar: bar[0]):
            ax.barh(row, dur, left=start)
            ax.text(start + dur / 2, row, label, ha="center", va="center", fontsize=8)
        yticks.append(row)
        ylabels.append(f"M{mm}")
    ax.set_yticks(yticks)
    ax.set_yticklabels(ylabels)
    ax.set_xlabel("Time")
    ax.set_title(title)
    plt.tight_layout()
    plt.show()

# =============== 主函数 ===============

def main():
    """Build a demo instance, train the agent, and report/plot the schedule."""
    # Medium-size demo; for the 20x4x5 case use n_jobs=20, m=5, ops_per_job=4.
    pb = build_demo_problem(n_jobs=40, m=6, ops_per_job=8, seed=7)
    env = FJSPRuleEnv(pb)
    agent, _ = train_dqn(env, episodes=800)

    assign, makespan = evaluate_policy(env, agent, greedy=True)
    print(f"Final greedy makespan: {makespan:.2f}")
    for (j, k), (mm, s, e) in sorted(assign.items()):
        print(f"Job {j} Op {k} -> M{mm}, start={s:.1f}, end={e:.1f}")
    plot_gantt(pb, assign, title=f"Rule-as-Action FJSP DQN (makespan={makespan:.1f})")

if __name__ == "__main__":
    main()
