#!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
@Project ：V2 
@File    ：T5.py
@IDE     ：PyCharm 
@Author  ：郭星
@Date    ：2025/9/9 19:48 
'''
# Genetic programming evolves composite dispatching rules, which then feed a deep
# reinforcement learning stage; the makespans of the individual rules are compared along the way.
# --- TermNode fixed version ---
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Pipeline: GP evolves composite dispatching rules → Compare with best single rule (Gantt + bar)
        → DQN uses top-K GP rules as actions for FJSP scheduling.

Added in this version:
- After the GP stage finishes and before DQN training starts:
  * Pick the best single rule (the one of SPT/LPT/FIFO/EDD/CR/MWKR/LWKR/MOPNR
    with the smallest makespan).
  * Schedule it and the best GP composite rule on the same problem and plot:
      1) side-by-side Gantt charts (single rule vs composite rule)
      2) a makespan comparison bar chart

The default problem size is kept small for a quick demo.
For a 20-job x 4-op x 5-machine instance, set n_jobs=20, m=5, ops_per_job=4 in
main() and increase gp_gens and dqn_episodes accordingly.
"""

import math
import random
from dataclasses import dataclass
from typing import Dict, List, Tuple, Callable, Optional
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.optim as optim
from collections import deque

# =========================
# Problem Definition (FJSP)
# =========================

@dataclass
class Operation:
    # Flexible routing: any machine listed here may run this operation.
    machine_time: Dict[int, int]  # machine_id -> processing_time

@dataclass
class Job:
    # Operations must be processed in list order (linear routing).
    ops: List[Operation]
    due: float  # due date for job-level features (EDD/CR/slack)

@dataclass
class Problem:
    """An FJSP instance: a list of jobs plus the machine count."""
    jobs: List[Job]
    m: int  # number of machines

    @property
    def n_jobs(self):
        """Number of jobs in the instance."""
        return len(self.jobs)

    @property
    def n_ops_total(self):
        """Total operation count across all jobs."""
        return sum(len(job.ops) for job in self.jobs)

    def max_ops_per_job(self):
        """Length of the longest job in operations (0 for an empty instance)."""
        return max((len(job.ops) for job in self.jobs), default=0)


def build_demo_problem(n_jobs: int = 12, m: int = 5, ops_per_job: int = 4, seed: int = 7) -> Problem:
    """
    Build a random FJSP instance.

    - Every operation is runnable on 1-2 machines with processing time 1..9.
    - A job's due date is the sum of its minimal op times plus random slack.
    """
    rng = random.Random(seed)
    jobs: List[Job] = []
    n_alt = 2 if m >= 2 else 1  # eligible machines per operation
    for _ in range(n_jobs):
        ops: List[Operation] = []
        min_total = 0  # lower bound on the job's total processing time
        for _ in range(ops_per_job):
            eligible = sorted(rng.sample(range(m), k=n_alt))
            times = {mach: rng.randint(1, 9) for mach in eligible}
            ops.append(Operation(times))
            min_total += min(times.values())
        slack = rng.randint(5, 20)
        jobs.append(Job(ops=ops, due=float(min_total + slack)))
    return Problem(jobs=jobs, m=m)

# =========================
# GP Expression Trees
# =========================

def pdiv(a: float, b: float) -> float:
    """Protected division: the denominator is offset by 1e-6 so it can never be zero."""
    denominator = abs(b) + 1e-6
    return a / denominator

# GP primitive set: name -> (arity, implementation).
FUNCS: Dict[str, Tuple[int, Callable[..., float]]] = {
    "add": (2, lambda a,b: a+b),
    "sub": (2, lambda a,b: a-b),
    "mul": (2, lambda a,b: a*b),
    "div": (2, pdiv),     # protected division
    "min": (2, lambda a,b: a if a<b else b),
    "max": (2, lambda a,b: a if a>b else b),
    "abs": (1, lambda a: abs(a)),
    "neg": (1, lambda a: -a),
}

# Terminal feature keys available to GP trees. Values come from the ctx dict
# built by the schedulers: pt = processing time, jr = job ready time,
# mr = machine ready time, now = earliest candidate start, rem_work = remaining
# minimal work, rem_ops = remaining op count, due = job due date,
# slack = due - start.
TERMS_KEYS = ["pt", "jr", "mr", "now", "rem_work", "rem_ops", "due", "slack"]

class Node:
    """Abstract GP expression-tree node; subclasses implement the hooks below."""
    def eval(self, ctx: Dict[str, float]) -> float: raise NotImplementedError  # value of this subtree under feature context ctx
    def copy(self) -> "Node": raise NotImplementedError  # deep copy of this subtree
    def depth(self) -> int: raise NotImplementedError  # subtree height (a leaf counts as 1)
    def __str__(self) -> str: raise NotImplementedError  # canonical text form (used for de-duplication)

class FuncNode(Node):
    """Internal GP node: applies a primitive from FUNCS to its child subtrees."""

    def __init__(self, name: str, children: List["Node"]):
        self.name = name
        self.arity, self.fn = FUNCS[name]
        assert len(children) == self.arity
        self.children = children

    def eval(self, ctx: Dict[str, float]) -> float:
        args = [child.eval(ctx) for child in self.children]
        return float(self.fn(*args))

    def copy(self) -> "FuncNode":
        cloned = [child.copy() for child in self.children]
        return FuncNode(self.name, cloned)

    def depth(self) -> int:
        deepest_child = max(child.depth() for child in self.children)
        return deepest_child + 1

    def __str__(self) -> str:
        # Unary primitives print as name(child); binary ones as (name a b).
        if self.arity != 1:
            return f"({self.name} {self.children[0]} {self.children[1]})"
        return f"{self.name}({self.children[0]})"

class TermNode(Node):
    """Leaf GP node: either a feature lookup (key) or an ephemeral constant (const)."""

    def __init__(self, key: Optional[str] = None, const: Optional[float] = None):
        # Exactly one of key/const must be provided (XOR) — never both, never neither.
        assert (key is not None) ^ (const is not None)
        self.key, self.const = key, const

    def eval(self, ctx: Dict[str, float]) -> float:
        if self.key is None:
            return float(self.const)
        return float(ctx[self.key])

    def copy(self) -> "TermNode":
        return TermNode(self.key, self.const)

    def depth(self) -> int:
        return 1

    def __str__(self) -> str:
        if self.key is None:
            return f"{self.const:.3f}"
        return self.key

def random_terminal(rng: random.Random) -> Node:
    """Sample a leaf: 20% ephemeral random constant, otherwise a feature terminal."""
    make_const = rng.random() < 0.2
    if make_const:
        return TermNode(const=rng.uniform(-2.0, 2.0))
    return TermNode(key=rng.choice(TERMS_KEYS))

def random_function(rng: random.Random) -> str:
    """Pick a primitive name uniformly from the GP function set."""
    names = list(FUNCS.keys())
    return rng.choice(names)

def grow_random_tree(rng: random.Random, max_depth: int) -> Node:
    """'Grow' initialization: may stop with a terminal (30%) at any level above depth 1."""
    stop_here = max_depth <= 1 or rng.random() < 0.3
    if stop_here:
        return random_terminal(rng)
    fname = random_function(rng)
    arity = FUNCS[fname][0]
    children = [grow_random_tree(rng, max_depth - 1) for _ in range(arity)]
    return FuncNode(fname, children)

def full_random_tree(rng: random.Random, max_depth: int) -> Node:
    """'Full' initialization: every branch extends all the way down to max_depth."""
    if max_depth > 1:
        fname = random_function(rng)
        arity = FUNCS[fname][0]
        return FuncNode(fname, [full_random_tree(rng, max_depth - 1) for _ in range(arity)])
    return random_terminal(rng)

def random_tree(rng: random.Random, max_depth: int) -> Node:
    """Ramped half-and-half: 50/50 between grow and full initialization."""
    use_grow = rng.random() < 0.5
    if use_grow:
        return grow_random_tree(rng, max_depth)
    return full_random_tree(rng, max_depth)

# Preorder path utilities for safe subtree ops
def enumerate_paths(root: Node, path: Optional[List[int]] = None):
    """Yield (path, node) for every node in preorder; `path` is the child-index route from the root."""
    if path is None: path = []
    yield path, root
    if isinstance(root, FuncNode):
        for i, ch in enumerate(root.children):
            # Recurse with the child's index appended (a fresh list, so paths never alias).
            yield from enumerate_paths(ch, path + [i])

def get_by_path(root: Node, path: List[int]) -> Node:
    """Follow a child-index path from the root and return the node it lands on."""
    cur = root
    for child_idx in path:
        cur = cur.children[child_idx]  # type: ignore
    return cur

def set_by_path(root: Node, path: List[int], new_node: Node) -> Node:
    """Return a copy of `root` with the subtree at `path` replaced by a copy of `new_node`."""
    if not path:
        # Replacing the root: the result is just the new subtree.
        return new_node.copy()
    result = root.copy()
    holder = result
    for child_idx in path[:-1]:
        holder = holder.children[child_idx]  # type: ignore
    holder.children[path[-1]] = new_node.copy()  # type: ignore
    return result

def subtree_crossover(rng: random.Random, a: Node, b: Node, max_depth: int) -> Tuple[Node, Node]:
    """Swap random subtrees between copies of `a` and `b`; reject any offspring deeper than max_depth."""
    child_a, child_b = a.copy(), b.copy()
    paths_a = [p for p, _ in enumerate_paths(child_a)]
    paths_b = [p for p, _ in enumerate_paths(child_b)]
    cut_a = rng.choice(paths_a)
    cut_b = rng.choice(paths_b)
    donor_b = get_by_path(child_b, cut_b)
    donor_a = get_by_path(child_a, cut_a)
    trial_a = set_by_path(child_a, cut_a, donor_b)
    trial_b = set_by_path(child_b, cut_b, donor_a)
    # Depth limit acts as bloat control: keep the parent copy if the trial is too deep.
    if trial_a.depth() <= max_depth:
        child_a = trial_a
    if trial_b.depth() <= max_depth:
        child_b = trial_b
    return child_a, child_b

def subtree_mutation(rng: random.Random, root: Node, max_depth: int) -> Node:
    """Mutate one random node: small chance of nudging an ERC constant, otherwise replace the subtree."""
    all_paths = [p for p, _ in enumerate_paths(root)]
    target = rng.choice(all_paths)
    victim = get_by_path(root, target)
    if isinstance(victim, TermNode) and victim.const is not None and rng.random() < 0.3:
        # Ephemeral-constant tweak: shift the constant by a small uniform amount.
        nudged_leaf = TermNode(const=victim.const + rng.uniform(-1.0, 1.0))
        trial = set_by_path(root, target, nudged_leaf)
        return trial if trial.depth() <= max_depth else root.copy()
    # Otherwise grow a fresh random subtree in place of the victim.
    replacement = random_tree(rng, max_depth=max(2, rng.randint(2, max_depth)))
    trial = set_by_path(root, target, replacement)
    return trial if trial.depth() <= max_depth else root.copy()

# Classic rule seeds
def term(name: str) -> Node:
    """Shorthand: feature terminal."""
    return TermNode(key=name)

def const(c: float) -> Node:
    """Shorthand: constant terminal."""
    return TermNode(const=c)

def f(name: str, *chs: Node) -> Node:
    """Shorthand: function node with the given children."""
    return FuncNode(name, list(chs))

def seed_rules() -> List[Node]:
    """Classic dispatching rules encoded as GP trees (lower score = scheduled first)."""
    return [
        term("pt"),                                          # SPT: shortest processing time
        f("neg", term("pt")),                                # LPT: longest processing time
        term("jr"),                                          # FIFO: earliest job-ready time
        term("due"),                                         # EDD: earliest due date
        f("div", f("sub", term("due"), term("now")),
                 f("add", term("rem_work"), const(1e-3))),   # CR: critical ratio
        f("neg", term("rem_work")),                          # MWKR: most work remaining
        term("rem_work"),                                    # LWKR: least work remaining
        f("neg", term("rem_ops")),                           # MOPNR: most operations remaining
    ]

# =========================
# Scheduling with a rule
# =========================

def schedule_with_rule_return_assign(rule: Node, pb: Problem):
    """
    Greedily schedule the whole instance using `rule` as the priority function.

    At every step, each (ready operation, eligible machine) pair is scored by
    rule.eval(ctx); the lowest score wins, ties broken by earliest end time,
    then earliest start time.

    Returns (assign, makespan) where assign[(j, k)] = (machine, start, end),
    a layout directly usable by the Gantt plots.
    """
    n_jobs, m = pb.n_jobs, pb.m
    next_op = np.zeros(n_jobs, dtype=np.int32)       # index of each job's next unscheduled op
    job_ready = np.zeros(n_jobs, dtype=np.float32)   # earliest time each job can continue
    machine_ready = np.zeros(m, dtype=np.float32)    # earliest time each machine is free

    def remaining_work(j, from_k):
        # Optimistic bound: each remaining op runs on its fastest machine.
        return float(sum(min(op.machine_time.values()) for op in pb.jobs[j].ops[from_k:]))
    rem_work = np.array([remaining_work(j, 0) for j in range(n_jobs)], dtype=np.float32)

    makespan = 0.0
    done_ops = 0
    total_ops = pb.n_ops_total
    assign: Dict[Tuple[int,int], Tuple[int,float,float]] = {}

    while done_ops < total_ops:
        # Enumerate every candidate (job, op index, machine) placement.
        cands = []
        for j in range(n_jobs):
            k = int(next_op[j])
            if k >= len(pb.jobs[j].ops): continue
            op = pb.jobs[j].ops[k]
            for mm, pt in op.machine_time.items():
                start = max(float(machine_ready[mm]), float(job_ready[j]))
                end = start + float(pt)
                cands.append((j, k, mm, float(pt), start, end))
        if not cands:
            # Defensive only: while done_ops < total_ops at least one op is
            # always schedulable, so this branch should be unreachable.
            tmin = min(list(machine_ready) + list(job_ready))
            machine_ready[:] = np.maximum(machine_ready, tmin)
            job_ready[:] = np.maximum(job_ready, tmin)
            continue

        now = min(c[4] for c in cands)
        best, best_score = None, float("inf")
        for (j, k, mm, pt, start, end) in cands:
            ctx = {
                "pt": pt,
                "jr": float(job_ready[j]),
                "mr": float(machine_ready[mm]),
                "now": now,
                "rem_work": float(rem_work[j]),
                "rem_ops": float(len(pb.jobs[j].ops) - k),
                "due": float(pb.jobs[j].due),
                "slack": float(pb.jobs[j].due - start),
            }
            s = rule.eval(ctx)
            if (s < best_score) or (abs(s-best_score)<1e-9 and end < (best[5] if best else 1e18)) or \
               (abs(s-best_score)<1e-9 and best and abs(end-best[5])<1e-9 and start < best[4]):
                best, best_score = (j, k, mm, pt, start, end), s
        if best is None:
            # Bug fix: if the rule returned NaN for every candidate, no
            # comparison above succeeds and `best` stays None, crashing the
            # unpack below. Fall back to the earliest-finishing candidate.
            best = min(cands, key=lambda c: (c[5], c[4]))

        j, k, mm, pt, start, end = best
        machine_ready[mm] = end
        job_ready[j] = end
        assign[(j,k)] = (mm, start, end)
        rem_work[j] = max(0.0, rem_work[j] - min(pb.jobs[j].ops[k].machine_time.values()))
        next_op[j] += 1
        done_ops += 1
        makespan = max(makespan, end)

    return assign, float(makespan)

def evaluate_rule_on_problem(rule: Node, pb: Problem) -> float:
    """Fitness of a rule = makespan of the schedule it produces on pb (lower is better)."""
    return schedule_with_rule_return_assign(rule, pb)[1]

# =========================
# GP (GA) Framework
# =========================

@dataclass
class Individual:
    """One GP population member: an expression tree plus its cached fitness."""
    tree: Node
    fitness: float  # makespan on the evaluation instance (lower is better)

def tournament_select(rng: random.Random, pop: List[Individual], k: int = 3) -> Individual:
    """Sample k individuals without replacement and return the fittest (lowest makespan)."""
    contenders = rng.sample(pop, k)
    return min(contenders, key=lambda ind: ind.fitness)

def gp_run(
    pb_for_eval: Problem,
    rng_seed: int = 42,
    pop_size: int = 60,
    gens: int = 30,
    max_depth: int = 6,
    tourn_k: int = 3,
    cx_rate: float = 0.9,
    mut_rate: float = 0.25,
    elitism: int = 2,
    return_top_k: int = 6,
):
    """
    Evolve composite dispatching rules with genetic programming.

    Fitness of a rule = makespan of its greedy schedule on pb_for_eval
    (minimized). The population is seeded with the classic rules plus random
    trees; each generation applies elitism, tournament selection, subtree
    crossover and subtree mutation.

    Returns:
        best: Individual with the lowest makespan seen over the whole run.
        best_hist: best-so-far makespan after init and after each generation.
        top_rules: up to return_top_k distinct (tree, fitness) pairs from the
            final population, ordered by fitness (later used as DQN actions).
    """
    rng = random.Random(rng_seed)

    # init population (seeds + random)
    pop: List[Individual] = []
    for t in seed_rules():
        fit = evaluate_rule_on_problem(t, pb_for_eval)
        pop.append(Individual(tree=t, fitness=fit))
    while len(pop) < pop_size:
        t = random_tree(rng, max_depth=max_depth)
        fit = evaluate_rule_on_problem(t, pb_for_eval)
        pop.append(Individual(tree=t, fitness=fit))

    best = min(pop, key=lambda ind: ind.fitness)
    best_hist = [best.fitness]
    print(f"[GP] Init best makespan: {best.fitness:.3f}  rule={best.tree}")

    for g in range(1, gens+1):
        new_pop: List[Individual] = []
        # Elitism: the top individuals survive unchanged into the next generation.
        elites = sorted(pop, key=lambda ind: ind.fitness)[:elitism]
        new_pop.extend([Individual(e.tree.copy(), e.fitness) for e in elites])

        while len(new_pop) < pop_size:
            p1 = tournament_select(rng, pop, tourn_k)
            if rng.random() < cx_rate:
                p2 = tournament_select(rng, pop, tourn_k)
                c1_tree, c2_tree = subtree_crossover(rng, p1.tree, p2.tree, max_depth=max_depth)
            else:
                # No crossover: clone p1 and an independently selected second parent.
                c1_tree, c2_tree = p1.tree.copy(), tournament_select(rng, pop, tourn_k).tree.copy()

            if rng.random() < mut_rate: c1_tree = subtree_mutation(rng, c1_tree, max_depth)
            if rng.random() < mut_rate: c2_tree = subtree_mutation(rng, c2_tree, max_depth)

            c1_fit = evaluate_rule_on_problem(c1_tree, pb_for_eval)
            new_pop.append(Individual(c1_tree, c1_fit))
            if len(new_pop) < pop_size:
                c2_fit = evaluate_rule_on_problem(c2_tree, pb_for_eval)
                new_pop.append(Individual(c2_tree, c2_fit))

        pop = new_pop
        cur_best = min(pop, key=lambda ind: ind.fitness)
        if cur_best.fitness < best.fitness:
            best = Individual(cur_best.tree.copy(), cur_best.fitness)
        best_hist.append(best.fitness)
        print(f"[GP] Gen {g:3d}: best makespan = {best.fitness:.3f}  rule={best.tree}")

    # top-k rules (dedup by string)
    sorted_pop = sorted(pop, key=lambda ind: ind.fitness)
    top_rules, seen = [], set()
    for ind in sorted_pop:
        s = str(ind.tree)
        if s in seen: continue
        top_rules.append((ind.tree.copy(), ind.fitness))
        seen.add(s)
        if len(top_rules) >= return_top_k: break

    return best, best_hist, top_rules

# =========================
# DQN with Rules-as-Actions
# =========================

class FJSPRuleEnv:
    """RL environment where each action selects one rule from rule_list; the
    environment applies that rule once to pick the next (operation, machine)
    placement. An episode ends when every operation has been scheduled."""

    def __init__(self, pb: Problem, rule_list: List[Node]):
        self.pb = pb
        self.rules = rule_list
        self.n_jobs, self.m = pb.n_jobs, pb.m
        self.max_ops = pb.max_ops_per_job()
        self.action_dim = len(self.rules)
        self.reset()

    def reset(self):
        """Clear all scheduling state and return the initial observation."""
        self.next_op = np.zeros(self.n_jobs, dtype=np.int32)
        self.job_ready = np.zeros(self.n_jobs, dtype=np.float32)
        self.machine_ready = np.zeros(self.m, dtype=np.float32)
        self.makespan = 0.0
        self.done_ops = 0
        self.assign: Dict[Tuple[int,int], Tuple[int,float,float]] = {}
        # Optimistic remaining-work estimate: each op on its fastest machine.
        self.rem_work = np.array([sum(min(op.machine_time.values()) for op in job.ops) for job in self.pb.jobs], dtype=np.float32)
        return self._state()

    def _state(self) -> np.ndarray:
        """Normalized observation: machine ready times, job ready times, op progress, remaining work, due dates."""
        mr_max = float(np.max(self.machine_ready)) if self.m>0 else 1.0
        jr_max = float(np.max(self.job_ready)) if self.n_jobs>0 else 1.0
        rw_max = float(np.max(self.rem_work)) if self.n_jobs>0 else 1.0
        mr = self.machine_ready / (1.0 + max(mr_max, 1.0))
        jr = self.job_ready / (1.0 + max(jr_max, 1.0))
        noi = self.next_op.astype(np.float32) / max(float(self.max_ops), 1.0)
        rw = self.rem_work / (1.0 + max(rw_max, 1.0))
        due = np.array([job.due for job in self.pb.jobs], dtype=np.float32)
        dd_max = float(np.max(due)) if due.size else 1.0
        due = due / (1.0 + max(dd_max, 1.0))
        return np.concatenate([mr, jr, noi, rw, due], axis=0).astype(np.float32)

    def available_actions_mask(self) -> np.ndarray:
        """Every rule is always applicable; kept for interface compatibility."""
        return np.ones(self.action_dim, dtype=np.float32)

    def step(self, rule_idx: int):
        """Apply rule `rule_idx` once; reward is the negative makespan increase minus a small step cost."""
        rule = self.rules[rule_idx]
        # Enumerate every candidate (job, op index, machine) placement.
        cands = []
        for j in range(self.n_jobs):
            k = int(self.next_op[j])
            if k >= len(self.pb.jobs[j].ops): continue
            op = self.pb.jobs[j].ops[k]
            for mm, pt in op.machine_time.items():
                start = max(float(self.machine_ready[mm]), float(self.job_ready[j]))
                end = start + float(pt)
                cands.append((j, k, mm, float(pt), start, end))
        if not cands:
            # Nothing left to schedule (coincides with episode termination).
            return self._state(), -0.5, True, {}

        now = min(c[4] for c in cands)
        best, best_score = None, float("inf")
        for (j, k, mm, pt, start, end) in cands:
            ctx = {
                "pt": pt,
                "jr": float(self.job_ready[j]),
                "mr": float(self.machine_ready[mm]),
                "now": now,
                "rem_work": float(self.rem_work[j]),
                "rem_ops": float(len(self.pb.jobs[j].ops) - k),
                "due": float(self.pb.jobs[j].due),
                "slack": float(self.pb.jobs[j].due - start),
            }
            s = rule.eval(ctx)
            if (s < best_score) or (abs(s-best_score)<1e-9 and end < (best[5] if best else 1e18)) or \
               (abs(s-best_score)<1e-9 and best and abs(end-best[5])<1e-9 and start < best[4]):
                best, best_score = (j, k, mm, pt, start, end), s
        if best is None:
            # Bug fix: a rule emitting NaN for every candidate makes all
            # comparisons above fail, leaving best=None and crashing the
            # unpack below. Fall back to the earliest-finishing candidate.
            best = min(cands, key=lambda c: (c[5], c[4]))

        j, k, mm, pt, start, end = best
        self.machine_ready[mm] = end
        self.job_ready[j] = end
        self.assign[(j,k)] = (mm, start, end)
        old_mk = self.makespan
        self.makespan = max(self.makespan, end)
        self.next_op[j] += 1
        self.rem_work[j] = max(0.0, self.rem_work[j] - min(self.pb.jobs[j].ops[k].machine_time.values()))
        self.done_ops += 1

        # Dense reward: penalize makespan growth plus a tiny per-step cost.
        reward = -(self.makespan - old_mk) - 0.01
        done = self.done_ops == self.pb.n_ops_total
        return self._state(), reward, done, {}

# DQN agent
class QNet(nn.Module):
    """Three-layer MLP mapping a state vector to one Q-value per action (rule)."""

    def __init__(self, state_dim: int, action_dim: int):
        super().__init__()
        hidden = 256
        self.net = nn.Sequential(
            nn.Linear(state_dim, hidden),
            nn.ReLU(),
            nn.Linear(hidden, hidden),
            nn.ReLU(),
            nn.Linear(hidden, action_dim),
        )

    def forward(self, x):
        return self.net(x)

class DQNAgent:
    """Standard DQN: epsilon-greedy behavior policy, replay buffer, target network.

    Actions are indices into the GP rule list; states come from FJSPRuleEnv.
    """
    def __init__(self, state_dim: int, action_dim: int, lr=1e-3, gamma=0.99,
                 eps_start=1.0, eps_end=0.05, eps_decay=0.997,
                 buffer_size=100_000, batch_size=256, target_tau=1.0):
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.q = QNet(state_dim, action_dim).to(self.device)
        self.q_target = QNet(state_dim, action_dim).to(self.device)
        self.q_target.load_state_dict(self.q.state_dict())
        self.opt = optim.Adam(self.q.parameters(), lr=lr)
        self.gamma = gamma
        self.eps, self.eps_end, self.eps_decay = eps_start, eps_end, eps_decay
        self.batch_size, self.target_tau = batch_size, target_tau
        self.memory = deque(maxlen=buffer_size)

    def act(self, state: np.ndarray, valid_mask: np.ndarray) -> int:
        """Epsilon-greedy action choice, restricted to actions with mask >= 0.5."""
        if np.random.rand() < self.eps:
            idxs = np.nonzero(valid_mask)[0]
            return int(np.random.choice(idxs))
        with torch.no_grad():
            s = torch.tensor(state, dtype=torch.float32, device=self.device).unsqueeze(0)
            qvals = self.q(s).squeeze(0).cpu().numpy()
            # Bug fix: invalid actions must be pushed to a very large negative
            # value. The original used -1e-9, which could still be the argmax
            # whenever every valid Q-value was negative. (All actions are
            # currently valid; the mask is kept for interface completeness.)
            qvals[valid_mask < 0.5] = -1e9
            return int(np.argmax(qvals))

    def remember(self, s, a, r, s2, d):
        """Store one transition (state, action, reward, next state, done flag)."""
        self.memory.append((s,a,r,s2,d))

    def train_step(self):
        """One replay-batch TD update; returns the MSE loss (0.0 while the buffer warms up)."""
        if len(self.memory) < self.batch_size: return 0.0
        batch = random.sample(self.memory, self.batch_size)
        s = torch.tensor(np.stack([b[0] for b in batch]), dtype=torch.float32, device=self.device)
        a = torch.tensor([b[1] for b in batch], dtype=torch.int64, device=self.device).unsqueeze(1)
        r = torch.tensor([b[2] for b in batch], dtype=torch.float32, device=self.device).unsqueeze(1)
        s2 = torch.tensor(np.stack([b[3] for b in batch]), dtype=torch.float32, device=self.device)
        d = torch.tensor([b[4] for b in batch], dtype=torch.float32, device=self.device).unsqueeze(1)

        q_curr = self.q(s).gather(1, a)
        with torch.no_grad():
            # Bootstrapped 1-step target; (1 - done) zeroes the tail at episode end.
            q_next_max = self.q_target(s2).max(dim=1, keepdim=True).values
            target = r + (1.0 - d) * self.gamma * q_next_max

        loss = nn.MSELoss()(q_curr, target)
        self.opt.zero_grad(); loss.backward()
        nn.utils.clip_grad_norm_(self.q.parameters(), 1.0)
        self.opt.step()

        # Target update: hard copy when tau >= 1, else Polyak averaging.
        if self.target_tau >= 1.0:
            self.q_target.load_state_dict(self.q.state_dict())
        else:
            for tp, p in zip(self.q_target.parameters(), self.q.parameters()):
                tp.data.copy_(tp.data*(1.0-self.target_tau) + p.data*self.target_tau)

        # Anneal exploration once per learning step.
        self.eps = max(self.eps_end, self.eps * self.eps_decay)
        return float(loss.item())

# =========================
# Plots
# =========================

def plot_convergence(best_hist: List[float], title: str):
    """Line plot of the best-so-far makespan across GP generations."""
    generations = np.arange(1, len(best_hist) + 1)
    plt.figure(figsize=(7, 4))
    plt.plot(generations, best_hist, marker="o")
    plt.xlabel("Generation")
    plt.ylabel("Best makespan")
    plt.title(title)
    plt.grid(True, alpha=0.3)
    plt.tight_layout()
    plt.show()

def plot_dqn_learning(ep_mk: List[float], title: str):
    """Line plot of the makespan reached in each DQN training episode."""
    episodes = np.arange(1, len(ep_mk) + 1)
    plt.figure(figsize=(7, 4))
    plt.plot(episodes, ep_mk)
    plt.xlabel("Episode")
    plt.ylabel("Episode makespan")
    plt.title(title)
    plt.grid(True, alpha=0.3)
    plt.tight_layout()
    plt.show()

def plot_gantt(pb: Problem, assign: Dict[Tuple[int,int], Tuple[int,float,float]], title: str, ax=None):
    """
    Draw a Gantt chart of `assign` (op -> (machine, start, end)) on `ax`.
    When ax is None a new figure is created and shown immediately.
    """
    bars = {mm: [] for mm in range(pb.m)}
    for (j,k), (mm,s,e) in assign.items():
        bars[mm].append((s, e-s, f"J{j}-O{k}"))
    standalone = ax is None  # remember before ax is assigned below
    if standalone:
        fig, ax = plt.subplots(figsize=(12,5))
    yticks, ylabels = [], []
    for idx, mm in enumerate(sorted(bars.keys())):
        items = sorted(bars[mm], key=lambda x: x[0])
        for (start, dur, label) in items:
            ax.barh(idx, dur, left=start)
            ax.text(start+dur/2, idx, label, ha="center", va="center", fontsize=8)
        yticks.append(idx); ylabels.append(f"M{mm}")
    ax.set_yticks(yticks); ax.set_yticklabels(ylabels)
    ax.set_xlabel("Time"); ax.set_title(title)
    # Bug fix: the original re-tested `ax is None` here, which was always False
    # after the assignment above, so standalone charts were never laid out or shown.
    if standalone:
        plt.tight_layout(); plt.show()

def plot_gantt_side_by_side(pb: Problem, assign_a, title_a, assign_b, title_b):
    """Render two Gantt charts on one figure, sharing the machine axis."""
    fig, (left_ax, right_ax) = plt.subplots(1, 2, figsize=(16, 5), sharey=True)
    plot_gantt(pb, assign_a, title_a, ax=left_ax)
    plot_gantt(pb, assign_b, title_b, ax=right_ax)
    plt.tight_layout()
    plt.show()

def plot_bar_compare(labels: List[str], values: List[float], title: str):
    """Bar chart of makespans with each value printed above its bar."""
    plt.figure(figsize=(6, 4))
    positions = np.arange(len(labels))
    plt.bar(positions, values)
    for pos, val in enumerate(values):
        plt.text(pos, val, f"{val:.1f}", ha="center", va="bottom")
    plt.xticks(positions, labels)
    plt.ylabel("Makespan")
    plt.title(title)
    plt.tight_layout()
    plt.show()

# =========================
# Pipeline: GP → Compare → DQN
# =========================

def pipeline(
    # FJSP size
    n_jobs=12, m=5, ops_per_job=4, seed=7,
    # GP params
    gp_pop=60, gp_gens=30, gp_max_depth=6, gp_top_k=6,
    # DQN params
    dqn_episodes=800, lr=1e-3, gamma=0.99, eps_end=0.05, eps_decay=0.997
):
    """
    Full demo pipeline on one random FJSP instance:
    1) GP evolves composite dispatching rules.
    2) The best classic single rule is compared against the GP best
       (side-by-side Gantt charts plus a makespan bar chart).
    3) A DQN is trained using the top-K GP rules as its action set.
    """
    pb = build_demo_problem(n_jobs=n_jobs, m=m, ops_per_job=ops_per_job, seed=seed)

    # 1) GP search for composite rules
    best_ind, gp_hist, top_rules = gp_run(
        pb_for_eval=pb, rng_seed=42, pop_size=gp_pop, gens=gp_gens,
        max_depth=gp_max_depth, tourn_k=3, cx_rate=0.9, mut_rate=0.25,
        elitism=2, return_top_k=gp_top_k
    )
    print("\n[GP] === Best Composite Rule ===")
    print(best_ind.tree)
    print(f"[GP] Best makespan: {best_ind.fitness:.3f}")

    plot_convergence(gp_hist, title="GP convergence (best makespan per generation)")

    # 2) After GP: compare "best single rule vs best composite rule".
    #    The single best is picked among the classic seed rules on the same problem pb.
    single_rule_list = seed_rules()
    single_mks = [(r, evaluate_rule_on_problem(r, pb)) for r in single_rule_list]
    best_single_rule, best_single_mk = min(single_mks, key=lambda t: t[1])
    print("\n[Compare] Best single rule:")
    print(best_single_rule, "  makespan=", best_single_mk)

    # Schedule both rules to obtain the assignments needed for the two Gantt charts.
    assign_single, mk_single = schedule_with_rule_return_assign(best_single_rule, pb)
    assign_combo, mk_combo = schedule_with_rule_return_assign(best_ind.tree, pb)
    # Side-by-side Gantt charts
    plot_gantt_side_by_side(pb, assign_single, f"Single rule (mk={mk_single:.1f})",
                               assign_combo,  f"Composite rule (mk={mk_combo:.1f})")
    # Makespan comparison bar chart
    plot_bar_compare(["Single", "Composite(GP best)"], [mk_single, mk_combo],
                     title="Makespan comparison: Single vs Composite")

    # 3) DQN: actions = top-K GP rules
    rule_list = [t for (t, fval) in top_rules]
    env = FJSPRuleEnv(pb, rule_list)
    state_dim = env._state().size
    agent = DQNAgent(state_dim, action_dim=len(rule_list),
                     lr=lr, gamma=gamma, eps_start=1.0, eps_end=eps_end, eps_decay=eps_decay,
                     buffer_size=100000, batch_size=256, target_tau=1.0)

    ep_makespan = []
    for ep in range(1, dqn_episodes+1):
        s = env.reset()
        mask = env.available_actions_mask()
        while True:
            a = agent.act(s, mask)
            s2, r, done, _ = env.step(a)
            agent.remember(s, a, r, s2, float(done))
            agent.train_step()
            s = s2
            if done:
                break
        ep_makespan.append(env.makespan)
        if ep % 50 == 0 or ep == 1:
            print(f"[DQN] Ep {ep:4d}  makespan={env.makespan:7.2f}  eps={agent.eps:5.3f}")

    plot_dqn_learning(ep_makespan, title="DQN learning curve (episode makespan)")

    # Final greedy evaluation (eps=0)
    prev_eps = agent.eps
    agent.eps = 0.0
    s = env.reset()
    while True:
        a = agent.act(s, env.available_actions_mask())
        s, r, done, _ = env.step(a)
        if done: break
    agent.eps = prev_eps

    print(f"\n[DQN] Final greedy makespan: {env.makespan:.2f}")
    for (j,k), (mm,s_t,e_t) in sorted(env.assign.items()):
        print(f"Job {j} Op {k} -> M{mm}, start={s_t:.1f}, end={e_t:.1f}")
    # Optional: Gantt chart of the final DQN schedule
    plot_gantt(pb, env.assign, title=f"GP→DQN schedule (makespan={env.makespan:.1f})")

# =========================
# Main
# =========================

def main():
    """Run the demo pipeline (GP -> comparison plots -> DQN)."""
    # Quick demo. For a 20x4x5 instance use n_jobs=20, m=5, ops_per_job=4 and
    # increase gp_gens and dqn_episodes accordingly.
    # pipeline(
    #     n_jobs=12, m=5, ops_per_job=4, seed=7,
    #     gp_pop=60, gp_gens=30, gp_max_depth=6, gp_top_k=6,
    #     dqn_episodes=800, lr=1e-3, gamma=0.99, eps_end=0.05, eps_decay=0.997
    # )
    pipeline(
        n_jobs=30, m=5, ops_per_job=4, seed=7,
        gp_pop=60, gp_gens=300, gp_max_depth=6, gp_top_k=6,
        dqn_episodes=800, lr=1e-3, gamma=0.99, eps_end=0.05, eps_decay=0.997
    )

if __name__ == "__main__":
    main()
