#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
@Project ：V2
@File    ：T10.py
@IDE     ：PyCharm
@Author  ：郭星
@Date    ：2025/9/17 19:53
'''
"""
Learn-to-Schedule for FJSP via Imitation Learning
-------------------------------------------------
目标：
  在小/中规模实例上，用一个“老师策略(oracle)”生成步骤级监督信号，
  训练一个可泛化的“候选打分器”，用于更大规模实例调度。

依赖：
  pip install torch numpy matplotlib

说明：
  - 老师策略 oracle_select()：对每个可派工候选做 1 步模拟，并用一个强贪心
    完成剩余调度，估计最终 makespan，选 makespan 最小的候选作为标签。
  - 模型 score_net：输入全局统计特征 + 候选特征，对每个候选输出一个分数；
    训练用 masked cross-entropy（在可行候选集合上 softmax）。
  - 评估：与 SPT/MWKR/MOPNR/ECT 基线对比，打印 makespan，并画甘特图。

作者：你
"""

import math
import random
from dataclasses import dataclass
from typing import Dict, List, Tuple, Optional
import numpy as np
import matplotlib.pyplot as plt

import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader


# =========================
# 问题定义：FJSP
# =========================

@dataclass
class Operation:
    """One operation of a job, with its set of eligible machines.

    machine_time maps an eligible machine id to the processing time the
    operation takes on that machine.
    """
    machine_time: Dict[int, int]  # eligible machine id -> processing time

@dataclass
class Job:
    """A job: an ordered sequence of operations plus a due date."""
    ops: List[Operation]
    due: float = 0.0  # due date; only feeds the "due"/slack features, not hard-enforced

@dataclass
class Problem:
    """An FJSP instance: the jobs to schedule and the machine count."""
    jobs: List[Job]
    m: int  # number of machines

    @property
    def n_jobs(self):
        """Number of jobs in the instance."""
        return len(self.jobs)

    @property
    def n_ops_total(self):
        """Total number of operations summed over all jobs."""
        total = 0
        for job in self.jobs:
            total += len(job.ops)
        return total


def build_random_problem(n_jobs: int, m: int, ops_per_job: int,
                         seed: Optional[int] = None) -> Problem:
    """Generate a random FJSP instance.

    Every operation is executable on min(2, m) distinct random machines with
    processing times drawn uniformly from [1, 9]. Each job gets a loose due
    date: its shortest achievable total processing time plus a random margin.
    """
    rnd = random.Random(seed)
    n_alternatives = 2 if m >= 2 else 1  # machines per operation
    jobs: List[Job] = []
    for _ in range(n_jobs):
        op_list: List[Operation] = []
        shortest_total = 0
        for _ in range(ops_per_job):
            chosen = sorted(rnd.sample(range(m), k=n_alternatives))
            times = {mach: rnd.randint(1, 9) for mach in chosen}
            op_list.append(Operation(times))
            shortest_total += min(times.values())
        margin = rnd.randint(5, 20)
        jobs.append(Job(ops=op_list, due=float(shortest_total + margin)))
    return Problem(jobs=jobs, m=m)


# =========================
# 调度环境工具
# =========================

class FJSPState:
    """Mutable scheduling progress for one FJSP instance.

    Tracks the next unscheduled operation and ready time of each job, the
    ready time of each machine, and the partial assignment. clone() yields
    an independent copy so look-ahead simulations never disturb the caller.
    """

    def __init__(self, pb: Problem):
        self.pb = pb
        self.n_jobs = pb.n_jobs
        self.m = pb.m
        # index of the next unscheduled operation per job
        self.next_op = np.zeros(self.n_jobs, dtype=np.int32)
        # earliest time each job / machine becomes free again
        self.job_ready = np.zeros(self.n_jobs, dtype=np.float32)
        self.mach_ready = np.zeros(self.m, dtype=np.float32)
        # optimistic remaining workload per job: sum of fastest alternatives
        self.rem_work = np.array(
            [sum(min(op.machine_time.values()) for op in job.ops) for job in pb.jobs],
            dtype=np.float32,
        )
        self.done_ops = 0
        self.total_ops = pb.n_ops_total
        # (job, op_index) -> (machine, start, end)
        self.assign: Dict[Tuple[int,int], Tuple[int,float,float]] = {}
        self.makespan = 0.0

    def clone(self):
        """Return an independent copy suitable for what-if roll-outs."""
        other = FJSPState(self.pb)
        other.next_op = self.next_op.copy()
        other.job_ready = self.job_ready.copy()
        other.mach_ready = self.mach_ready.copy()
        other.rem_work = self.rem_work.copy()
        other.done_ops = self.done_ops
        other.total_ops = self.total_ops
        other.assign = dict(self.assign)
        other.makespan = self.makespan
        return other

    def candidates(self):
        """Enumerate dispatchable candidates as (j, k, machine, pt, start, end)."""
        out = []
        for j, job in enumerate(self.pb.jobs):
            k = int(self.next_op[j])
            if k >= len(job.ops):
                continue  # job already finished
            ready_j = float(self.job_ready[j])
            for mach, pt in job.ops[k].machine_time.items():
                start = max(float(self.mach_ready[mach]), ready_j)
                out.append((j, k, mach, float(pt), start, start + float(pt)))
        return out

    def step_assign(self, cand):
        """Commit one candidate: advance timelines and bookkeeping."""
        j, k, mach, pt, start, end = cand
        self.mach_ready[mach] = end
        self.job_ready[j] = end
        self.assign[(j, k)] = (mach, start, end)
        self.makespan = max(self.makespan, end)
        self.next_op[j] += 1
        # shrink the optimistic remaining-work estimate by this op's fastest time
        fastest = min(self.pb.jobs[j].ops[k].machine_time.values())
        self.rem_work[j] = max(0.0, self.rem_work[j] - float(fastest))
        self.done_ops += 1

    def done(self):
        """True once every operation of every job has been scheduled."""
        return self.done_ops >= self.total_ops


# =========================
# 基线规则 & 贪心完成器
# =========================

# Dispatching rules as candidate-scoring functions; lower score = preferred.
# MWKR / MOPNR are negated so that "most work / most ops remaining" wins
# under the uniform lower-is-better convention.
_RULE_FUNCS = {
    "SPT":   lambda ctx: ctx["pt"],          # shortest processing time
    "MWKR":  lambda ctx: -ctx["rem_work"],   # most work remaining
    "MOPNR": lambda ctx: -ctx["rem_ops"],    # most operations remaining
    "ECT":   lambda ctx: ctx["end"],         # earliest completion time
}

def rule_score(name: str, ctx: dict) -> float:
    """Score a candidate under rule `name` (lower is better).

    Unknown rule names fall back to ECT (earliest completion time).
    """
    return _RULE_FUNCS.get(name, _RULE_FUNCS["ECT"])(ctx)

BASELINE_RULES = ["SPT", "MWKR", "MOPNR", "ECT"]

def _best_by_rule(s: FJSPState, rule_name: str):
    """Pick the rule-minimizing dispatchable candidate of `s`.

    Near-ties (score gap < 1e-9) are broken by earliest completion time.
    Returns the winning (j, k, mm, pt, start, end) tuple, or None when no
    operation is dispatchable.
    """
    cands = s.candidates()
    if not cands:
        return None
    now = min(c[4] for c in cands)  # earliest possible start among candidates
    best, best_score = None, float("inf")
    for (j, k, mm, pt, start, end) in cands:
        ctx = {
            "pt": pt,
            "jr": float(s.job_ready[j]),
            "mr": float(s.mach_ready[mm]),
            "now": now,
            "rem_work": float(s.rem_work[j]),
            "rem_ops": float(len(s.pb.jobs[j].ops) - k),
            "due": float(s.pb.jobs[j].due),
            "start": start,
            "end": end,
        }
        sc = rule_score(rule_name, ctx)
        # strict improvement, or a near-tie resolved by earlier completion
        if (sc < best_score) or (abs(sc - best_score) < 1e-9 and end < (best[5] if best else 1e18)):
            best, best_score = (j, k, mm, pt, start, end), sc
    return best

def greedy_finish(state: FJSPState, rule_name: str) -> float:
    """Complete the schedule from `state` greedily with `rule_name`.

    Runs on a clone, so `state` itself is never mutated. Returns the
    resulting makespan.
    """
    s = state.clone()
    while not s.done():
        best = _best_by_rule(s, rule_name)
        if best is None:  # defensive: nothing dispatchable despite unfinished ops
            break
        s.step_assign(best)
    return s.makespan

def evaluate_rule_on_problem(pb: Problem, rule_name: str) -> Tuple[float, Dict]:
    """Schedule `pb` from scratch with `rule_name`.

    Returns (makespan, assignment dict mapping (job, op) -> (machine, start, end)).
    """
    s = FJSPState(pb)
    while not s.done():
        best = _best_by_rule(s, rule_name)
        if best is None:  # defensive: nothing dispatchable despite unfinished ops
            break
        s.step_assign(best)
    return s.makespan, s.assign


# =========================
# 老师策略（一步前瞻 + 强贪心收尾）
# =========================

def oracle_select(state: FJSPState, tail_rule: str = "ECT") -> int:
    """One-step look-ahead teacher policy.

    Virtually executes each dispatchable candidate on a clone, finishes the
    remaining schedule greedily with `tail_rule`, and returns the index of
    the candidate whose estimated final makespan is smallest (this index is
    used as the training label).
    """
    best_idx = 0
    best_mk = float("inf")
    for idx, cand in enumerate(state.candidates()):
        trial = state.clone()
        trial.step_assign(cand)
        estimate = greedy_finish(trial, tail_rule)
        if estimate < best_mk:
            best_idx, best_mk = idx, estimate
    return best_idx


# =========================
# 特征工程
# =========================

def global_stats(state: FJSPState) -> np.ndarray:
    """Summarize variable-length state vectors into a fixed-length feature vector.

    For each of (job_ready, mach_ready, rem_work, next_op) emits
    [mean, max, min, std + 1e-8], then appends the completion ratio, the
    total operation count and the machine count. Length: 4*4 + 3 = 19.
    """
    def _summary(vec):
        # Guard against empty vectors, which would otherwise yield NaN.
        if vec.size == 0:
            return [0, 0, 0, 0]
        return [float(np.mean(vec)), float(np.max(vec)),
                float(np.min(vec)), float(np.std(vec) + 1e-8)]

    feats = []
    for vec in (state.job_ready, state.mach_ready, state.rem_work,
                state.next_op.astype(np.float32)):
        feats += _summary(vec)
    feats += [state.done_ops / max(1, state.total_ops),
              float(state.total_ops), float(state.m)]
    return np.array(feats, dtype=np.float32)

def cand_features(state: FJSPState, cand) -> np.ndarray:
    """Build the 9-dim feature vector of one candidate.

    Layout: [pt, start, end, slack, job_ready, mach_ready, rem_work,
    rem_ops, machine_id]. The first seven (time-scaled) entries are
    compressed with log1p; all but pt are clipped at 0 first. The two
    count/id entries stay linear.
    """
    j, k, mm, pt, start, end = cand
    job = state.pb.jobs[j]
    rem_ops = len(job.ops) - k
    slack = float(job.due - start)  # time margin w.r.t. the job's due date
    time_feats = [
        math.log1p(pt),  # pt is not clipped, matching original behavior
        math.log1p(max(0.0, start)),
        math.log1p(max(0.0, end)),
        math.log1p(max(0.0, slack)),
        math.log1p(max(0.0, float(state.job_ready[j]))),
        math.log1p(max(0.0, float(state.mach_ready[mm]))),
        math.log1p(max(0.0, float(state.rem_work[j]))),
    ]
    return np.array(time_feats + [float(rem_ops), float(mm)], dtype=np.float32)


# =========================
# 数据集构建（模仿学习）
# =========================

class StepDataset(Dataset):
    """In-memory dataset of step-level imitation samples.

    Each stored item is a dict:
      "g": global features, shape [G]
      "c": candidate features, shape [C, D]
      "m": validity mask, shape [C] (1 = real candidate, 0 = padding)
      "y": index of the teacher-chosen candidate
    """

    def __init__(self):
        self.items = []

    def __len__(self):
        return len(self.items)

    def __getitem__(self, idx):
        return self.items[idx]

    def add_item(self, g, c, m, y):
        """Append one sample, coercing arrays to float32 and the label to int."""
        sample = {
            "g": g.astype(np.float32),
            "c": c.astype(np.float32),
            "m": m.astype(np.float32),
            "y": int(y),
        }
        self.items.append(sample)

def collate_batch(batch):
    """Pad samples to a common candidate count and stack them into tensors.

    Returns (G, C, M, Y): global features [B, G], candidate features
    [B, Cmax, D], mask [B, Cmax] (0 on padded slots) and labels [B].
    """
    width = max(sample["c"].shape[0] for sample in batch)
    g_rows, c_rows, m_rows, labels = [], [], [], []
    for sample in batch:
        cand = sample["c"]
        mask = sample["m"]
        short = width - cand.shape[0]
        if short > 0:
            # zero-pad both the candidate rows and their mask entries
            cand = np.pad(cand, ((0, short), (0, 0)), mode="constant", constant_values=0.0)
            mask = np.pad(mask, (0, short), mode="constant", constant_values=0.0)
        g_rows.append(sample["g"])
        c_rows.append(cand)
        m_rows.append(mask)
        labels.append(sample["y"])
    return (torch.tensor(np.stack(g_rows), dtype=torch.float32),
            torch.tensor(np.stack(c_rows), dtype=torch.float32),
            torch.tensor(np.stack(m_rows), dtype=torch.float32),
            torch.tensor(labels, dtype=torch.long))


def build_training_data(n_instances: int = 200,
                        n_jobs_range=(6, 10),
                        m: int = 5,
                        ops_per_job: int = 4,
                        seed: int = 0) -> StepDataset:
    """Generate step-level imitation-learning data.

    For each random small/medium instance, roll the schedule forward; at
    every decision point record (global features, candidate features, mask,
    teacher label), where the label comes from oracle_select, then apply the
    teacher's chosen action to advance the state.
    """
    rnd = random.Random(seed)
    dataset = StepDataset()
    for _ in range(n_instances):
        jobs = rnd.randint(n_jobs_range[0], n_jobs_range[1])
        pb = build_random_problem(n_jobs=jobs, m=m, ops_per_job=ops_per_job,
                                  seed=rnd.randint(0, 10**9))
        state = FJSPState(pb)
        while not state.done():
            cands = state.candidates()
            if not cands:
                break
            g = global_stats(state)
            cf = np.stack([cand_features(state, c) for c in cands], axis=0)
            mask = np.ones(cf.shape[0], dtype=np.float32)
            label = oracle_select(state, tail_rule="ECT")
            dataset.add_item(g, cf, mask, label)
            # advance the state with the teacher's choice
            state.step_assign(cands[label])
    return dataset


# =========================
# 模型
# =========================

class ScoreNet(nn.Module):
    """Candidate scorer conditioned on the global state.

    A context MLP embeds the global features; each candidate's features are
    concatenated with that context and mapped to a scalar score. Padded
    candidate slots are forced to -1e9 so softmax/argmax ignore them.
    """

    def __init__(self, g_dim: int, c_dim: int, hidden: int = 128):
        super().__init__()
        # context encoder over global features
        self.ctx = nn.Sequential(
            nn.Linear(g_dim, hidden), nn.ReLU(),
            nn.Linear(hidden, hidden), nn.ReLU(),
        )
        # per-candidate scorer over [candidate ; context]
        self.cand = nn.Sequential(
            nn.Linear(c_dim + hidden, hidden), nn.ReLU(),
            nn.Linear(hidden, hidden), nn.ReLU(),
            nn.Linear(hidden, 1),  # scalar score per candidate
        )

    def forward(self, G: torch.Tensor, C: torch.Tensor, mask: torch.Tensor):
        """Score every candidate slot.

        G: [B, Gdim] global features; C: [B, Cmax, Cdim] candidate features;
        mask: [B, Cmax] with 1 = valid, 0 = padding.
        Returns scores [B, Cmax] with padded positions set to -1e9.
        """
        n_cand = C.shape[1]
        context = self.ctx(G)                                  # [B, H]
        tiled = context.unsqueeze(1).expand(-1, n_cand, -1)    # [B, Cmax, H]
        scores = self.cand(torch.cat([C, tiled], dim=-1)).squeeze(-1)  # [B, Cmax]
        # suppress padded slots so they never win argmax nor gain probability
        return scores.masked_fill(mask < 0.5, -1e9)


# =========================
# 训练 & 评估
# =========================

def train_model(train_ds: StepDataset, val_ds: StepDataset,
                epochs=10, batch_size=64, lr=1e-3, device=None):
    """Train ScoreNet by imitation: cross-entropy against teacher labels.

    Parameters
    ----------
    train_ds, val_ds : StepDataset
        Step-level samples produced by build_training_data.
    epochs, batch_size, lr
        Standard optimization hyper-parameters.
    device : torch.device or None
        None auto-selects CUDA when available.

    Returns
    -------
    (model, train_loss_history, val_loss_history)
    """
    if device is None:
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Infer feature dimensions from the first stored sample.
    g_dim = train_ds.items[0]["g"].shape[0]
    c_dim = train_ds.items[0]["c"].shape[1]

    model = ScoreNet(g_dim, c_dim, hidden=128).to(device)
    opt = optim.Adam(model.parameters(), lr=lr)
    ce = nn.CrossEntropyLoss(reduction="none")  # per-sample losses; averaged manually below

    train_loader = DataLoader(train_ds, batch_size=batch_size, shuffle=True, collate_fn=collate_batch)
    val_loader   = DataLoader(val_ds,   batch_size=batch_size, shuffle=False, collate_fn=collate_batch)

    tr_hist, va_hist = [], []

    for ep in range(1, epochs+1):
        model.train()
        total_loss, total_cnt, correct = 0.0, 0, 0
        for G, C, M, Y in train_loader:
            G, C, M, Y = G.to(device), C.to(device), M.to(device), Y.to(device)
            opt.zero_grad()
            scores = model(G, C, M)         # [B, Cmax]
            # Cross-entropy over each sample's candidate set; padded slots were
            # already masked to -1e9 inside the model, so they get ~zero probability.
            loss_vec = ce(scores, Y)
            loss = loss_vec.mean()
            loss.backward()
            nn.utils.clip_grad_norm_(model.parameters(), 1.0)
            opt.step()

            total_loss += float(loss.item()) * G.size(0)
            total_cnt  += G.size(0)
            pred = scores.argmax(dim=1)
            correct += int((pred == Y).sum().item())

        train_loss = total_loss / max(1, total_cnt)
        train_acc  = correct / max(1, total_cnt)

        # Validation pass (no gradient updates).
        model.eval()
        v_total_loss, v_total_cnt, v_correct = 0.0, 0, 0
        with torch.no_grad():
            for G, C, M, Y in val_loader:
                G, C, M, Y = G.to(device), C.to(device), M.to(device), Y.to(device)
                scores = model(G, C, M)
                loss_vec = ce(scores, Y)
                loss = loss_vec.mean()
                v_total_loss += float(loss.item()) * G.size(0)
                v_total_cnt  += G.size(0)
                pred = scores.argmax(dim=1)
                v_correct += int((pred == Y).sum().item())
        val_loss = v_total_loss / max(1, v_total_cnt)
        val_acc  = v_correct / max(1, v_total_cnt)

        tr_hist.append(train_loss); va_hist.append(val_loss)
        print(f"[Epoch {ep:02d}] train_loss={train_loss:.4f} acc={train_acc:.3f} | "
              f"val_loss={val_loss:.4f} acc={val_acc:.3f}")

    return model, tr_hist, va_hist


def plot_learning_curve(tr, va, title="Training curve"):
    """Plot per-epoch train/val loss curves on one figure."""
    epoch_axis = np.arange(1, len(tr) + 1)
    plt.figure(figsize=(7, 4))
    plt.plot(epoch_axis, tr, label="train")
    plt.plot(epoch_axis, va, label="val")
    plt.xlabel("Epoch")
    plt.ylabel("Loss")
    plt.title(title)
    plt.grid(True, alpha=0.3)
    plt.legend()
    plt.tight_layout()
    plt.show()


# =========================
# 用模型调度
# =========================

def schedule_with_model(pb: Problem, model: ScoreNet, device=None) -> Tuple[float, Dict]:
    """Schedule `pb` greedily with the learned scorer.

    At every step all dispatchable candidates are scored as a batch of one
    (mask all-ones) and the argmax candidate is committed. Returns
    (makespan, assignment dict).
    """
    if device is None:
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    state = FJSPState(pb)
    model.eval()
    with torch.no_grad():
        while not state.done():
            cands = state.candidates()
            g = torch.tensor(global_stats(state), dtype=torch.float32).unsqueeze(0).to(device)  # [1, G]
            feats = np.stack([cand_features(state, c) for c in cands], axis=0)
            c = torch.tensor(feats, dtype=torch.float32).unsqueeze(0).to(device)                # [1, C, D]
            m = torch.ones((1, c.shape[1]), dtype=torch.float32).to(device)                     # [1, C]
            best = int(torch.argmax(model(g, c, m).squeeze(0)).item())
            state.step_assign(cands[best])
    return state.makespan, state.assign


# =========================
# 可视化：甘特图
# =========================

def plot_gantt(pb: Problem, assign: Dict[Tuple[int,int], Tuple[int,float,float]], title: str):
    """Draw a Gantt chart (one row per machine) from an assignment mapping."""
    per_machine = {mach: [] for mach in range(pb.m)}
    for (job, op), (mach, start, end) in assign.items():
        per_machine[mach].append((start, end - start, f"J{job}-O{op}"))
    fig, ax = plt.subplots(figsize=(12, 0.7*pb.m + 3))
    yticks, ylabels = [], []
    for row, mach in enumerate(sorted(per_machine)):
        # draw this machine's bars in start-time order
        for (start, width, tag) in sorted(per_machine[mach], key=lambda b: b[0]):
            ax.barh(row, width, left=start)
            ax.text(start + width/2, row, tag, ha="center", va="center", fontsize=8)
        yticks.append(row)
        ylabels.append(f"M{mach}")
    ax.set_yticks(yticks)
    ax.set_yticklabels(ylabels)
    ax.set_xlabel("Time")
    ax.set_title(title)
    ax.grid(axis="x", alpha=0.3)
    plt.tight_layout()
    plt.show()


# =========================
# 主流程
# =========================

def main():
    """End-to-end demo: build imitation data, train the scorer, compare to baselines."""
    np.random.seed(0)
    random.seed(0)
    torch.manual_seed(0)

    # 1) Step-level training/validation data from small/medium instances.
    print("Building training data ...")
    train_ds = build_training_data(n_instances=250, n_jobs_range=(6,10), m=5, ops_per_job=4, seed=123)
    val_ds   = build_training_data(n_instances=60,  n_jobs_range=(8,10), m=5, ops_per_job=4, seed=456)

    # 2) Fit the candidate scorer by imitation.
    print("Training model ...")
    model, tr_hist, va_hist = train_model(train_ds, val_ds, epochs=12, batch_size=64, lr=1e-3)
    plot_learning_curve(tr_hist, va_hist, title="Imitation Learning - Loss")

    # 3) Compare against dispatching-rule baselines on an in-distribution instance.
    print("\n=== Small-scale evaluation (same distribution) ===")
    small_pb = build_random_problem(n_jobs=10, m=5, ops_per_job=4, seed=2024)
    mk_model, assign_model = schedule_with_model(small_pb, model)
    print(f"Model  makespan: {mk_model:.2f}")
    for rule in BASELINE_RULES:
        mk, _ = evaluate_rule_on_problem(small_pb, rule)
        print(f"{rule:<6} makespan: {mk:.2f}")

    plot_gantt(small_pb, assign_model, title=f"Model schedule (small) mk={mk_model:.1f}")

    # 4) Generalization check on a larger instance from the same generator.
    print("\n=== Larger-scale generalization ===")
    big_pb = build_random_problem(n_jobs=20, m=5, ops_per_job=4, seed=2025)
    mk_model_big, assign_model_big = schedule_with_model(big_pb, model)
    print(f"Model  makespan (big): {mk_model_big:.2f}")
    for rule in BASELINE_RULES:
        mk, _ = evaluate_rule_on_problem(big_pb, rule)
        print(f"{rule:<6} makespan (big): {mk:.2f}")

    plot_gantt(big_pb, assign_model_big, title=f"Model schedule (big) mk={mk_model_big:.1f}")

if __name__ == "__main__":
    main()
