#!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
@Project ：V2 
@File    ：T11.py
@IDE     ：PyCharm 
@Author  ：郭星
@Date    ：2025/9/18 16:47 
'''
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
l2s_fjsp_modular.py
---------------------------------
模块化的“学会排序”的FJSP调度器（模仿学习 + 可选LNS修补）：
- data:    问题与状态建模、随机实例、老师策略（生成监督样本）
- model:   候选打分器抽象与MLP实现，保存/加载“思维模型”
- train:   训练器（supervised），学习曲线
- infer:   用思维模型逐步排程（含动作掩码/可行性），可选经验库辅助
- repair:  轻量局部搜索（换机/交换段）微调
- viz:     甘特图绘制
- main:    演示训练→评估→大规模泛化→保存/加载→推理

依赖：
  pip install torch numpy matplotlib
"""

import math
import random
from dataclasses import dataclass
from typing import Dict, List, Tuple, Optional
import numpy as np
import matplotlib.pyplot as plt
import os
import pickle

import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader


# =========================
# data 子模块：问题、状态、老师
# =========================

@dataclass
class Operation:
    """One operation of a job: the machines that can run it and their times."""
    machine_time: Dict[int, int]  # feasible machine id -> processing time

@dataclass
class Job:
    """A job: its ordered sequence of operations plus an optional due date."""
    ops: List[Operation]
    due: float = 0.0  # due date (used only for the slack feature); optional

@dataclass
class Problem:
    """One FJSP instance: the job list and the number of machines."""
    jobs: List[Job]
    m: int

    @property
    def n_jobs(self) -> int:
        """Number of jobs in the instance."""
        return len(self.jobs)

    @property
    def n_ops_total(self) -> int:
        """Total operation count summed over all jobs."""
        return sum(len(job.ops) for job in self.jobs)

def build_random_problem(n_jobs: int, m: int, ops_per_job: int,
                         seed: Optional[int] = None) -> Problem:
    """Sample a random FJSP instance.

    Each operation is runnable on min(2, m) distinct machines with processing
    times drawn uniformly from 1..9; each job's due date is its optimistic
    finish time (sum of fastest per-op times) plus random slack of 5..20.
    """
    rng = random.Random(seed)
    jobs: List[Job] = []
    for _j in range(n_jobs):
        ops: List[Operation] = []
        fastest_sum = 0
        for _o in range(ops_per_job):
            n_mach = 2 if m >= 2 else 1
            chosen = sorted(rng.sample(range(m), k=n_mach))
            times = {mm: rng.randint(1, 9) for mm in chosen}
            ops.append(Operation(times))
            fastest_sum += min(times.values())
        jobs.append(Job(ops=ops, due=float(fastest_sum + rng.randint(5, 20))))
    return Problem(jobs=jobs, m=m)

class FJSPState:
    """Mutable scheduling state for one FJSP instance.

    Tracks, per job, the index of its next pending operation and the time it
    becomes free; per machine, the time it becomes free; plus an optimistic
    remaining-work figure per job and the running makespan.
    """
    def __init__(self, pb: Problem):
        self.pb = pb
        self.n_jobs = pb.n_jobs
        self.m = pb.m
        # next_op[j]: index of job j's next unscheduled operation
        self.next_op = np.zeros(self.n_jobs, dtype=np.int32)
        # earliest time each job / machine is available again
        self.job_ready = np.zeros(self.n_jobs, dtype=np.float32)
        self.mach_ready = np.zeros(self.m, dtype=np.float32)
        # lower bound on remaining work per job (fastest machine per op)
        self.rem_work = np.array(
            [sum(min(op.machine_time.values()) for op in job.ops) for job in pb.jobs],
            dtype=np.float32,
        )
        self.done_ops = 0
        self.total_ops = pb.n_ops_total
        # (job, op_index) -> (machine, start, end)
        self.assign: Dict[Tuple[int,int], Tuple[int,float,float]] = {}
        self.makespan = 0.0

    def clone(self):
        """Independent copy: arrays and the assignment map are duplicated."""
        other = FJSPState(self.pb)
        other.next_op = self.next_op.copy()
        other.job_ready = self.job_ready.copy()
        other.mach_ready = self.mach_ready.copy()
        other.rem_work = self.rem_work.copy()
        other.done_ops = self.done_ops
        other.total_ops = self.total_ops
        other.assign = dict(self.assign)
        other.makespan = self.makespan
        return other

    def candidates(self):
        """All dispatchable moves as (j, k, machine, proc_time, start, end)."""
        out = []
        for j, job in enumerate(self.pb.jobs):
            k = int(self.next_op[j])
            if k >= len(job.ops):
                continue  # job already finished
            ready_j = float(self.job_ready[j])
            for mm, pt in job.ops[k].machine_time.items():
                start = max(float(self.mach_ready[mm]), ready_j)
                out.append((j, k, mm, float(pt), start, start + float(pt)))
        return out

    def step_assign(self, cand):
        """Commit one candidate tuple and advance the state."""
        j, k, mm, pt, start, end = cand
        self.mach_ready[mm] = end
        self.job_ready[j] = end
        self.assign[(j, k)] = (mm, start, end)
        self.makespan = max(self.makespan, end)
        self.next_op[j] += 1
        # rem_work is an optimistic bound, so subtract the fastest time
        fastest = min(self.pb.jobs[j].ops[k].machine_time.values())
        self.rem_work[j] = max(0.0, self.rem_work[j] - float(fastest))
        self.done_ops += 1

    def done(self):
        """True once every operation of every job has been scheduled."""
        return self.done_ops >= self.total_ops

# --- 基线规则用于老师收尾/对比 ---

def rule_score(name: str, ctx: dict) -> float:
    """Priority value of a candidate under dispatching rule `name`
    (lower is better). Unknown rule names fall back to ECT."""
    dispatch = {
        "SPT":   lambda c: c["pt"],          # shortest processing time
        "MWKR":  lambda c: -c["rem_work"],   # most work remaining
        "MOPNR": lambda c: -(c["rem_ops"]),  # most operations remaining
        "ECT":   lambda c: c["end"],         # earliest completion time
    }
    return dispatch.get(name, dispatch["ECT"])(ctx)

BASELINE_RULES = ["SPT", "MWKR", "MOPNR", "ECT"]

def greedy_finish(state: FJSPState, rule_name: str) -> float:
    """Complete the schedule from `state` with one dispatching rule.

    Works on a clone, so the caller's state is untouched. Ties on the rule
    score are broken by earlier completion time. Returns the makespan.
    """
    sim = state.clone()
    while not sim.done():
        chosen = None
        chosen_score = float("inf")
        for cand in sim.candidates():
            j, k, mm, pt, start, end = cand
            score = rule_score(rule_name, {
                "pt": pt,
                "rem_work": float(sim.rem_work[j]),
                "rem_ops": float(len(sim.pb.jobs[j].ops) - k),
                "end": end,
            })
            tie_better = abs(score - chosen_score) < 1e-9 and end < (chosen[5] if chosen else 1e18)
            if score < chosen_score or tie_better:
                chosen, chosen_score = cand, score
        sim.step_assign(chosen)
    return sim.makespan

def oracle_select(state: FJSPState, tail_rule="ECT") -> int:
    """Teacher policy: one-step lookahead scored by a greedy rollout.

    Tries every candidate, finishes the remaining schedule with `tail_rule`,
    and returns the index of the candidate whose rollout makespan is the
    smallest (first one wins on ties).
    """
    best_idx = 0
    best_mk = float("inf")
    for idx, cand in enumerate(state.candidates()):
        rollout = state.clone()
        rollout.step_assign(cand)
        mk = greedy_finish(rollout, tail_rule)
        if mk < best_mk:
            best_mk, best_idx = mk, idx
    return best_idx

# --- 特征工程 ---

def global_stats(state: FJSPState) -> np.ndarray:
    """Fixed-size global feature vector for the current state.

    Layout: [mean, max, min, std] for each of job_ready, mach_ready,
    rem_work and next_op, then progress ratio, total op count and machine
    count — 19 float32 values in total.
    """
    def _summary(vec: np.ndarray):
        if vec.size == 0:
            return [0, 0, 0, 0]
        return [float(np.mean(vec)), float(np.max(vec)),
                float(np.min(vec)), float(np.std(vec) + 1e-8)]

    feats = []
    for vec in (state.job_ready, state.mach_ready,
                state.rem_work, state.next_op.astype(np.float32)):
        feats.extend(_summary(vec))
    feats.extend([state.done_ops / max(1, state.total_ops),
                  float(state.total_ops), float(state.m)])
    return np.array(feats, dtype=np.float32)

def cand_features(state: FJSPState, cand) -> np.ndarray:
    """Per-candidate feature vector (9 float32 values).

    The first seven entries (pt, start, end, slack, job_ready, mach_ready,
    rem_work) are clamped at 0 and log1p-compressed; the remaining op count
    and the raw machine id are left as-is.
    """
    j, k, mm, pt, start, end = cand
    job = state.pb.jobs[j]
    raw = [
        pt,
        start,
        end,
        float(job.due - start),       # slack relative to the due date
        float(state.job_ready[j]),
        float(state.mach_ready[mm]),
        float(state.rem_work[j]),
        float(len(job.ops) - k),      # remaining ops of this job
        float(mm),
    ]
    # simple log compression to tame scale differences
    compressed = [math.log1p(max(0.0, v)) for v in raw[:7]] + raw[7:]
    return np.array(compressed, dtype=np.float32)

# --- 数据集：步骤级监督样本 ---

class StepDataset(Dataset):
    """In-memory dataset of per-step supervision samples.

    Each item is a dict with global features "g", candidate matrix "c",
    mask "m", teacher action index "y", and an optional embedding "e".
    """

    def __init__(self):
        self.items = []

    def __len__(self):
        return len(self.items)

    def __getitem__(self, idx):
        return self.items[idx]

    def add_item(self, g, c, m, y, embed=None):
        """Append one sample, coercing arrays to float32."""
        self.items.append({
            "g": g.astype(np.float32),
            "c": c.astype(np.float32),
            "m": m.astype(np.float32),
            "y": int(y),
            "e": None if embed is None else embed.astype(np.float32),
        })

def collate_batch(batch):
    """Pad variable-length candidate lists to a common width and stack.

    Padded candidate rows get mask 0.0 so the model ignores them. Items
    without an embedding contribute a one-element zero placeholder to E.
    Returns (G, C, M, Y, E) tensors.
    """
    width = max(item["c"].shape[0] for item in batch)
    gs, cs, ms, ys, es = [], [], [], [], []
    for item in batch:
        c, m = item["c"], item["m"]
        deficit = width - c.shape[0]
        if deficit > 0:
            c = np.pad(c, ((0, deficit), (0, 0)), mode="constant", constant_values=0.0)
            m = np.pad(m, (0, deficit), mode="constant", constant_values=0.0)
        gs.append(item["g"])
        cs.append(c)
        ms.append(m)
        ys.append(item["y"])
        e = item.get("e", None)
        es.append(np.zeros(1, dtype=np.float32) if e is None else e)
    return (torch.tensor(np.stack(gs), dtype=torch.float32),
            torch.tensor(np.stack(cs), dtype=torch.float32),
            torch.tensor(np.stack(ms), dtype=torch.float32),
            torch.tensor(ys, dtype=torch.long),
            torch.tensor(np.stack(es), dtype=torch.float32))

def build_training_data(n_instances: int = 200,
                        n_jobs_range=(6, 10),
                        m: int = 5,
                        ops_per_job: int = 4,
                        seed: int = 0) -> StepDataset:
    """Generate step-level supervision by rolling out the teacher.

    For each random instance, the oracle is queried at every decision step;
    the (features, chosen index) pair is recorded and the chosen candidate
    is committed before continuing.
    """
    rng = random.Random(seed)
    dataset = StepDataset()
    lo, hi = n_jobs_range
    for _ in range(n_instances):
        pb = build_random_problem(rng.randint(lo, hi), m, ops_per_job,
                                  seed=rng.randint(0, 1 << 30))
        state = FJSPState(pb)
        while not state.done():
            cands = state.candidates()
            glob = global_stats(state)
            cand_mat = np.stack([cand_features(state, c) for c in cands], axis=0)
            label = oracle_select(state, tail_rule="ECT")
            dataset.add_item(glob, cand_mat,
                             np.ones(cand_mat.shape[0], dtype=np.float32), label)
            state.step_assign(cands[label])
    return dataset


# =========================
# model 子模块：思维模型 & 存取
# =========================

class ScoreModel(nn.Module):
    """Abstract scorer: given global features and a candidate batch, emit one score per candidate."""
    def forward(self, G:torch.Tensor, C:torch.Tensor, mask:torch.Tensor) -> torch.Tensor:
        # Subclasses must return [B, Cmax] scores with masked slots suppressed.
        raise NotImplementedError
    def embed_state(self, G:torch.Tensor) -> torch.Tensor:
        """Export a "thinking embedding" of the state for experience retrieval (simple version)."""
        raise NotImplementedError

class MLPScore(ScoreModel):
    """MLP scorer: a context head embeds the global features, and that
    embedding is concatenated to every candidate row before scoring."""

    def __init__(self, g_dim:int, c_dim:int, hidden:int=128, embed_dim:int=128):
        super().__init__()
        # NOTE: keep the two Sequentials created in this order so seeded
        # weight initialisation stays reproducible across versions.
        self.ctx = nn.Sequential(
            nn.Linear(g_dim, hidden), nn.ReLU(),
            nn.Linear(hidden, embed_dim), nn.ReLU(),
        )
        self.cand = nn.Sequential(
            nn.Linear(c_dim + embed_dim, hidden), nn.ReLU(),
            nn.Linear(hidden, hidden), nn.ReLU(),
            nn.Linear(hidden, 1),
        )
        self.embed_dim = embed_dim

    def forward(self, G, C, mask):
        """Score candidates.

        G: [B, g_dim], C: [B, Cmax, c_dim], mask: [B, Cmax] (1 = valid).
        Masked-out slots are filled with -1e9 so argmax/softmax skip them.
        """
        n_cand = C.shape[1]
        context = self.ctx(G)                               # [B, E]
        tiled = context.unsqueeze(1).expand(-1, n_cand, -1) # [B, Cmax, E]
        logits = self.cand(torch.cat([C, tiled], dim=-1)).squeeze(-1)
        return logits.masked_fill(mask < 0.5, -1e9)

    def embed_state(self, G):
        """Context embedding of the global features (no grad)."""
        with torch.no_grad():
            return self.ctx(G)

def save_thinking_model(model:ScoreModel, path:str, norm_dict:dict=None):
    """Serialize model weights (plus optional normalisation info) to `path`,
    creating the parent directory if needed."""
    parent = os.path.dirname(path) or "."
    os.makedirs(parent, exist_ok=True)
    payload = {"state_dict": model.state_dict(),
               "cls": "MLPScore",
               "norm": norm_dict or {}}
    torch.save(payload, path)

def load_thinking_model(path:str, g_dim:int, c_dim:int, device=None) -> Tuple[ScoreModel, dict]:
    """Rebuild an MLPScore with the given dims and load its weights from `path`.

    Returns (model_on_device, normalisation_dict). NOTE(review): torch.load
    deserialises pickled objects — only load checkpoints from trusted sources.
    """
    target = device or ("cuda" if torch.cuda.is_available() else "cpu")
    checkpoint = torch.load(path, map_location=target)
    model = MLPScore(g_dim, c_dim, hidden=128, embed_dim=128)
    model.load_state_dict(checkpoint["state_dict"])
    model.to(target)
    return model, checkpoint.get("norm", {})


# =========================
# train 子模块：训练器
# =========================

class Trainer:
    """Supervised (imitation) trainer: cross-entropy over candidate scores,
    with the teacher's action index as the class label."""

    def __init__(self, g_dim, c_dim, device=None):
        self.device = device or torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.model = MLPScore(g_dim, c_dim, hidden=128, embed_dim=128).to(self.device)
        self.opt = optim.Adam(self.model.parameters(), lr=1e-3)
        self.crit = nn.CrossEntropyLoss()

    def fit(self, train_ds:StepDataset, val_ds:StepDataset, epochs=10, batch_size=64):
        """Train for `epochs` epochs; returns (train_loss_hist, val_loss_hist)."""
        tr_loader = DataLoader(train_ds, batch_size=batch_size, shuffle=True,
                               collate_fn=collate_batch)
        va_loader = DataLoader(val_ds, batch_size=batch_size, shuffle=False,
                               collate_fn=collate_batch)
        hist_train, hist_val = [], []
        for ep in range(1, epochs + 1):
            # --- training pass ---
            self.model.train()
            loss_sum, hits, seen = 0.0, 0, 0
            for G, C, M, Y, _E in tr_loader:
                G, C, M, Y = (t.to(self.device) for t in (G, C, M, Y))
                self.opt.zero_grad()
                logits = self.model(G, C, M)
                loss = self.crit(logits, Y)
                loss.backward()
                nn.utils.clip_grad_norm_(self.model.parameters(), 1.0)
                self.opt.step()
                loss_sum += float(loss.item()) * G.size(0)
                seen += G.size(0)
                hits += int((logits.argmax(1) == Y).sum().item())
            tr_loss = loss_sum / max(1, seen)
            tr_acc = hits / max(1, seen)
            # --- validation pass (no grads) ---
            self.model.eval()
            vloss_sum, vhits, vseen = 0.0, 0, 0
            with torch.no_grad():
                for G, C, M, Y, _E in va_loader:
                    G, C, M, Y = (t.to(self.device) for t in (G, C, M, Y))
                    logits = self.model(G, C, M)
                    vloss_sum += float(self.crit(logits, Y).item()) * G.size(0)
                    vseen += G.size(0)
                    vhits += int((logits.argmax(1) == Y).sum().item())
            va_loss = vloss_sum / max(1, vseen)
            va_acc = vhits / max(1, vseen)
            hist_train.append(tr_loss)
            hist_val.append(va_loss)
            print(f"[Epoch {ep:02d}] train_loss={tr_loss:.4f} acc={tr_acc:.3f} | val_loss={va_loss:.4f} acc={va_acc:.3f}")
        return hist_train, hist_val

def plot_learning_curve(tr, va, title="Training"):
    """Plot train/val loss per epoch and show the figure."""
    epochs = np.arange(1, len(tr) + 1)
    plt.figure(figsize=(7, 4))
    plt.plot(epochs, tr, label="train")
    plt.plot(epochs, va, label="val")
    plt.xlabel("Epoch")
    plt.ylabel("Loss")
    plt.title(title)
    plt.grid(True, alpha=0.3)
    plt.legend()
    plt.tight_layout()
    plt.show()


# =========================
# infer 子模块：调度（含经验库可选）
# =========================

class SimpleMemory:
    """Ultra-light "experience store" of (state-embedding, action-index) pairs;
    nearest-neighbour retrieval provides a prior over actions.

    Fix: the original `add` used np.vstack, copying the whole matrix on every
    insert (O(n) per add, O(n^2) overall). Entries are now appended to Python
    lists in O(1); `vecs`/`acts` remain available as read-only ndarray
    properties for backward compatibility.
    """
    def __init__(self, dim:int):
        self.dim = dim
        self._vec_list: List[np.ndarray] = []  # stored embeddings, shape (dim,)
        self._act_list: List[int] = []         # action index per embedding

    @property
    def vecs(self) -> np.ndarray:
        """All stored embeddings as a [n, dim] float32 array."""
        if not self._vec_list:
            return np.empty((0, self.dim), dtype=np.float32)
        return np.stack(self._vec_list, axis=0)

    @property
    def acts(self) -> np.ndarray:
        """All stored action indices as an [n, 1] int64 array."""
        return np.array(self._act_list, dtype=np.int64).reshape(-1, 1)

    def add(self, vec:np.ndarray, act:int):
        """Append one (embedding, action) pair. O(1) amortised."""
        self._vec_list.append(vec.astype(np.float32).reshape(-1))
        self._act_list.append(int(act))

    def topk(self, q:np.ndarray, k:int=8) -> List[int]:
        """Action indices of the k nearest stored embeddings (L2 distance)."""
        if not self._vec_list:
            return []
        mat = np.stack(self._vec_list, axis=0)
        d = np.sum((mat - q.reshape(1, -1)) ** 2, axis=1)
        order = np.argsort(d)[:min(k, len(d))]
        return [self._act_list[i] for i in order]

class Scheduler:
    """Roll out a trained ScoreModel to build a schedule step by step.

    Optionally consults a SimpleMemory of past (state-embedding, action)
    pairs: nearest neighbours vote for candidate indices and the normalised
    vote mass is added as a *bonus* to the model scores (higher = preferred,
    matching the argmax selection below).

    Fixes vs the original:
    - the memory prior used to be subtracted-like (it lowered the scores of
      memory-favoured candidates while argmax still picked the highest), so
      the memory actively steered away from its own votes; it is now a
      positive bonus weighted by alpha_mem;
    - experiences used to be stored with the *post-action* state embedding,
      while lookups queried the *pre-action* embedding; both now use the
      pre-action embedding so keys are consistent.
    """
    def __init__(self, model:ScoreModel, device=None, memory:Optional[SimpleMemory]=None, alpha_mem:float=0.2):
        self.model = model
        self.device = device or torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.memory = memory
        self.alpha_mem = alpha_mem

    def schedule(self, pb:Problem, use_memory:bool=True) -> Tuple[float, Dict]:
        """Greedy rollout on `pb`; returns (makespan, assignment map)."""
        st = FJSPState(pb)
        self.model.eval()
        with torch.no_grad():
            while not st.done():
                cands = st.candidates()
                g = torch.tensor(global_stats(st), dtype=torch.float32).unsqueeze(0).to(self.device)
                cf = np.stack([cand_features(st, c) for c in cands], axis=0)
                c = torch.tensor(cf, dtype=torch.float32).unsqueeze(0).to(self.device)
                m = torch.ones((1, c.shape[1]), dtype=torch.float32).to(self.device)
                scores = self.model(g, c, m).squeeze(0).cpu().numpy()  # [C], higher = better

                # Pre-action state embedding: used both as the neighbour query
                # and (below) as the key stored with the chosen action.
                emb = None
                if self.memory is not None:
                    emb = self.model.embed_state(g).cpu().numpy().reshape(-1)

                # Memory prior (optional): neighbours vote for action indices;
                # boost the voted candidates proportionally to alpha_mem.
                if use_memory and emb is not None:
                    neigh = self.memory.topk(emb, k=8)
                    if neigh:
                        prior = np.zeros_like(scores)
                        for a in neigh:
                            if a < len(scores):
                                prior[a] += 1.0
                        if prior.sum() > 0:
                            prior = prior / (prior.sum() + 1e-9)
                            # scale the bonus to the score magnitude so it can
                            # actually influence the argmax
                            scale = float(np.abs(scores).max()) + 1e-6
                            scores = scores + self.alpha_mem * scale * prior

                idx = int(np.argmax(scores))
                st.step_assign(cands[idx])

                # Record (pre-action embedding, chosen action) for later reuse.
                if self.memory is not None:
                    self.memory.add(emb, idx)

        return st.makespan, st.assign


# =========================
# repair 子模块：轻量局部搜索（可选）
# =========================

def local_repair_swap(pb:Problem, assign:Dict[Tuple[int,int],Tuple[int,float,float]],
                      tries:int=200) -> Tuple[float, Dict]:
    """
    Light repair: keep the incoming assignment as the incumbent and run
    `tries` randomized ECT (earliest-completion-time) greedy rebuilds from an
    empty state; return the best (makespan, assignment) found.

    Bug fix: the original loop created an rng it never used and rebuilt the
    *identical* deterministic ECT schedule on every iteration, so all tries
    beyond the first were wasted. Each rebuild now occasionally picks among
    the top few earliest-finishing candidates at random, so different tries
    explore different schedules. The first try is the pure deterministic ECT
    rebuild, matching the old behaviour; results can only improve with tries.
    """
    best_assign = dict(assign)
    # incumbent makespan = latest end time in the given assignment
    best_mk = max(end for (_mm, _s, end) in best_assign.values())
    rng = random.Random(0)  # fixed seed keeps the repair reproducible

    def _randomized_ect(eps: float) -> Tuple[float, Dict]:
        """One greedy ECT rebuild; with prob `eps` per step, pick uniformly
        among the 3 earliest-finishing candidates instead of the single best."""
        st = FJSPState(pb)
        while not st.done():
            # sort by end time; stable sort keeps the original tie order
            ranked = sorted(st.candidates(), key=lambda c: c[5])
            if eps > 0.0 and len(ranked) > 1 and rng.random() < eps:
                pick = rng.choice(ranked[:3])
            else:
                pick = ranked[0]
            st.step_assign(pick)
        return st.makespan, st.assign

    for t in range(tries):
        eps = 0.0 if t == 0 else 0.2  # first try = deterministic ECT
        mk, new_assign = _randomized_ect(eps)
        if mk < best_mk:
            best_mk, best_assign = mk, new_assign
    return best_mk, best_assign


# =========================
# viz 子模块：甘特图
# =========================

def plot_gantt(pb: Problem, assign: Dict[Tuple[int,int], Tuple[int,float,float]], title: str):
    """Draw a Gantt chart: one row per machine, one bar per scheduled op."""
    per_machine = {mm: [] for mm in range(pb.m)}
    for (j, k), (mm, s, e) in assign.items():
        per_machine[mm].append((s, e - s, f"J{j}-O{k}"))
    fig, ax = plt.subplots(figsize=(12, 0.7 * pb.m + 3))
    yticks, ylabels = [], []
    for row, mm in enumerate(sorted(per_machine)):
        for start, dur, label in sorted(per_machine[mm], key=lambda x: x[0]):
            ax.barh(row, dur, left=start)
            ax.text(start + dur / 2, row, label, ha="center", va="center", fontsize=8)
        yticks.append(row)
        ylabels.append(f"M{mm}")
    ax.set_yticks(yticks)
    ax.set_yticklabels(ylabels)
    ax.set_xlabel("Time")
    ax.set_title(title)
    ax.grid(axis="x", alpha=0.3)
    plt.tight_layout()
    plt.show()


# =========================
# main 演示
# =========================

def main():
    """Demo pipeline: data generation -> imitation training -> evaluation on
    small and larger instances -> light repair -> model save/load round-trip."""
    np.random.seed(0); random.seed(0); torch.manual_seed(0)

    # 1) Generate training/validation data (small/medium scale)
    print("Building datasets ...")
    train_ds = build_training_data(n_instances=250, n_jobs_range=(6,10), m=5, ops_per_job=4, seed=123)
    val_ds   = build_training_data(n_instances=60,  n_jobs_range=(8,10), m=5, ops_per_job=4, seed=456)

    # 2) Train the scoring ("thinking") model; dims are read from the data
    g_dim = train_ds.items[0]["g"].shape[0]
    c_dim = train_ds.items[0]["c"].shape[1]
    trainer = Trainer(g_dim, c_dim)
    tr_hist, va_hist = trainer.fit(train_ds, val_ds, epochs=12, batch_size=64)
    plot_learning_curve(tr_hist, va_hist, title="Imitation Learning - Loss")

    # 3) Small-scale evaluation: compare against the rule baselines
    print("\n=== Small-scale evaluation ===")
    test_small = build_random_problem(n_jobs=10, m=5, ops_per_job=4, seed=2024)
    sched = Scheduler(trainer.model, memory=SimpleMemory(dim=128), alpha_mem=0.2)
    mk_model, assign_model = sched.schedule(test_small, use_memory=True)
    print(f"Model  makespan: {mk_model:.2f}")
    for r in BASELINE_RULES:
        s = FJSPState(test_small)
        mk_rule = greedy_finish(s, r)
        print(f"{r:<6} makespan: {mk_rule:.2f}")
    plot_gantt(test_small, assign_model, title=f"Model schedule (small) mk={mk_model:.1f}")

    # 4) Larger-scale generalization: schedule with the model, then apply one light repair pass (optional)
    print("\n=== Larger-scale generalization ===")
    test_big = build_random_problem(n_jobs=20, m=5, ops_per_job=4, seed=2025)
    mk_big, assign_big = sched.schedule(test_big, use_memory=True)
    print(f"Model  makespan (big): {mk_big:.2f}")
    mk_big_rep, assign_big_rep = local_repair_swap(test_big, assign_big, tries=60)
    print(f"After light repair:    {mk_big_rep:.2f}")
    plot_gantt(test_big, assign_big_rep, title=f"Model schedule (big+LNS) mk={mk_big_rep:.1f}")

    # 5) Save/load the thinking model (weights + optional normalisation info)
    save_thinking_model(trainer.model, "thinking_model.pt", norm_dict={})
    model2, _ = load_thinking_model("thinking_model.pt", g_dim, c_dim)
    sched2 = Scheduler(model2, memory=SimpleMemory(dim=128), alpha_mem=0.2)
    mk_reload, _ = sched2.schedule(test_small, use_memory=True)
    print(f"\nReloaded model makespan: {mk_reload:.2f} (should be similar to Model)")

if __name__ == "__main__":
    main()
