#!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
@Project ：V2 
@File    ：PTModel1.py
@IDE     ：PyCharm 
@Author  ：郭星
@Date    ：2025/9/20 20:20 
'''
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""

如果我想训练一个模型去解决柔性作业车间调度问题，用小规模的数据去训练他，训练完之后把相关的数据留下来作为他的思维大脑，然后新来一个柔性作业车间调度问题，可以通过调用这个模型的大脑来输出方案，帮我写一下这个代码呗 谢谢你
fjsp_thinking_brain.py

目标：用小规模数据训练“思维模型（大脑）”，保存下来；新问题时调用“大脑”输出调度方案。
- 生成小规模FJSP训练样本（老师=一步前瞻+贪心收尾）
- 训练候选打分器（模仿学习）
- 保存“思维大脑”：模型参数 + 归一化统计 + 经验库(状态嵌入->动作)
- 加载大脑，对新实例进行调度（可选：检索增强）
"""

import os
import math
import json
import random
from dataclasses import dataclass
from typing import Dict, List, Tuple, Optional

import numpy as np
import matplotlib.pyplot as plt

import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader


# =========================
# 数据建模：FJSP 问题与状态
# =========================

@dataclass
class Operation:
    """One operation of a job, with its set of eligible machines."""
    machine_time: Dict[int, int]  # eligible machine id -> processing time

@dataclass
class Job:
    """An ordered sequence of operations plus an optional due date."""
    ops: List[Operation]
    due: float = 0.0              # optional; used for slack features

@dataclass
class Problem:
    """A complete FJSP instance: the job list plus the machine count."""
    jobs: List["Job"]
    m: int  # number of machines

    @property
    def n_jobs(self) -> int:
        """Number of jobs in this instance."""
        return len(self.jobs)

    @property
    def n_ops_total(self) -> int:
        """Total number of operations summed over all jobs."""
        total = 0
        for job in self.jobs:
            total += len(job.ops)
        return total

def build_random_problem(n_jobs: int, m: int, ops_per_job: int,
                         seed: Optional[int] = None) -> "Problem":
    """Generate a random FJSP instance.

    Each operation is runnable on 2 machines (1 if m == 1) with processing
    times in [1, 9]; a loose due date is derived from the fastest-route
    completion time plus a random buffer.
    """
    rng = random.Random(seed)
    job_list: List["Job"] = []
    n_alt = 2 if m >= 2 else 1  # eligible machines per operation
    for _ in range(n_jobs):
        op_list = []
        fastest_total = 0
        for _ in range(ops_per_job):
            eligible = sorted(rng.sample(range(m), k=n_alt))
            times = {mach: rng.randint(1, 9) for mach in eligible}
            op_list.append(Operation(times))
            fastest_total += min(times.values())
        # Simple due date: fastest possible finish plus a random slack.
        job_list.append(Job(ops=op_list, due=float(fastest_total + rng.randint(5, 20))))
    return Problem(jobs=job_list, m=m)

class FJSPState:
    """Mutable container tracking a partially-built dispatch schedule."""

    def __init__(self, pb: "Problem"):
        self.pb = pb
        self.n_jobs = pb.n_jobs
        self.m = pb.m
        # Index of the next unscheduled operation per job.
        self.next_op = np.zeros(self.n_jobs, dtype=np.int32)
        # Earliest time each job / machine becomes free again.
        self.job_ready = np.zeros(self.n_jobs, dtype=np.float32)
        self.mach_ready = np.zeros(self.m, dtype=np.float32)
        # Remaining workload per job, counting each op at its fastest machine.
        self.rem_work = np.array(
            [sum(min(op.machine_time.values()) for op in job.ops) for job in pb.jobs],
            dtype=np.float32,
        )
        self.done_ops = 0
        self.total_ops = pb.n_ops_total
        # (job, op_index) -> (machine, start, end)
        self.assign: Dict[Tuple[int, int], Tuple[int, float, float]] = {}
        self.makespan = 0.0

    def clone(self):
        """Independent copy for lookahead simulation (problem data is shared)."""
        other = FJSPState(self.pb)
        other.next_op = self.next_op.copy()
        other.job_ready = self.job_ready.copy()
        other.mach_ready = self.mach_ready.copy()
        other.rem_work = self.rem_work.copy()
        other.done_ops = self.done_ops
        other.total_ops = self.total_ops
        other.assign = dict(self.assign)
        other.makespan = self.makespan
        return other

    def candidates(self):
        """List every dispatchable option as (j, k, mm, pt, start, end)."""
        options = []
        for j in range(self.n_jobs):
            k = int(self.next_op[j])
            ops = self.pb.jobs[j].ops
            if k >= len(ops):
                continue  # job already finished
            for mm, pt in ops[k].machine_time.items():
                begin = max(float(self.mach_ready[mm]), float(self.job_ready[j]))
                options.append((j, k, mm, float(pt), begin, begin + float(pt)))
        return options

    def step_assign(self, cand):
        """Commit one candidate dispatch and update all bookkeeping."""
        j, k, mm, pt, start, end = cand
        self.mach_ready[mm] = end
        self.job_ready[j] = end
        self.assign[(j, k)] = (mm, start, end)
        self.makespan = max(self.makespan, end)
        self.next_op[j] += 1
        # Feature bookkeeping: shrink remaining work by this op's best time.
        fastest = min(self.pb.jobs[j].ops[k].machine_time.values())
        self.rem_work[j] = max(0.0, self.rem_work[j] - float(fastest))
        self.done_ops += 1

    def done(self):
        """True once every operation of every job has been dispatched."""
        return self.done_ops >= self.total_ops


# =========================
# 老师策略：一步前瞻 + 贪心收尾
# =========================

def rule_score(name: str, ctx: dict) -> float:
    """Score a candidate under dispatching rule `name` (lower is better).

    SPT: shortest processing time; MWKR: most work remaining;
    MOPNR: most operations remaining; ECT (and any unknown rule name):
    earliest completion time.
    """
    table = {
        "SPT": lambda c: c["pt"],
        "MWKR": lambda c: -c["rem_work"],
        "MOPNR": lambda c: -c["rem_ops"],
        "ECT": lambda c: c["end"],
    }
    return table.get(name, lambda c: c["end"])(ctx)

def greedy_finish(state: "FJSPState", rule_name: str) -> float:
    """Roll the schedule out to completion with one dispatching rule.

    Works on a clone, so `state` is left untouched; returns the resulting
    makespan. Score ties (within 1e-9) are broken by earlier completion time.
    """
    sim = state.clone()
    while not sim.done():
        chosen, chosen_score = None, float("inf")
        for cand in sim.candidates():
            j, k, mm, pt, st_t, en = cand
            score = rule_score(rule_name, {
                "pt": pt,
                "rem_work": float(sim.rem_work[j]),
                "rem_ops": float(len(sim.pb.jobs[j].ops) - k),
                "end": en,
            })
            tie = abs(score - chosen_score) < 1e-9 and en < (chosen[5] if chosen else 1e18)
            if score < chosen_score or tie:
                chosen, chosen_score = cand, score
        sim.step_assign(chosen)
    return sim.makespan

def oracle_select(state: "FJSPState", tail_rule="ECT") -> int:
    """Teacher policy: one-step lookahead scored by a greedy rollout.

    Simulates each current candidate for one step, finishes the schedule
    with `tail_rule`, and returns the index of the candidate whose rollout
    makespan is smallest (ties keep the earliest-enumerated candidate).
    """
    best_idx, best_mk = 0, float("inf")
    for idx, cand in enumerate(state.candidates()):
        trial = state.clone()
        trial.step_assign(cand)
        rollout_mk = greedy_finish(trial, tail_rule)
        if rollout_mk < best_mk:
            best_idx, best_mk = idx, rollout_mk
    return best_idx


# =========================
# 特征工程
# =========================

def global_stats(state: "FJSPState") -> np.ndarray:
    """Summarize the whole scheduling state as a fixed-length feature vector.

    Four summary stats (mean/max/min/std) for each of: job-ready times,
    machine-ready times, remaining work, next-op indices; plus progress
    ratio, total op count and machine count => 4*4 + 3 = 19 features.
    """
    def summarize(vec):
        if vec.size == 0:
            return [0, 0, 0, 0]
        return [float(np.mean(vec)), float(np.max(vec)),
                float(np.min(vec)), float(np.std(vec) + 1e-8)]

    progress = state.done_ops / max(1, state.total_ops)
    feats = (summarize(state.job_ready)
             + summarize(state.mach_ready)
             + summarize(state.rem_work)
             + summarize(state.next_op.astype(np.float32))
             + [progress, float(state.total_ops), float(state.m)])
    return np.array(feats, dtype=np.float32)

def cand_features(state: "FJSPState", cand) -> np.ndarray:
    """Encode one candidate dispatch as a 9-dim feature vector.

    The first seven features (times, slack, ready times, workload) are
    passed through log1p (clamped at zero) to tame their scale; the last
    two (remaining op count, machine id) are kept raw.
    """
    j, k, mm, pt, start, end = cand
    remaining_ops = len(state.pb.jobs[j].ops) - k
    slack = float(state.pb.jobs[j].due - start)
    raw = [
        pt, start, end, slack,
        float(state.job_ready[j]),
        float(state.mach_ready[mm]),
        float(state.rem_work[j]),
        float(remaining_ops),
        float(mm),
    ]
    squashed = [math.log1p(max(0.0, v)) for v in raw[:7]] + raw[7:]
    return np.array(squashed, dtype=np.float32)


# =========================
# 数据集（步骤级监督）
# =========================

class StepDataset(Dataset):
    """Step-level supervision dataset: one item per dispatch decision."""

    def __init__(self):
        # Each item: global feats "g", candidate matrix "c", mask "m",
        # teacher-chosen candidate index "y".
        self.items = []

    def __len__(self):
        return len(self.items)

    def __getitem__(self, idx):
        return self.items[idx]

    def add(self, g, c, m, y):
        """Append one supervised step (arrays coerced to float32, label to int)."""
        self.items.append({
            "g": g.astype(np.float32),
            "c": c.astype(np.float32),
            "m": m.astype(np.float32),
            "y": int(y),
        })

def collate_batch(batch):
    """Pad variable-length candidate lists and stack a batch into tensors.

    Returns (G, C, M, Y): global feats [B, g_dim], zero-padded candidate
    feats [B, Cmax, c_dim], validity mask [B, Cmax] (0 where padded) and
    teacher labels [B].
    """
    width = max(item["c"].shape[0] for item in batch)
    gs, cs, ms, ys = [], [], [], []
    for item in batch:
        c, m = item["c"], item["m"]
        short = width - c.shape[0]
        if short > 0:
            c = np.pad(c, ((0, short), (0, 0)), mode="constant", constant_values=0.0)
            m = np.pad(m, (0, short), mode="constant", constant_values=0.0)
        gs.append(item["g"])
        cs.append(c)
        ms.append(m)
        ys.append(item["y"])
    return (torch.tensor(np.stack(gs), dtype=torch.float32),
            torch.tensor(np.stack(cs), dtype=torch.float32),
            torch.tensor(np.stack(ms), dtype=torch.float32),
            torch.tensor(ys, dtype=torch.long))

def build_training_data(n_instances: int = 300,
                        n_jobs_range=(6, 10),
                        m: int = 5,
                        ops_per_job: int = 4,
                        seed: int = 0) -> "StepDataset":
    """Generate imitation-learning data by replaying the teacher policy.

    For each random instance, rolls the schedule forward; at every step it
    records (global feats, candidate feats, all-ones mask, teacher-chosen
    candidate index) and then executes the teacher's choice.
    """
    rng = random.Random(seed)
    dataset = StepDataset()
    for _ in range(n_instances):
        jobs = rng.randint(n_jobs_range[0], n_jobs_range[1])
        pb = build_random_problem(jobs, m, ops_per_job, seed=rng.randint(0, 1 << 30))
        state = FJSPState(pb)
        while not state.done():
            cands = state.candidates()
            g_feat = global_stats(state)
            c_feat = np.stack([cand_features(state, c) for c in cands], axis=0)
            valid = np.ones(c_feat.shape[0], dtype=np.float32)
            label = oracle_select(state, tail_rule="ECT")
            dataset.add(g_feat, c_feat, valid, label)
            state.step_assign(cands[label])
    return dataset


# =========================
# 思维模型（候选打分器）
# =========================

class ScoreModel(nn.Module):
    """Interface for candidate scorers.

    Subclasses must implement `forward` (mask-aware per-candidate scores)
    and `embed_state` (a fixed-size state embedding used by the memory).
    """
    def forward(self, G:torch.Tensor, C:torch.Tensor, mask:torch.Tensor) -> torch.Tensor:
        # G: [B, g_dim] global features; C: [B, Cmax, c_dim] candidate
        # features; mask: [B, Cmax] validity (0 for padded slots).
        # Must return per-candidate scores [B, Cmax]; higher is better.
        raise NotImplementedError
    def embed_state(self, G:torch.Tensor) -> torch.Tensor:
        # Must return a state embedding [B, embed_dim] for retrieval memory.
        raise NotImplementedError

class MLPScore(ScoreModel):
    """Concrete scorer: MLP context encoder + MLP candidate scoring head."""

    def __init__(self, g_dim: int, c_dim: int, hidden: int = 128, embed_dim: int = 128):
        super().__init__()
        # Encodes the global state into a fixed-size context embedding.
        self.ctx = nn.Sequential(
            nn.Linear(g_dim, hidden), nn.ReLU(),
            nn.Linear(hidden, embed_dim), nn.ReLU(),
        )
        # Scores each candidate conditioned on the context embedding.
        self.cand = nn.Sequential(
            nn.Linear(c_dim + embed_dim, hidden), nn.ReLU(),
            nn.Linear(hidden, hidden), nn.ReLU(),
            nn.Linear(hidden, 1),
        )
        self.embed_dim = embed_dim

    def forward(self, G, C, mask):
        n_cand = C.shape[1]
        context = self.ctx(G)                                # [B, E]
        tiled = context.unsqueeze(1).expand(-1, n_cand, -1)  # [B, Cmax, E]
        scores = self.cand(torch.cat([C, tiled], dim=-1)).squeeze(-1)  # [B, Cmax]
        # Padded/invalid candidates get an effectively -inf score.
        return scores.masked_fill(mask < 0.5, -1e9)

    def embed_state(self, G):
        with torch.no_grad():
            return self.ctx(G)                               # [B, E]


# =========================
# 训练器（模仿学习）
# =========================

class Trainer:
    """Imitation-learning trainer for the candidate scorer."""

    def __init__(self, g_dim, c_dim, device=None):
        self.device = device or torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.model = MLPScore(g_dim, c_dim, hidden=128, embed_dim=128).to(self.device)
        self.opt = optim.Adam(self.model.parameters(), lr=1e-3)
        self.crit = nn.CrossEntropyLoss()

    def _run_epoch(self, loader, train: bool):
        """One full pass over `loader`; returns (mean loss, accuracy)."""
        self.model.train() if train else self.model.eval()
        total_loss, total_n, total_correct = 0.0, 0, 0
        for G, C, M, Y in loader:
            G, C, M, Y = (t.to(self.device) for t in (G, C, M, Y))
            if train:
                self.opt.zero_grad()
            scores = self.model(G, C, M)              # [B, Cmax]
            loss = self.crit(scores, Y)
            if train:
                loss.backward()
                nn.utils.clip_grad_norm_(self.model.parameters(), 1.0)
                self.opt.step()
            total_loss += float(loss.item()) * G.size(0)
            total_n += G.size(0)
            total_correct += int((scores.argmax(1) == Y).sum().item())
        return total_loss / max(1, total_n), total_correct / max(1, total_n)

    def fit(self, train_ds: "StepDataset", val_ds: "StepDataset", epochs=12, batch_size=64):
        """Cross-entropy training on teacher labels; returns loss/acc history."""
        tr_loader = DataLoader(train_ds, batch_size=batch_size, shuffle=True, collate_fn=collate_batch)
        va_loader = DataLoader(val_ds, batch_size=batch_size, shuffle=False, collate_fn=collate_batch)
        history = {"train_loss": [], "val_loss": [], "val_acc": []}
        for ep in range(1, epochs + 1):
            tr_loss, tr_acc = self._run_epoch(tr_loader, train=True)
            with torch.no_grad():
                va_loss, va_acc = self._run_epoch(va_loader, train=False)
            history["train_loss"].append(tr_loss)
            history["val_loss"].append(va_loss)
            history["val_acc"].append(va_acc)
            print(f"[Epoch {ep:02d}] train_loss={tr_loss:.4f} acc={tr_acc:.3f} | val_loss={va_loss:.4f} acc={va_acc:.3f}")
        return history


# =========================
# “大脑”的保存与加载（模型+归一化+经验库）
# =========================

class SimpleMemory:
    """Minimal experience store: (state embedding -> teacher/chosen action).

    Retrieval is brute-force nearest-neighbour under squared L2 distance.
    """

    def __init__(self, dim: int):
        self.dim = dim
        self.vecs = np.empty((0, dim), dtype=np.float32)  # [N, dim] embeddings
        self.acts = np.empty((0, 1), dtype=np.int64)      # [N, 1] action indices

    def add(self, vec: np.ndarray, act: int):
        """Append one (embedding, action) pair."""
        self.vecs = np.vstack([self.vecs, vec.reshape(1, -1)])
        self.acts = np.vstack([self.acts, np.array([[act]], dtype=np.int64)])

    def topk(self, q: np.ndarray, k: int = 8) -> List[int]:
        """Actions of the k stored embeddings nearest to `q` (may repeat)."""
        if self.vecs.shape[0] == 0:
            return []
        dist = np.sum((self.vecs - q.reshape(1, -1)) ** 2, axis=1)
        nearest = np.argsort(dist)[:min(k, len(dist))]
        return [int(self.acts[i, 0]) for i in nearest]

def compute_norm_stats(ds: "StepDataset"):
    """Per-dimension mean/std for the global and candidate features.

    Candidate rows from every step are concatenated, so candidate stats are
    column-wise over all candidates ever seen. A small epsilon keeps the
    stds strictly positive.
    """
    g_all = np.stack([item["g"] for item in ds.items], axis=0)        # [N, g_dim]
    c_all = np.concatenate([item["c"] for item in ds.items], axis=0)  # [sumC, c_dim]
    return {
        "g_mu": g_all.mean(0).tolist(),
        "g_std": (g_all.std(0) + 1e-8).tolist(),
        "c_mu": c_all.mean(0).tolist(),
        "c_std": (c_all.std(0) + 1e-8).tolist(),
    }

def apply_norm(g: np.ndarray, c: np.ndarray, norm: dict):
    """Standardize global (`g`) and candidate (`c`) features with stored stats."""
    g_norm = (g - np.array(norm["g_mu"])) / np.array(norm["g_std"])
    c_norm = (c - np.array(norm["c_mu"])) / np.array(norm["c_std"])
    return g_norm, c_norm

def save_brain(model: "MLPScore", norm: dict, memory: "SimpleMemory", path_dir="brain"):
    """Persist the "thinking brain": weights + feature stats + memory.

    Writes model.pt (state dict), norm.json (normalization statistics) and
    mem_vecs.npy / mem_acts.npy (experience store) into `path_dir`.
    """
    os.makedirs(path_dir, exist_ok=True)
    torch.save(model.state_dict(), os.path.join(path_dir, "model.pt"))
    with open(os.path.join(path_dir, "norm.json"), "w", encoding="utf-8") as fh:
        json.dump(norm, fh, ensure_ascii=False, indent=2)
    # Experience store as raw arrays.
    np.save(os.path.join(path_dir, "mem_vecs.npy"), memory.vecs)
    np.save(os.path.join(path_dir, "mem_acts.npy"), memory.acts)
    print(f"[Brain saved] => {os.path.abspath(path_dir)}")

def load_brain(g_dim: int, c_dim: int, path_dir="brain", device=None) -> Tuple["MLPScore", dict, "SimpleMemory"]:
    """Load model weights, normalization stats and (optional) memory.

    `g_dim`/`c_dim` must match the dimensions used at training time.
    Missing memory files simply yield an empty SimpleMemory.
    """
    device = device or torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = MLPScore(g_dim, c_dim, hidden=128, embed_dim=128)
    weights = torch.load(os.path.join(path_dir, "model.pt"), map_location=device)
    model.load_state_dict(weights)
    model.to(device)
    with open(os.path.join(path_dir, "norm.json"), "r", encoding="utf-8") as fh:
        norm = json.load(fh)
    mem = SimpleMemory(dim=128)  # must match the model's embed_dim
    vec_path = os.path.join(path_dir, "mem_vecs.npy")
    if os.path.exists(vec_path):
        mem.vecs = np.load(vec_path)
        mem.acts = np.load(os.path.join(path_dir, "mem_acts.npy"))
    return model, norm, mem


# =========================
# 推理：加载大脑并调度新实例
# =========================

class Scheduler:
    """Inference wrapper: use the loaded "brain" to dispatch new instances.

    At each step the model scores all current candidates; optionally a
    retrieval memory of past (state embedding -> action index) pairs biases
    the scores toward frequently-retrieved actions.
    """
    def __init__(self, model:MLPScore, norm:dict, memory:Optional[SimpleMemory]=None,
                 alpha_mem:float=0.25, device=None):
        # alpha_mem: mixing weight of the memory prior into the model scores.
        self.model = model
        self.norm = norm
        self.memory = memory
        self.alpha_mem = alpha_mem
        self.device = device or torch.device("cuda" if torch.cuda.is_available() else "cpu")

    def schedule(self, pb:Problem, use_memory:bool=True) -> Tuple[float, Dict]:
        """Greedily dispatch `pb` to completion.

        Returns (makespan, assignment) where assignment maps
        (job, op_index) -> (machine, start, end).
        """
        st = FJSPState(pb)
        self.model.eval()
        with torch.no_grad():
            while not st.done():
                cands = st.candidates()
                # Extract features and apply the stored normalization.
                g = global_stats(st)
                cf = np.stack([cand_features(st, c) for c in cands], axis=0)
                g, cf = apply_norm(g, cf, self.norm)

                # Score all candidates in one forward pass (mask is all-ones:
                # every enumerated candidate is valid at inference time).
                Gt = torch.tensor(g,dtype=torch.float32).unsqueeze(0).to(self.device)
                Ct = torch.tensor(cf,dtype=torch.float32).unsqueeze(0).to(self.device)
                Mt = torch.ones((1, Ct.shape[1]), dtype=torch.float32).to(self.device)
                scores = self.model(Gt,Ct,Mt).squeeze(0).cpu().numpy()  # [C]

                # Optional retrieval prior from the experience memory.
                if use_memory and self.memory is not None:
                    emb = self.model.embed_state(Gt).cpu().numpy().reshape(-1)
                    neigh = self.memory.topk(emb, k=8)
                    if len(neigh)>0:
                        prior = np.zeros_like(scores)
                        # NOTE(review): stored action indices refer to the
                        # candidate list of the state they were recorded in;
                        # reusing them as indices into this state's candidate
                        # list assumes the lists line up — verify intent.
                        for a in neigh:
                            if a < len(scores):
                                prior[a] += 1.0
                        if prior.sum()>0:
                            prior = prior / (prior.sum()+1e-9)
                            # Blend: the prior is shifted above the current best
                            # score so retrieved actions get a boost
                            # (higher score = better here).
                            scores = (1-self.alpha_mem)*scores + self.alpha_mem*(scores.max() + prior)

                idx = int(np.argmax(scores))
                st.step_assign(cands[idx])

                # Append experience (material for online continual learning).
                # NOTE(review): the embedding is computed from the *post-step*
                # state but paired with the index chosen in the pre-step
                # candidate list — confirm this pairing is intended.
                if self.memory is not None:
                    emb2 = self.model.embed_state(
                        torch.tensor(apply_norm(global_stats(st), np.zeros((1,cf.shape[1])), self.norm)[0],
                                     dtype=torch.float32).unsqueeze(0).to(self.device)
                    ).cpu().numpy().reshape(-1)
                    self.memory.add(emb2, idx)

        return st.makespan, st.assign


# =========================
# 可视化：甘特图（查看排程）
# =========================

def plot_gantt(pb: "Problem", assign: Dict[Tuple[int,int], Tuple[int,float,float]], title: str):
    """Draw a Gantt chart of `assign`: one horizontal lane per machine."""
    lanes = {mm: [] for mm in range(pb.m)}
    for (j, k), (mm, s, e) in assign.items():
        lanes[mm].append((s, e - s, f"J{j}-O{k}"))
    fig, ax = plt.subplots(figsize=(12, 0.7 * pb.m + 3))
    yticks, ylabels = [], []
    for row, mm in enumerate(sorted(lanes)):
        for start, dur, label in sorted(lanes[mm], key=lambda x: x[0]):
            ax.barh(row, dur, left=start)
            ax.text(start + dur / 2, row, label, ha="center", va="center", fontsize=8)
        yticks.append(row)
        ylabels.append(f"M{mm}")
    ax.set_yticks(yticks)
    ax.set_yticklabels(ylabels)
    ax.set_xlabel("Time")
    ax.set_title(title)
    ax.grid(axis="x", alpha=0.3)
    plt.tight_layout()
    plt.show()


# =========================
# 主程序：训练-保存-加载-推理
# =========================

def main():
    """End-to-end demo: train the brain, save it, reload it, schedule a new instance."""
    # Fix all RNG seeds for reproducibility.
    np.random.seed(0); random.seed(0); torch.manual_seed(0)

    # 1) Train the "thinking model" on small-scale data.
    print("==> Building datasets ...")
    train_ds = build_training_data(n_instances=320, n_jobs_range=(6,10), m=5, ops_per_job=4, seed=123)
    val_ds   = build_training_data(n_instances=80,  n_jobs_range=(8,10), m=5, ops_per_job=4, seed=456)

    # Feature dimensions are taken from the first recorded step.
    g_dim = train_ds.items[0]["g"].shape[0]
    c_dim = train_ds.items[0]["c"].shape[1]
    norm  = compute_norm_stats(train_ds)

    # Imitation-learning trainer.
    trainer = Trainer(g_dim, c_dim)
    print("==> Training ...")
    hist = trainer.fit(train_ds, val_ds, epochs=12, batch_size=64)

    # 2) Build the experience memory (training-state embeddings + teacher actions).
    print("==> Building memory (experience) ...")
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    memory = SimpleMemory(dim=128)
    trainer.model.eval()
    with torch.no_grad():
        # Write a subsample of training steps (every 5th, capped at 2000) into memory.
        for i in range(0, min(2000, len(train_ds)), 5):
            it = train_ds.items[i]
            g = (it["g"] - np.array(norm["g_mu"])) / np.array(norm["g_std"])
            Gt = torch.tensor(g,dtype=torch.float32).unsqueeze(0).to(device)
            emb = trainer.model.embed_state(Gt).cpu().numpy().reshape(-1)
            memory.add(emb, it["y"])

    # 3) Save the "thinking brain": model + normalization + memory.
    save_brain(trainer.model, norm, memory, path_dir="brain")

    # 4) Load the brain and schedule a new (larger) instance.
    print("==> Load brain & schedule a new (larger) instance ...")
    model2, norm2, mem2 = load_brain(g_dim, c_dim, path_dir="brain", device=device)
    scheduler = Scheduler(model2, norm2, memory=mem2, alpha_mem=0.25, device=device)

    test_big = build_random_problem(n_jobs=20, m=5, ops_per_job=4, seed=2025)
    mk, assign = scheduler.schedule(test_big, use_memory=True)
    print(f"[New instance] Model makespan: {mk:.2f}")

    # 5) Visualize the resulting schedule as a Gantt chart.
    plot_gantt(test_big, assign, title=f"Model schedule (big) mk={mk:.1f}")

if __name__ == "__main__":
    main()
