#!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
@Project ：V2 
@File    ：T3.py
@IDE     ：PyCharm 
@Author  ：郭星
@Date    ：2025/9/18 20:23 
'''
# NOTE: duplicate shebang/encoding declarations removed (already declared at the top of the file).
"""
根据结果导向出所有的材料
paper_pipeline.py

一键复现“跨产线共享缓冲（WIP sharing）容量与传输率优化”论文式材料的脚本（纯Python，无仿真器）：
1) 自动生成工厂参数（可替换为真实表）
2) 解析近似计算（可用度/稳定度/到达率/串行瓶颈）评估 W、θ
3) 多目标萤火虫（C-MOHFA风格精简版）分别优化 Sharing=ON/OFF
4) 导出所有材料（CSV/JSON/PNG/MD）

依赖：numpy, matplotlib, pandas
用法：python paper_pipeline.py
输出：./outputs/ 下的所有文件
"""

import os
import json
import math
import random
from dataclasses import dataclass
from typing import List, Dict, Tuple, Optional
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# =========================
# 基础数据结构
# =========================

@dataclass
class MachineParam:
    """Per-machine parameters: service rate and reliability figures."""
    omega: float     # service rate (units/hour)
    MTBF_h: float    # mean time between failures (hours)
    MTTR_h: float    # mean time to repair (hours)

@dataclass
class Line:
    """One serial production line: its machines plus economic parameters."""
    machines: List[MachineParam]
    unit_earnings: List[float]  # per-category unit earnings (the mean is used)
    input_qty: float            # external input quantity (used to normalize theta)

@dataclass
class Plant:
    """Whole plant: all lines plus cross-line sharing topology and transfer costs."""
    lines: List[Line]
    share_mask: np.ndarray      # [n, n, m-1]; 1 if line i's buffer B_j may be shared with line k
    transport_cost: np.ndarray  # [n, n, m-1]; proxy cost of one cross-line transfer

# =========================
# 解析近似：映射论文公式（无仿真）
# =========================

def availability(MTBF_h: float, MTTR_h: float) -> float:
    """Steady-state availability: 1 - MTTR/(MTBF+MTTR).

    Returns 1.0 when the cycle length is non-positive (degenerate input).
    """
    cycle = MTBF_h + MTTR_h
    if cycle <= 0:
        return 1.0
    return 1.0 - (MTTR_h / cycle)

def stability_Sa(omega_up: float, omega_dn: float) -> float:
    """Upstream/downstream service-rate ratio (stability factor).

    Capped to 10.0 when the downstream rate is effectively zero, and
    floored at 1e-9 to keep downstream ratios strictly positive.
    """
    if omega_dn > 1e-9:
        return max(1e-9, omega_up / omega_dn)
    return 10.0

def vacancy_prob(Sa: float, C: int) -> float:
    """Vacancy probability of a capacity-C buffer under stability factor Sa.

    Uses the truncated geometric-series form (1-Sa)/(1-Sa^(C+1)); the
    balanced case Sa == 1 degenerates to the uniform value 1/(C+1).
    Result is clipped to [0, 1].
    """
    Sa = max(1e-6, Sa)
    if abs(Sa - 1.0) < 1e-6:
        return 1.0 / (C + 1.0)
    numerator = 1.0 - Sa
    denominator = 1.0 - (Sa ** (C + 1))
    if abs(denominator) <= 1e-12:
        denominator = 1.0  # guard against division by ~0
    return float(np.clip(numerator / denominator, 0.0, 1.0))

def stock_prob(Sa: float, Pv: float) -> float:
    """Stocked (non-empty) probability Sa*Pv, clipped to [0, 1]."""
    product = Sa * Pv
    return float(min(1.0, max(0.0, product)))

def idle_block_probs(Sa: float, C: int) -> Tuple[float, float]:
    """Heuristic proxies for the idle (Pu) and blocking (Ps) probabilities
    of the machine adjacent to a capacity-C buffer.

    Both are halved complements of the vacancy/stock probabilities — these
    are admittedly rough surrogates, not exact queueing results.
    """
    Pv = vacancy_prob(Sa, C)
    Pt = stock_prob(Sa, Pv)
    # Blocking proxy: grows as the buffer tends toward full (low vacancy).
    Ps = 0.5 * max(0.0, 1.0 - Pv - 1e-6)
    # Idle proxy: grows as the buffer tends toward empty (low stock).
    Pu = 0.5 * max(0.0, 1.0 - Pt - 1e-6)
    return Pu, Ps

def rb_arrival_MB(omega_j: float, Pv: float, A_j: float, Pu: float, Ps: float) -> float:
    """Arrival rate from machine j into its downstream buffer (M->B layer),
    discounted by availability, idle/blocking losses, and vacancy."""
    rate = omega_j * (1.0 - Pu - Ps) * A_j * Pv
    return max(0.0, rate)

def rm_arrival_BM(omega_j: float, Rb_up: float, Pt_up: float, A_j: float, Pu: float, Ps: float) -> float:
    """Arrival rate from the upstream buffer into machine j (B->M layer):
    the upstream supply capped by the machine's effective capacity, then
    discounted by the blocking probability.

    NOTE(review): Pt_up and Pu are accepted but currently unused; they are
    kept for interface stability — confirm whether they were meant to enter
    the formula.
    """
    capacity = omega_j * A_j
    flow = min(Rb_up, capacity) * (1.0 - Ps)
    return max(0.0, flow)

def share_probs(Pv_src: float, Pt_dst: float) -> Tuple[float, float]:
    """Transfer-opportunity probabilities for (source, destination).

    NOTE(review): the original computed the two values with algebraically
    identical expressions — 1 - (1-Pv_src)(1-Pt_dst) in both cases (the
    product is commutative) — so source and destination always coincide.
    If they were meant to differ, the intended formula should be restored.
    """
    p = float(np.clip(1.0 - (1.0 - Pv_src) * (1.0 - Pt_dst), 0.0, 1.0))
    return p, p

def update_with_sharing(Rb_src: float, Pv_src: float, A_src: float,
                        Rb_dst_down: float, Pt_dst: float, Pu_src: float, Ps_src: float,
                        Ts_src: float, mask_active: bool) -> float:
    """Add the cross-line sharing gain to the source buffer's arrival rate.

    Returns Rb_src unchanged when sharing is masked off or the transfer
    rate is effectively zero.

    NOTE(review): Rb_dst_down is accepted but currently unused; kept for
    interface stability — confirm whether it should bound the gain.
    """
    if not mask_active:
        return Rb_src
    if Ts_src <= 1e-9:
        return Rb_src
    Ptp_src, _ = share_probs(Pv_src, Pt_dst)
    gain = (1.0 - Pu_src - Ps_src) * A_src * Ptp_src * Ts_src
    return max(0.0, Rb_src + gain)

@dataclass
class EvalParams:
    """Evaluation constants and representative-solution weights."""
    C_max_total: int             # total buffer-capacity budget over all buffers
    C_min_each: int              # minimum capacity per buffer
    time_horizon: float          # evaluation horizon (hours)
    transport_fee_scale: float   # scales Ts*cost into fee units
    w_negW: float = 1.0          # weight on -W when picking a representative point
    w_negTheta: float = 1.0      # weight on -theta when picking a representative point

def evaluate_solution(plant: Plant,
                      C: np.ndarray,    # (n, m-1) int buffer capacities
                      Ts: np.ndarray,   # (n, m-1) float transfer rates in [0,1]
                      prm: EvalParams,
                      sharing_on: bool = True) -> Tuple[Tuple[float,float], Dict]:
    """Analytically evaluate one (C, Ts) solution — no simulation.

    Pipeline: per-machine availability -> per-buffer vacancy/stock and
    idle/blocking proxies -> M-B arrival rates (optionally corrected for
    cross-line sharing) -> B-M arrival rates -> serial bottleneck -> theta
    and earnings W.

    Returns ((-W, -theta_mean), metrics): both objectives are negated so
    the optimizer can minimize them; `metrics` carries the raw values.
    """
    n = len(plant.lines)
    m = len(plant.lines[0].machines)
    T = prm.time_horizon
    # With sharing disabled the mask is zeroed, removing all cross-line terms.
    share_mask = plant.share_mask if sharing_on else np.zeros_like(plant.share_mask)
    cost = plant.transport_cost

    # Availability A and service rate om, per machine.
    A = np.zeros((n, m)); om = np.zeros((n, m))
    for i, line in enumerate(plant.lines):
        for j, mp in enumerate(line.machines):
            A[i,j]  = availability(mp.MTBF_h, mp.MTTR_h)
            om[i,j] = mp.omega

    Pv = np.zeros((n, m-1)); Pt = np.zeros((n, m-1))
    Pu = np.zeros((n, m-1)); Ps = np.zeros((n, m-1))
    Rb = np.zeros((n, m-1)); Rm = np.zeros((n, m))

    # Machine-to-buffer (M-B) layer: per-buffer probabilities and arrival rates.
    for i in range(n):
        for j in range(m-1):
            Sa = stability_Sa(om[i,j], om[i,j+1])
            Pv[i,j] = vacancy_prob(Sa, int(C[i,j]))
            Pt[i,j] = stock_prob(Sa, Pv[i,j])
            Pu[i,j], Ps[i,j] = idle_block_probs(Sa, int(C[i,j]))
            Rb[i,j] = rb_arrival_MB(om[i,j], Pv[i,j], A[i,j], Pu[i,j], Ps[i,j])

    # Cross-line sharing correction of Rb (applied layer by layer).
    if sharing_on:
        for j in range(m-1):
            Rb_new = Rb[:, j].copy()
            for i in range(n):
                for k in range(n):
                    if k == i: continue
                    if share_mask[i, k, j] <= 0: continue
                    # Effective downstream rate of the destination line.
                    Rb_dst_down = om[k, j+1] * A[k, j+1]
                    Rb_new[i] = update_with_sharing(
                        Rb_src=Rb_new[i], Pv_src=Pv[i,j], A_src=A[i,j],
                        Rb_dst_down=Rb_dst_down, Pt_dst=Pt[k,j], Pu_src=Pu[i,j], Ps_src=Ps[i,j],
                        Ts_src=Ts[i,j], mask_active=True
                    )
            Rb[:, j] = Rb_new

    # Buffer-to-machine (B-M) layer: arrival rate at each machine.
    for i in range(n):
        Rm[i,0] = om[i,0] * A[i,0]  # external supply approximation for the first machine
        for j in range(1, m):
            Rm[i,j] = rm_arrival_BM(om[i,j], Rb[i,j-1], Pt[i,j-1], A[i,j], Pu[i,j-1], Ps[i,j-1])

    eff_rate = np.min(Rm, axis=1)   # serial bottleneck: slowest stage limits the line
    theta_i = np.zeros(n)
    for i, line in enumerate(plant.lines):
        In = max(1e-6, line.input_qty)
        theta_i[i] = eff_rate[i] / In
    theta_mean = float(np.mean(theta_i))

    # Earnings W = unit earnings * output - cross-line transfer fees (Ts*cost*scale).
    W_lines = np.zeros(n); trans_cost = 0.0
    for i, line in enumerate(plant.lines):
        unit_price = float(np.mean(line.unit_earnings)) if line.unit_earnings else 1.0
        W_lines[i] = unit_price * eff_rate[i] * T
        for j in range(m-1):
            for k in range(n):
                if k==i: continue
                if share_mask[i,k,j] > 0:
                    trans_cost += Ts[i,j] * cost[i,k,j] * prm.transport_fee_scale
    W_total = float(np.sum(W_lines) - trans_cost)

    # Negate so "minimize" corresponds to maximizing W and theta.
    f1 = -W_total
    f2 = -theta_mean
    metrics = dict(W=W_total, theta=theta_mean, theta_i=theta_i, eff_rate=eff_rate)
    return (f1, f2), metrics

# =========================
# 参数自动生成（可替换为真实表）
# =========================

def build_demo_plant(n_lines=3, m_machines=9, seed=7) -> Tuple[Plant, EvalParams]:
    """Generate a reproducible demo plant (replaceable with real data tables).

    Draws machine rates and reliability figures from a seeded RNG, builds
    the sharing topology (B2/B4/B6 all interline; B3/B5 only L2<->L3;
    B7 only L1<->L3), assigns transfer costs where sharing is allowed, and
    returns the plant together with fixed evaluation parameters.
    """
    rng = np.random.RandomState(seed)
    lines: List[Line] = []
    for i in range(n_lines):
        machines = []
        for j in range(m_machines):
            low_per_min  = rng.randint(8, 14) + i
            high_per_min = rng.randint(15, 24) + i
            if high_per_min <= low_per_min: high_per_min = low_per_min + 2
            # Intended: convert the mean "units/minute" to "units/hour".
            # NOTE(review): `* 60.0 / 60.0` is a no-op, so omega keeps the
            # per-minute magnitude while being labeled units/hour — confirm
            # the intended unit conversion.
            per_hour = 0.5 * (low_per_min + high_per_min) * 60.0 / 60.0
            omega = per_hour  # units/hour (see NOTE above)
            MTBF_h = float(rng.randint(240, 360))
            # MTTR drawn as 20-35 minutes, converted to hours.
            MTTR_h = float(rng.uniform(20.0, 35.0)) / 60.0
            machines.append(MachineParam(omega=omega, MTBF_h=MTBF_h, MTTR_h=MTTR_h))
        unit_earnings = list(3.5 + 1.5 * rng.rand(5))
        input_qty = float(8000 + 2000 * rng.rand())
        lines.append(Line(machines=machines, unit_earnings=unit_earnings, input_qty=input_qty))

    n = n_lines; m = m_machines
    share_mask = np.zeros((n, n, m-1), dtype=int)
    for i in range(n):
        for k in range(n):
            if i == k: continue
            for j in [1,3,5]:   # B2,B4,B6 interconnected across all lines
                share_mask[i,k,j] = 1
            for j in [2,4]:     # B3,B5 only L2<->L3
                if (i in [1,2]) and (k in [1,2]):
                    share_mask[i,k,j] = 1
            for j in [6]:       # B7 only L1<->L3
                if (i in [0,2]) and (k in [0,2]):
                    share_mask[i,k,j] = 1

    # Integer transfer costs in [4,5] wherever sharing is allowed.
    cost = np.zeros((n, n, m-1), dtype=float)
    for i in range(n):
        for k in range(n):
            if i==k: continue
            for j in range(m-1):
                if share_mask[i,k,j] == 1:
                    cost[i,k,j] = float(rng.randint(4,6))

    plant = Plant(lines=lines, share_mask=share_mask, transport_cost=cost)
    prm = EvalParams(
        C_max_total=1500,
        C_min_each=5,
        time_horizon=24.0,
        transport_fee_scale=10.0,
        w_negW=1.0,
        w_negTheta=1.0
    )
    return plant, prm

# =========================
# 多目标萤火虫（C-MOHFA风格精简）
# =========================

@dataclass
class FAParams:
    """Hyper-parameters of the simplified multi-objective firefly algorithm."""
    pop: int = 80           # population size
    gens: int = 160         # number of generations
    elite: int = 6          # elites carried into the next generation
    attr0: float = 1.0      # base attractiveness
    gamma: float = 1.0      # attractiveness decay with squared distance
    kappa_hi: float = 0.15  # initial random-perturbation scale
    kappa_lo: float = 0.05  # final random-perturbation scale
    sa_T0: float = 60.0     # initial temperature for Ts simulated-annealing refinement
    sa_alpha: float = 0.97  # SA cooling rate
    seed: int = 1           # RNG seed

def fast_non_dominated_sort(F: np.ndarray) -> List[List[int]]:
    """NSGA-II fast non-dominated sort over a (P, M) objective matrix
    (minimization convention).

    Returns fronts as lists of row indices; fronts[0] is the Pareto set.
    Domination: p dominates q iff F[p] <= F[q] everywhere with at least
    one strict inequality.
    """
    P = F.shape[0]
    dominated_by = [[] for _ in range(P)]   # points each p dominates
    dom_count = np.zeros(P, dtype=int)      # number of points dominating p
    fronts: List[List[int]] = [[]]
    for p in range(P):
        for q in range(P):
            if p == q:
                continue
            if np.all(F[p] <= F[q]) and np.any(F[p] < F[q]):
                dominated_by[p].append(q)
            elif np.all(F[q] <= F[p]) and np.any(F[q] < F[p]):
                dom_count[p] += 1
        if dom_count[p] == 0:
            fronts[0].append(p)
    level = 0
    while fronts[level]:
        nxt = []
        for p in fronts[level]:
            for q in dominated_by[p]:
                dom_count[q] -= 1
                if dom_count[q] == 0:
                    nxt.append(q)
        fronts.append(nxt)
        level += 1
    fronts.pop()  # last constructed front is always empty
    return fronts

def crowding_distance(front: List[int], F: np.ndarray) -> np.ndarray:
    """Crowding distance of each member of `front` (same ordering).

    Boundary points per objective get a large sentinel (1e9); interior
    points accumulate the normalized neighbor gap per objective.
    """
    size = len(front)
    if size == 0:
        return np.array([])
    dist = np.zeros(size, dtype=float)
    rows = np.array(front, dtype=int)
    for obj in range(F.shape[1]):
        order = np.argsort(F[rows, obj])
        vals = F[rows[order], obj]
        dist[order[0]] = dist[order[-1]] = 1e9
        span = max(1e-12, vals[-1] - vals[0])
        for pos in range(1, size - 1):
            dist[order[pos]] += (vals[pos + 1] - vals[pos - 1]) / span
    return dist

class FireflyMO:
    """Simplified multi-objective firefly optimizer (C-MOHFA style).

    Decision variables: integer buffer capacities C[i,j] (total budget
    constrained by C_max_total, per-buffer minimum C_min_each) and
    transfer rates Ts[i,j] in [0,1], masked by the sharing topology.
    Objectives: minimize (-W, -theta) as returned by evaluate_solution.
    Selection uses NSGA-II style non-dominated sorting plus crowding
    distance; Ts is locally refined with a small simulated-annealing walk.
    """
    def __init__(self, plant: Plant, prm: EvalParams, fap: FAParams, sharing_on: bool):
        self.plant = plant
        self.prm = prm
        self.fap = fap
        self.sharing_on = sharing_on
        self.n = len(plant.lines)
        self.m = len(plant.lines[0].machines)
        self.rng = np.random.RandomState(fap.seed)
        self.Cmin = prm.C_min_each
        self.Cmax_total = prm.C_max_total
        self.nvarC = self.n * (self.m - 1)
        self.nvarTs = self.n * (self.m - 1)
        # Ts is only meaningful where at least one destination shares buffer j.
        allow = np.zeros((self.n, self.m - 1), dtype=float)
        for i in range(self.n):
            for j in range(self.m - 1):
                allow[i,j] = 1.0 if np.any(self.plant.share_mask[i, :, j] > 0) else 0.0
        self.Ts_allow = allow if self.sharing_on else np.zeros_like(allow)

    # --- Initialization strategies ---
    def init_random(self):
        """Random capacities near Cmin and uniform-random Ts where allowed."""
        C = self.rng.randint(self.Cmin, self.Cmin+20, size=(self.n, self.m-1))
        self._scale_C_total(C)
        Ts = self.rng.rand(self.n, self.m-1) * self.Ts_allow
        return C, Ts

    def init_gauss(self):
        """Bell-shaped capacity allocation along each line; Ts fixed at 0.3."""
        C = np.zeros((self.n, self.m-1), dtype=int)
        for i in range(self.n):
            x = np.linspace(0,1,self.m-1)
            mu = self.rng.uniform(0.3,0.7)
            sigma = self.rng.uniform(0.12,0.25)
            w = np.exp(-0.5*((x-mu)**2)/(sigma**2)); w = (w+1e-3)/np.sum(w+1e-3)
            alloc = (w * (self.Cmax_total/self.n)).astype(int)
            C[i] = np.maximum(alloc, self.Cmin)
        self._scale_C_total(C)
        Ts = 0.3*np.ones((self.n,self.m-1))*self.Ts_allow
        return C, Ts

    def init_rate(self):
        """Capacity proportional to upstream machine rates; Ts fixed at 0.5."""
        C = np.zeros((self.n, self.m-1), dtype=int)
        for i, ln in enumerate(self.plant.lines):
            up = np.array([ln.machines[j].omega for j in range(self.m-1)], dtype=float)
            w = up / max(1e-9, np.sum(up))
            alloc = (w * (self.Cmax_total/self.n)).astype(int)
            C[i] = np.maximum(alloc, self.Cmin)
        self._scale_C_total(C)
        Ts = 0.5*np.ones((self.n,self.m-1))*self.Ts_allow
        return C, Ts

    def _scale_C_total(self, C: np.ndarray):
        """Repair C in place: enforce the per-buffer minimum, then shave the
        largest buffers until the total budget C_max_total is respected."""
        C[C < self.Cmin] = self.Cmin
        tot = int(np.sum(C))
        if tot <= self.Cmax_total: return
        over = tot - self.Cmax_total
        idxs = [(i,j) for i in range(self.n) for j in range(self.m-1)]
        idxs.sort(key=lambda ij: C[ij[0], ij[1]], reverse=True)
        # Greedy pass: cut up to a quarter of the excess from the largest first.
        for (i,j) in idxs:
            if over <= 0: break
            reducible = C[i,j] - self.Cmin
            if reducible <= 0: continue
            d = min(reducible, max(1, over//4))
            C[i,j] -= d; over -= d
        # Fallback: trim one unit at a time until the budget holds.
        while np.sum(C) > self.Cmax_total:
            for (i,j) in idxs:
                if np.sum(C) <= self.Cmax_total: break
                if C[i,j] > self.Cmin:
                    C[i,j] -= 1

    # Flatten (C, Ts) into a single decision vector and back.
    def pack(self, C, Ts): return np.concatenate([C.flatten().astype(float), Ts.flatten().astype(float)], axis=0)
    def unpack(self, x):
        C = x[:self.nvarC].reshape(self.n, self.m-1)
        Ts = x[self.nvarC:].reshape(self.n, self.m-1)
        return C.astype(int), Ts

    def fitness(self, x) -> Tuple[np.ndarray, Dict]:
        """Repair the decision vector (budget + mask) and evaluate it."""
        C, Ts = self.unpack(x)
        C_rep = C.copy(); self._scale_C_total(C_rep)
        Ts = np.clip(Ts, 0.0, 1.0) * self.Ts_allow
        (f1,f2), met = evaluate_solution(self.plant, C_rep, Ts, self.prm, sharing_on=self.sharing_on)
        return np.array([f1,f2], dtype=float), met

    def refine_Ts_SA(self, C, Ts_init):
        """Small simulated-annealing walk over Ts with C fixed (40 steps).

        NOTE(review): acceptance compares each neighbor against the best
        solution so far rather than the current one — confirm that this
        best-relative acceptance is intended.
        """
        Ts = Ts_init.copy()
        allow = self.Ts_allow
        T = self.fap.sa_T0
        best_Ts = Ts.copy()
        best_F, _ = self.fitness(self.pack(C, Ts))
        for _ in range(40):
            nbh = Ts + self.rng.randn(*Ts.shape)*0.08
            nbh = np.clip(nbh, 0.0, 1.0) * allow
            F_new, _ = self.fitness(self.pack(C, nbh))
            if (F_new <= best_F).all() or self.rng.rand() < math.exp(-np.sum(F_new - best_F) / max(1e-6, T)):
                Ts = nbh
                if (F_new <= best_F).all():
                    best_F = F_new; best_Ts = Ts.copy()
            T *= self.fap.sa_alpha
        return best_Ts

    def run(self, verbose=True):
        """Run the optimizer; returns (archive objectives, archive details).

        Details are (C, Ts, metrics) tuples for each archived first-front
        member. The archive always contains at least one solution.
        """
        P, G = self.fap.pop, self.fap.gens
        # Mixed initialization: one third each random/gaussian/rate-based.
        inits=[]
        for _ in range(P//3): inits.append(self.init_random())
        for _ in range(P//3): inits.append(self.init_gauss())
        while len(inits) < P: inits.append(self.init_rate())
        X = np.stack([self.pack(C, Ts) for C,Ts in inits], axis=0)

        F = np.zeros((P,2)); MET=[None]*P
        for i in range(P): F[i], MET[i] = self.fitness(X[i])

        # External archive of the first (non-dominated) front.
        archX=[]; archF=[]
        def update_archive(Xlist, Flist):
            nonlocal archX, archF
            allX = archX + [x.copy() for x in Xlist]
            allF = archF + [f.copy() for f in Flist]
            Farr = np.stack(allF, axis=0)
            fronts = fast_non_dominated_sort(Farr)
            newX, newF = [], []
            for idx in fronts[0]:
                newX.append(allX[idx].copy()); newF.append(allF[idx].copy())
            archX, archF = newX, newF

        update_archive(list(X), list(F))

        for g in range(1, G+1):
            # Elites: best-ranked, then most-spread (crowding) individuals.
            fronts = fast_non_dominated_sort(F)
            elite_idx=[]
            for fr in fronts:
                if len(elite_idx) >= self.fap.elite: break
                cd = crowding_distance(fr, F); order = np.argsort(-cd)
                for k in order:
                    if len(elite_idx) < self.fap.elite:
                        elite_idx.append(fr[k])

            elites = X[elite_idx].copy()
            attr0, gamma = self.fap.attr0, self.fap.gamma
            # Perturbation scale anneals linearly from kappa_hi to kappa_lo.
            kappa = self.fap.kappa_hi - (self.fap.kappa_hi - self.fap.kappa_lo) * (g / G)

            # Offspring: firefly attraction moves + gaussian noise + SA on Ts.
            Y=[]
            while len(Y) < P - self.fap.elite:
                a, b = self.rng.randint(0, P), self.rng.randint(0, P)
                xa, xb = X[a].copy(), X[b].copy()
                Fa, Fb = F[a], F[b]
                move_from, move_to = (xa, xb) if np.any(Fa > Fb) else (xb, xa)
                Ca, Ta = self.unpack(move_from)
                Cb, Tb = self.unpack(move_to)
                d = np.linalg.norm(move_from - move_to)
                attr = attr0 * math.exp(-gamma * (d**2))
                Cnew = Ca + attr*(Cb - Ca) + self.rng.randn(*Ca.shape)*kappa*10.0
                Tsnew = Ta + attr*(Tb - Ta) + self.rng.randn(*Ta.shape)*kappa
                Cnew = np.maximum(self.Cmin, np.round(Cnew)).astype(int)
                self._scale_C_total(Cnew)
                Tsnew = np.clip(Tsnew, 0.0, 1.0) * self.Ts_allow
                Tsnew = self.refine_Ts_SA(Cnew, Tsnew)
                Y.append(self.pack(Cnew, Tsnew))

            Xnew = np.vstack([elites] + Y)
            Fnew = np.zeros_like(F); METnew=[None]*P
            for i in range(P): Fnew[i], METnew[i] = self.fitness(Xnew[i])

            # Environmental selection over parents + offspring (mu+lambda).
            Xall = np.vstack([X, Xnew]); Fall = np.vstack([F, Fnew])
            fronts_all = fast_non_dominated_sort(Fall)
            Z=[]
            for fr in fronts_all:
                if len(Z)+len(fr) <= P:
                    Z += fr
                else:
                    cd = crowding_distance(fr, Fall)
                    need = P - len(Z)
                    order = np.argsort(-cd)[:need]
                    Z += [fr[k] for k in order]
                    break
            X = Xall[Z]; F = Fall[Z]
            update_archive(list(X), list(F))

            if verbose and (g % 20 == 0 or g == 1):
                bestW = -np.min(F[:,0]); bestTh = -np.min(F[:,1])
                print(f"[Gen {g:4d} | {'ON' if self.sharing_on else 'OFF'}] front={len(archX)}  best W≈{bestW:.2f}  θ≈{bestTh:.4f}")

        # Re-evaluate the archive to recover per-solution metrics.
        details=[]
        for x in archX:
            C, Ts = self.unpack(x)
            _, met = self.fitness(self.pack(C, Ts))
            details.append((C, Ts, met))
        return np.stack(archF, axis=0), details

# =========================
# 导出工具
# =========================

def ensure_dir(path: str):
    """Create `path` (including parents) if it does not already exist."""
    os.makedirs(path, exist_ok=True)

def plot_fronts(F_on: np.ndarray, F_off: np.ndarray, out_png: str):
    """Scatter both Pareto fronts (objectives negated back to W, theta)
    and save the figure as a PNG."""
    plt.figure(figsize=(7,4.8))
    # Plot ON first, then OFF, skipping empty fronts.
    for front, label in ((F_on, "Sharing ON"), (F_off, "Sharing OFF")):
        if front.size > 0:
            plt.scatter(-front[:,0], -front[:,1], s=18, label=label, alpha=0.85)
    plt.xlabel("Total earnings W (↑ better)")
    plt.ylabel("Mean throughput θ (↑ better)")
    plt.title("Pareto Fronts: Sharing ON vs OFF")
    plt.grid(True, alpha=0.3)
    plt.legend()
    plt.tight_layout()
    plt.savefig(out_png, dpi=200)
    plt.close()

def pick_representative(F: np.ndarray, wW=1.0, wT=1.0) -> int:
    """Index of the front member minimizing the weighted sum of the two
    (negated) objectives — an equal-weight compromise by default."""
    scores = wW * F[:, 0] + wT * F[:, 1]
    return int(np.argmin(scores))

def save_matrix_csv(path: str, M: np.ndarray, row_prefix="Line", col_prefix="B"):
    """Write matrix M as CSV with 1-based prefixed row/column labels."""
    n_rows, n_cols = M.shape
    frame = pd.DataFrame(
        M,
        index=[f"{row_prefix}{i+1}" for i in range(n_rows)],
        columns=[f"{col_prefix}{j+1}" for j in range(n_cols)],
    )
    frame.to_csv(path, encoding="utf-8-sig")

def save_plant_tables(plant: Plant, out_dir: str):
    """Export the plant's parameter tables: machines, per-line income/input,
    and per-buffer-layer sharing topology and transfer costs."""
    n = len(plant.lines)
    m = len(plant.lines[0].machines)
    labels = [f"L{i+1}" for i in range(n)]

    # Machine table: one row per (line, machine) pair.
    machine_rows = [
        dict(line=i+1, machine=j+1, omega=mp.omega, MTBF_h=mp.MTBF_h, MTTR_h=mp.MTTR_h)
        for i, line in enumerate(plant.lines)
        for j, mp in enumerate(line.machines)
    ]
    pd.DataFrame(machine_rows).to_csv(os.path.join(out_dir, "machines.csv"),
                                      index=False, encoding="utf-8-sig")

    # Per-line earnings and external input quantity.
    income_rows = [
        dict(line=i+1,
             earnings_mean=np.mean(line.unit_earnings),
             earnings_list=";".join([f"{x:.3f}" for x in line.unit_earnings]),
             input_qty=line.input_qty)
        for i, line in enumerate(plant.lines)
    ]
    pd.DataFrame(income_rows).to_csv(os.path.join(out_dir, "line_income_input.csv"),
                                     index=False, encoding="utf-8-sig")

    # Sharing topology & transfer cost: one n x n table per buffer layer.
    for j in range(m-1):
        pd.DataFrame(plant.share_mask[:,:,j], columns=labels, index=labels).to_csv(
            os.path.join(out_dir, f"share_mask_B{j+1}.csv"), encoding="utf-8-sig")
        pd.DataFrame(plant.transport_cost[:,:,j], columns=labels, index=labels).to_csv(
            os.path.join(out_dir, f"transport_cost_B{j+1}.csv"), encoding="utf-8-sig")

def save_pareto_csv(path: str, F: np.ndarray):
    """Persist a Pareto front as positive (W, theta) pairs; an empty front
    yields a header-only CSV."""
    if F.size == 0:
        frame = pd.DataFrame(columns=["W","theta"])
    else:
        frame = pd.DataFrame({"W": -F[:,0], "theta": -F[:,1]})
    frame.to_csv(path, index=False, encoding="utf-8-sig")

def save_metrics_json(path: str, metrics: Dict):
    """Serialize a metrics dict to JSON, converting numpy scalars to float
    and array-likes to nested lists."""
    serializable = {}
    for key, val in metrics.items():
        serializable[key] = float(val) if np.isscalar(val) else np.array(val).tolist()
    with open(path, "w", encoding="utf-8") as f:
        json.dump(serializable, f, ensure_ascii=False, indent=2)

def write_report_md(out_dir:str, context:Dict):
    """Assemble and write the markdown report (README_report.md).

    `context` must provide `on_points`/`off_points` (front sizes) and may
    provide `on_summary`/`off_summary` dicts with keys W and theta; the
    comparison section is only emitted when both summaries are present.
    Section strings are emitted verbatim.
    """
    md = []
    md.append("# Report: Cross-line WIP Sharing Optimization (Paper-style)\n")
    md.append("## 1. Data Generation\n")
    md.append("- **Machines**: service rate ω (units/hour), reliability MTBF/MTTR.\n"
              "- **Lines**: five-category unit earnings (mean used), input quantity for θ normalization.\n"
              "- **Topology**: sharing mask on selected buffers (B2/B4/B6 all interline; B3/B5 only L2↔L3; B7 only L1↔L3).\n"
              "- **Transport cost**: integers in [4,6] where sharing is allowed.\n")
    md.append("这些字段可一键替换为你的真实表格，脚本将保持不变直接复现全部结果。\n")

    md.append("## 2. Analytical Evaluation (No Simulation)\n")
    md.append("核心映射：可用度 A、稳定度 Sa、空槽率 Pv、库存率 Pt、空闲/阻塞 Pu/Ps、到达率 Rb/Rm；串行瓶颈取 min 得到有效产出率，\n"
              "再归一化得到 θ；收益 W=单位收益×产出 - (Ts×运输成本×缩放)。\n")

    md.append("## 3. Optimization (C-MOHFA style, simplified)\n")
    md.append("- 决策：缓冲容量 C[i,j]（整数，总量≤C_max_total）、传输率 Ts[i,j]∈[0,1]（受拓扑掩码约束）。\n"
              "- 目标：最大化 W 与 θ（实现为最小化 -W 与 -θ）。\n"
              "- 算法：非支配排序+拥挤距离；个体间萤火吸引移动+高斯扰动；对 Ts 做小步 SA 邻域微调；维护第一前沿档案。\n"
              "- 两个场景：Sharing=ON 与 Sharing=OFF 分别优化，前沿对比。\n")

    md.append("## 4. Outputs\n")
    md.append("- `machines.csv` / `line_income_input.csv`：工厂参数表\n"
              "- `share_mask_B*.csv` / `transport_cost_B*.csv`：共享拓扑与运输成本（分层）\n"
              "- `pareto_on.csv` / `pareto_off.csv`：帕累托前沿点 (W, θ)\n"
              "- `pareto.png`：ON vs OFF 前沿散点\n"
              "- `C_on.csv` / `Ts_on.csv` / `metrics_on.json`：代表性解（共享ON）\n"
              "- `C_off.csv` / `Ts_off.csv` / `metrics_off.json`：代表性解（共享OFF）\n")

    md.append("## 5. Interpretation: Representativeness & Roles\n")
    md.append("- **ω (service rate)**：反映工序产能上限，是瓶颈定位与缓冲分配的基础。\n"
              "- **MTBF/MTTR → Availability A**：设备可靠性决定有效产能；A越低，越需要缓冲吸收波动。\n"
              "- **C (buffer capacity)**：在上下游速率不匹配时提供去耦；总量约束下的分配体现了“在哪些层更需要缓冲”。\n"
              "- **Ts (transfer rate)**：跨线共享的强度，提升饱和线的供给或释放拥堵，代价是运输成本与潜在扰动。\n"
              "- **W (earnings)**：将产能最终映射为经济指标（扣除跨线代价）。\n"
              "- **θ (throughput)**：标准化的产出能力，便于跨线/跨场景对比。\n")

    md.append("## 6. Results Summary\n")
    md.append(f"- 前沿点数（ON）：{context['on_points']}；前沿点数（OFF）：{context['off_points']}\n")
    # ON-vs-OFF comparison is only written when both summaries exist.
    if context.get("on_summary") and context.get("off_summary"):
        md.append(f"- 代表性解（ON）W={context['on_summary']['W']:.2f}, θ={context['on_summary']['theta']:.4f}\n")
        md.append(f"- 代表性解（OFF）W={context['off_summary']['W']:.2f}, θ={context['off_summary']['theta']:.4f}\n")
        md.append(f"- ON 相对 OFF 的收益差 ΔW={context['on_summary']['W']-context['off_summary']['W']:.2f}；"
                  f"吞吐差 Δθ={context['on_summary']['theta']-context['off_summary']['theta']:.4f}\n")

    md.append("## 7. How to Replace with Your Real Data\n")
    md.append("- 将 `build_demo_plant()` 内的随机生成替换为：\n"
              "  `machines.csv`（每行：line,machine,omega,MTBF_h,MTTR_h）、\n"
              "  `line_income_input.csv`（line,earnings_mean,input_qty 或直接 earnings_list）、\n"
              "  `share_mask_B*.csv` 与 `transport_cost_B*.csv`（每个缓冲层 j 一张 n×n 表）。\n"
              "  然后用相同流程运行即可生成新报告。\n")

    md.append("## 8. Improvement Points（对应论文可扩展）\n")
    md.append("- **更精确的解析模型**：将 Pu/Ps 由启发式代理替换为更严格的 Markov/队列近似（如 M/M/1/K 串联近似）。\n"
              "- **多目标权重与公平性**：在 W/θ外加入 WIP、在制时间、能耗或跨线稳定性（方差/峰值）。\n"
              "- **鲁棒/灵敏度分析**：对 ω、A、cost 做±x% 扰动，输出置信带与参数敏感性。\n"
              "- **‘共享 ON/OFF’之外的策略**：仅对若干层开放共享、设置 Ts 上限、按时段/拥堵动态调整 Ts。\n"
              "- **真实数据驱动**：把拓扑、工艺约束、切换/换型成本、班次/停机窗纳入约束集，提升落地效果。\n")

    with open(os.path.join(out_dir, "README_report.md"), "w", encoding="utf-8") as f:
        f.write("\n".join(md))


# =========================
# 主流程
# =========================

def main():
    """End-to-end pipeline: generate plant data, optimize ON/OFF sharing
    scenarios, and export all paper-style materials to ./outputs."""
    # Fix random seeds for reproducibility.
    np.random.seed(0); random.seed(0)

    out_dir = "outputs"
    ensure_dir(out_dir)

    # 1) Generate plant parameters (replaceable with real data tables).
    plant, prm = build_demo_plant(n_lines=3, m_machines=9, seed=7)
    save_plant_tables(plant, out_dir)

    # Save the experiment configuration.
    config = dict(
        n_lines=len(plant.lines),
        m_machines=len(plant.lines[0].machines),
        C_max_total=prm.C_max_total,
        C_min_each=prm.C_min_each,
        time_horizon=prm.time_horizon,
        transport_fee_scale=prm.transport_fee_scale,
        optimizer=dict(pop=80, gens=160, elite=6, seed=1)
    )
    with open(os.path.join(out_dir, "config.json"), "w", encoding="utf-8") as f:
        json.dump(config, f, ensure_ascii=False, indent=2)

    # 2) Optimize with sharing enabled.
    fap = FAParams(pop=80, gens=160, elite=6, seed=1)
    print("\n>>> Optimize with Sharing = ON")
    solver_on  = FireflyMO(plant, prm, fap, sharing_on=True)
    F_on, details_on = solver_on.run(verbose=True)
    save_pareto_csv(os.path.join(out_dir, "pareto_on.csv"), F_on)

    # 3) Optimize with sharing disabled (baseline).
    print("\n>>> Optimize with Sharing = OFF")
    solver_off = FireflyMO(plant, prm, fap, sharing_on=False)
    F_off, details_off = solver_off.run(verbose=True)
    save_pareto_csv(os.path.join(out_dir, "pareto_off.csv"), F_off)

    # 4) Plot the two Pareto fronts.
    plot_fronts(F_on, F_off, os.path.join(out_dir, "pareto.png"))

    # 5) Pick representative solutions (equal weights) and export them.
    on_summary = None; off_summary = None
    if F_on.size>0:
        idx_on = pick_representative(F_on, prm.w_negW, prm.w_negTheta)
        C_on, Ts_on, met_on = details_on[idx_on]
        save_matrix_csv(os.path.join(out_dir, "C_on.csv"),  C_on, row_prefix="L", col_prefix="B")
        save_matrix_csv(os.path.join(out_dir, "Ts_on.csv"), Ts_on, row_prefix="L", col_prefix="B")
        save_metrics_json(os.path.join(out_dir, "metrics_on.json"), met_on)
        on_summary = dict(W=met_on["W"], theta=met_on["theta"])

    if F_off.size>0:
        idx_off = pick_representative(F_off, prm.w_negW, prm.w_negTheta)
        C_off, Ts_off, met_off = details_off[idx_off]
        save_matrix_csv(os.path.join(out_dir, "C_off.csv"),  C_off, row_prefix="L", col_prefix="B")
        save_matrix_csv(os.path.join(out_dir, "Ts_off.csv"), Ts_off, row_prefix="L", col_prefix="B")
        save_metrics_json(os.path.join(out_dir, "metrics_off.json"), met_off)
        off_summary = dict(W=met_off["W"], theta=met_off["theta"])

    # 6) Write the markdown report (material explanations + improvement points).
    ctx = dict(
        on_points=(0 if F_on.size==0 else F_on.shape[0]),
        off_points=(0 if F_off.size==0 else F_off.shape[0]),
        on_summary=on_summary,
        off_summary=off_summary
    )
    write_report_md(out_dir, ctx)

    print(f"\nAll materials exported to: {os.path.abspath(out_dir)}")
    print("Files:")
    for fn in sorted(os.listdir(out_dir)):
        print(" -", fn)

# Script entry point: run the full pipeline when executed directly.
if __name__ == "__main__":
    main()
