#!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
@Project ：V2 
@File    ：T2.py
@IDE     ：PyCharm 
@Author  ：郭星
@Date    ：2025/9/18 17:26 
'''
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Paper-like reimplementation (pure Python, no simulator):
Cross-line WIP Sharing: Buffer Allocation + Transfer Rates Optimization
----------------------------------------------------------------------
- Deterministic factory parameter generator (capacity, MTBF/MTTR, sharing mask, transport cost)
- Analytical surrogate per paper formulas (availability, stability, vacancy/stock, arrivals)
- Multi-objective Firefly (C-MOHFA style, simplified) to optimize:
    * Buffer capacities C[i,j] (integers; sum <= C_max_total)
    * Cross-line transfer rates Ts[i,j] in [0,1] (masked by topology)
- Objectives to maximize: Total earnings W and Mean throughput θ (we minimize -W and -θ)
- Outputs:
    1) Pareto fronts (Sharing ON/OFF)
    2) Representative solution tables (C, Ts, per-line throughput, total W)
    3) ON vs OFF summary comparison
参数规则：build_demo_plant() 里我用统一规则生成 ω、MTBF、MTTR、共享拓扑与运输成本；这些是确定性可复现的（给定 seed）。你可以按你的工厂，将这些数组替换为真实表格即可。

优化变量：缓冲容量 C[i,j]（整数，合计不超过 C_max_total），以及对应缓冲层级的跨线传输率 Ts[i,j]∈[0,1]（仅在允许共享的缓冲上有效）。

目标：最大化收益 W 与平均吞吐 θ（代码内用 -W 与 -θ 作为最小化目标）。收益考虑单位销售收益与按 Ts*cost*scale 估算的运输代价。

算法：简化 C-MOHFA：非支配排序 + 拥挤距离保档案，个体间吸引移动 + 高斯扰动探索；在固定 C 上对 Ts 做一个小步模拟退火邻域微调，提高收敛质量。

输出：

Pareto 前沿图：W 与 θ 的散点（Sharing ON vs OFF）。

代表性解表：缓冲容量 C、传输率 Ts、各线 θ_i 与总收益 W。

摘要对比：ON vs OFF 的 W、θ 与逐线 θ_i。

如果你要把表格导出成 CSV/Excel或按论文版式生成表 5/6 的格式，我可以在这份代码里加上 csv/pandas 导出；或者如果你把你工厂的真实参数表发我，我会把 build_demo_plant() 替换为读取你的表，并保持其他模块不变，直接产出与论文相同结构的结果与对比图。
Author: you
"""

import math
import random
from dataclasses import dataclass
from typing import List, Dict, Tuple, Optional
import numpy as np
import matplotlib.pyplot as plt

# =========================
# 基本数据结构
# =========================

@dataclass
class MachineParam:
    """Reliability and throughput parameters of a single machine."""
    omega: float     # service rate (units/hour at nominal)
    MTBF_h: float    # mean time between failure (hours)
    MTTR_h: float    # mean time to repair (hours)

@dataclass
class Line:
    """One serial production line: machines plus its economic parameters."""
    machines: List[MachineParam]
    unit_earnings: List[float]  # per-unit earnings samples; evaluate_solution uses their mean as the price
    input_qty: float  # external inputs over horizon (for normalization in theta)

@dataclass
class Plant:
    """The whole factory: parallel lines plus the cross-line sharing topology."""
    lines: List[Line]
    share_mask: np.ndarray    # [n, n, m-1]  1 if buffer j of line i can share with line k
    transport_cost: np.ndarray  # [n, n, m-1] cost per transfer (proxy)

# =========================
# 解析近似（映射论文公式）
# =========================

def availability(MTBF_h: float, MTTR_h: float) -> float:
    """Steady-state availability A = 1 - MTTR / (MTBF + MTTR).

    A degenerate cycle (MTBF + MTTR <= 0) is treated as fully available.
    """
    cycle = MTBF_h + MTTR_h
    if cycle > 0:
        return 1.0 - MTTR_h / cycle
    return 1.0

def stability_Sa(omega_up: float, omega_dn: float) -> float:
    """Stability ratio Sa = omega_up / omega_dn, clamped away from zero.

    A (near-)zero downstream rate maps to the sentinel value 10.0.
    """
    if omega_dn > 1e-9:
        return max(1e-9, omega_up / omega_dn)
    return 10.0

def vacancy_prob(Sa: float, C: int) -> float:
    """Vacancy probability Pv = (1 - Sa) / (1 - Sa^(C+1)), clipped to [0, 1].

    The expression degenerates as Sa -> 1; the analytic limit 1/(C+1)
    is substituted there.  Sa itself is floored at 1e-6.
    """
    Sa = max(1e-6, Sa)
    if abs(Sa - 1.0) < 1e-6:
        return 1.0 / (C + 1.0)
    denom = 1.0 - Sa ** (C + 1)
    if abs(denom) <= 1e-12:
        denom = 1.0  # numerical guard against a vanishing denominator
    return float(np.clip((1.0 - Sa) / denom, 0.0, 1.0))

def stock_prob(Sa: float, Pv: float) -> float:
    """Stock (buffer-full) probability Pt = Sa * Pv, clipped to [0, 1]."""
    product = Sa * Pv
    return float(min(1.0, max(0.0, product)))

def idle_block_probs(Sa: float, C: int) -> Tuple[float, float]:
    """Heuristic starvation/blocking probabilities (Pu, Ps) for one buffer.

    When upstream is faster (Sa > 1) blocking rises; when slower (Sa < 1)
    idling rises.  Both are derived from the buffer's Pv and Pt.
    """
    vac = vacancy_prob(Sa, C)
    stk = stock_prob(Sa, vac)
    blocking = 0.5 * max(0.0, 1.0 - vac - 1e-6)   # Ps, driven by vacancy
    starving = 0.5 * max(0.0, 1.0 - stk - 1e-6)   # Pu, driven by stock
    return starving, blocking

def rb_arrival_MB(omega_j: float, Pv: float, A_j: float, Pu: float, Ps: float) -> float:
    """Arrival rate from machine j into its downstream buffer.

    Service rate scaled by the active fraction (neither starved nor
    blocked), availability, and the buffer's vacancy probability;
    floored at zero.
    """
    active_frac = 1.0 - Pu - Ps
    rate = omega_j * active_frac * A_j * Pv
    return rate if rate > 0.0 else 0.0

def rm_arrival_BM(omega_j: float, Rb_up: float, Pt_up: float, A_j: float, Pu: float, Ps: float) -> float:
    """Arrival rate into machine j from its upstream buffer.

    The upstream feed Rb_up is capped by the machine's available capacity
    omega_j * A_j, then reduced by the blocking probability Ps.
    NOTE(review): Pt_up and Pu are accepted for signature symmetry but
    are not used here — confirm against the paper's formula.
    """
    capacity = omega_j * A_j
    feed = Rb_up if Rb_up < capacity else capacity
    return max(0.0, feed * (1.0 - Ps))

def share_probs(Pv_src: float, Pt_dst: float) -> Tuple[float, float]:
    """Transfer-propensity proxy for a (source, destination) buffer pair.

    NOTE(review): the original computed the source and destination
    propensities with the same commutative product, so the two returned
    values are always identical — preserved as-is; confirm against the
    paper whether distinct formulas were intended.
    """
    propensity = 1.0 - (1.0 - Pv_src) * (1.0 - Pt_dst)
    clipped = float(np.clip(propensity, 0.0, 1.0))
    return clipped, clipped

def update_with_sharing(Rb_src: float, Pv_src: float, A_src: float,
                        Rb_dst_down: float, Pt_dst: float, Pu_src: float, Ps_src: float,
                        Ts_src: float, mask_active: bool) -> float:
    """Add the cross-line sharing gain to a source buffer's arrival rate.

    The gain scales with the source's active fraction, its availability,
    the transfer propensity, and the transfer rate Ts.  An inactive mask
    or negligible Ts leaves the rate untouched.
    NOTE(review): Rb_dst_down is accepted but unused — confirm intent.
    """
    if mask_active and Ts_src > 1e-9:
        propensity, _ = share_probs(Pv_src, Pt_dst)
        gain = (1.0 - Pu_src - Ps_src) * A_src * propensity * Ts_src
        updated = Rb_src + gain
        return updated if updated > 0.0 else 0.0
    return Rb_src

@dataclass
class EvalParams:
    """Evaluation constants shared by all solution evaluations."""
    C_max_total: int             # total buffer-capacity budget across all cells
    C_min_each: int              # minimum capacity per buffer cell
    time_horizon: float          # hours
    transport_fee_scale: float   # scale Ts*cost to money
    w_negW: float = 1.0          # weight on -W when picking a representative solution
    w_negTheta: float = 1.0      # weight on -theta when picking a representative solution

def evaluate_solution(plant: Plant,
                      C: np.ndarray,    # (n, m-1) int
                      Ts: np.ndarray,   # (n, m-1) float in [0,1]
                      prm: EvalParams,
                      sharing_on: bool = True) -> Tuple[Tuple[float,float], Dict]:
    """
    Analytical surrogate evaluation of one (C, Ts) solution.

    Pipeline: per-machine availability -> per-buffer Pv/Pt/Pu/Ps and
    machine->buffer arrivals Rb -> optional cross-line sharing gains on Rb
    -> buffer->machine arrivals Rm -> per-line bottleneck rate -> W and θ.

    Returns:
      (f1, f2) to MINIMIZE, where f1=-W, f2=-theta_mean
      metrics dict: W, theta_mean, theta_i, eff_rate
    """
    n = len(plant.lines)
    m = len(plant.lines[0].machines)
    T = prm.time_horizon
    # With sharing off the mask is zeroed, so both gains and transport costs vanish.
    share_mask = plant.share_mask if sharing_on else np.zeros_like(plant.share_mask)
    cost = plant.transport_cost

    # per machine params
    A = np.zeros((n, m))
    om = np.zeros((n, m))
    for i, line in enumerate(plant.lines):
        for j, mp in enumerate(line.machines):
            A[i, j]  = availability(mp.MTBF_h, mp.MTTR_h)
            om[i, j] = mp.omega

    Pv = np.zeros((n, m-1)); Pt = np.zeros((n, m-1))
    Pu = np.zeros((n, m-1)); Ps = np.zeros((n, m-1))
    Rb = np.zeros((n, m-1)); Rm = np.zeros((n, m))

    # MB cells: compute Pv, Pt, Pu, Ps, and Rb from M->B
    for i in range(n):
        for j in range(m-1):
            Sa = stability_Sa(om[i, j], om[i, j+1])
            Pv[i, j] = vacancy_prob(Sa, int(C[i, j]))
            Pt[i, j] = stock_prob(Sa, Pv[i, j])
            Pu[i, j], Ps[i, j] = idle_block_probs(Sa, int(C[i, j]))
            Rb[i, j] = rb_arrival_MB(om[i, j], Pv[i, j], A[i, j], Pu[i, j], Ps[i, j])

    # Apply sharing to Rb by Ts (per buffer level j across lines)
    if sharing_on:
        for j in range(m-1):
            Rb_new = Rb[:, j].copy()
            for i in range(n):
                for k in range(n):
                    if k == i: continue
                    if share_mask[i, k, j] <= 0: continue
                    # downstream capacity of the destination line (proxy feed limit)
                    Rb_dst_down = om[k, j+1] * A[k, j+1]
                    Rb_new[i] = update_with_sharing(
                        Rb_src=Rb_new[i],
                        Pv_src=Pv[i, j],
                        A_src=A[i, j],
                        Rb_dst_down=Rb_dst_down,
                        Pt_dst=Pt[k, j],
                        Pu_src=Pu[i, j],
                        Ps_src=Ps[i, j],
                        Ts_src=Ts[i, j],
                        mask_active=True
                    )
            Rb[:, j] = Rb_new

    # Arrivals to machines
    for i in range(n):
        # external to M1 approximated by its own capacity
        Rm[i, 0] = om[i, 0] * A[i, 0]
        for j in range(1, m):
            Rm[i, j] = rm_arrival_BM(om[i, j], Rb[i, j-1], Pt[i, j-1], A[i, j], Pu[i, j-1], Ps[i, j-1])

    eff_rate = np.min(Rm, axis=1)  # serial bottleneck
    # theta_i normalizes each line's bottleneck rate by its external input quantity.
    theta_i = np.zeros(n)
    for i, line in enumerate(plant.lines):
        In = max(1e-6, line.input_qty)
        theta_i[i] = eff_rate[i] / In
    theta_mean = float(np.mean(theta_i))

    # Earnings W: unit_price * output - transport costs (proxy)
    W_lines = np.zeros(n)
    trans_cost = 0.0
    for i, line in enumerate(plant.lines):
        unit_price = float(np.mean(line.unit_earnings)) if line.unit_earnings else 1.0
        W_lines[i] = unit_price * eff_rate[i] * T
        # proxy of transfers by Ts * cost
        for j in range(m-1):
            for k in range(n):
                if k == i: continue
                if share_mask[i, k, j] > 0:
                    trans_cost += Ts[i, j] * cost[i, k, j] * prm.transport_fee_scale

    W_total = float(np.sum(W_lines) - trans_cost)

    # Both objectives negated so the optimizer can minimize.
    f1 = -W_total
    f2 = -theta_mean
    metrics = dict(W=W_total, theta=theta_mean, theta_i=theta_i, eff_rate=eff_rate)
    return (f1, f2), metrics

# =========================
# 工厂参数：规则化自动生成
# =========================

def build_demo_plant(n_lines=3, m_machines=9, seed=7) -> Tuple[Plant, EvalParams]:
    """Deterministically generate a demo plant (reproducible for a given seed).

    Replace the generated arrays with real factory tables to run on
    actual data; everything downstream stays unchanged.
    """
    rng = np.random.RandomState(seed)

    lines: List[Line] = []
    # Rule: sample each machine's service rate ω from a range; MTBF/MTTR likewise.
    for i in range(n_lines):
        machines = []
        for j in range(m_machines):
            # Capacity: base 8~22 units/min, plus a small per-line offset i.
            low_per_min  = rng.randint(8, 14) + i
            high_per_min = rng.randint(15, 24) + i
            if high_per_min <= low_per_min: high_per_min = low_per_min + 2
            # NOTE(review): (per-min mean) * 60 / 3600 divides by 60; the label
            # says "per hour" — confirm the intended unit conversion.
            omega = 0.5 * (low_per_min + high_per_min) * 60.0 / 3600.0  # per hour
            # Reliability: MTBF in [240, 360] hours; MTTR in [20, 35] minutes.
            MTBF_h = float(rng.randint(240, 360))
            MTTR_h = float(rng.uniform(20.0, 35.0)) / 60.0
            machines.append(MachineParam(omega=omega, MTBF_h=MTBF_h, MTTR_h=MTTR_h))
        # Unit earnings (per-product, category means).
        unit_earnings = list(3.5 + 1.5 * rng.rand(5))
        # External input quantity (normalization scale).
        input_qty = float(8000 + 2000 * rng.rand())
        lines.append(Line(machines=machines, unit_earnings=unit_earnings, input_qty=input_qty))

    # Sharing topology rule: B2/B4/B6 fully interconnected; B3/B5 only
    # between L2/L3; B7 only between L1/L3 (similar to the paper's example).
    n = n_lines; m = m_machines
    share_mask = np.zeros((n, n, m-1), dtype=int)
    for i in range(n):
        for k in range(n):
            if i == k: continue
            for j in [1,3,5]:   # B2,B4,B6
                share_mask[i,k,j] = 1
            for j in [2,4]:     # B3,B5: only L2<->L3
                if (i in [1,2]) and (k in [1,2]):
                    share_mask[i,k,j] = 1
            for j in [6]:       # B7: only L1<->L3
                if (i in [0,2]) and (k in [0,2]):
                    share_mask[i,k,j] = 1

    # Cross-line transport cost: allowed edges get a constant, others 0.
    # NOTE(review): rng.randint(4, 6) draws 4 or 5 (upper bound exclusive),
    # not 4~6 as the original comment suggested — confirm intent.
    cost = np.zeros((n, n, m-1), dtype=float)
    for i in range(n):
        for k in range(n):
            if i == k: continue
            for j in range(m-1):
                if share_mask[i,k,j] == 1:
                    cost[i,k,j] = float(rng.randint(4,6))

    plant = Plant(lines=lines, share_mask=share_mask, transport_cost=cost)

    # Evaluation parameters (capacity budget, per-cell minimum, horizon, fee scale).
    prm = EvalParams(
        C_max_total=1500,
        C_min_each=5,
        time_horizon=24.0,       # 1 day
        transport_fee_scale=10.0,
        w_negW=1.0, w_negTheta=1.0
    )
    return plant, prm

# =========================
# 多目标萤火虫（C-MOHFA 风格，简化）
# =========================

@dataclass
class FAParams:
    """Hyper-parameters of the simplified multi-objective firefly algorithm."""
    pop: int = 80            # population size
    gens: int = 160          # number of generations
    elite: int = 6           # elites carried over each generation
    attr0: float = 1.0       # base attractiveness
    gamma: float = 1.0       # attractiveness decay with squared distance
    kappa_hi: float = 0.15   # initial Gaussian perturbation scale
    kappa_lo: float = 0.05   # final Gaussian perturbation scale (linear anneal)
    sa_T0: float = 60.0      # start temperature for the Ts simulated-annealing refinement
    sa_alpha: float = 0.97   # SA cooling factor per step
    seed: int = 1            # RNG seed for reproducibility

def fast_non_dominated_sort(F: np.ndarray) -> List[List[int]]:
    """Partition rows of the objective matrix F (minimization) into Pareto fronts.

    Classic NSGA-II bookkeeping: for each point keep the set it dominates
    and a count of its dominators; peel fronts level by level.
    Returns a list of fronts; fronts[0] is the non-dominated set.
    """
    P = F.shape[0]

    def dominates(a: int, b: int) -> bool:
        # a dominates b: no worse in every objective, strictly better in one
        return bool(np.all(F[a] <= F[b]) and np.any(F[a] < F[b]))

    dominated_by = [[] for _ in range(P)]
    dom_count = np.zeros(P, dtype=int)
    for p in range(P):
        for q in range(P):
            if p == q:
                continue
            if dominates(p, q):
                dominated_by[p].append(q)
            elif dominates(q, p):
                dom_count[p] += 1

    fronts: List[List[int]] = [[p for p in range(P) if dom_count[p] == 0]]
    level = 0
    while fronts[level]:
        nxt = []
        for p in fronts[level]:
            for q in dominated_by[p]:
                dom_count[q] -= 1
                if dom_count[q] == 0:
                    nxt.append(q)
        level += 1
        fronts.append(nxt)
    fronts.pop()  # drop the trailing empty front
    return fronts

def crowding_distance(front: List[int], F: np.ndarray) -> np.ndarray:
    """Crowding distance for one front's members (NSGA-II diversity measure).

    Returns one distance per member, in the same order as `front`;
    boundary solutions of each objective get the large sentinel 1e9.
    """
    size = len(front)
    if size == 0:
        return np.array([])
    members = np.array(front, dtype=int)
    dist = np.zeros(size, dtype=float)
    for obj in range(F.shape[1]):
        order = np.argsort(F[members, obj])
        sorted_vals = F[members[order], obj]
        dist[order[0]] = dist[order[-1]] = 1e9
        span = max(1e-12, sorted_vals[-1] - sorted_vals[0])
        for pos in range(1, size - 1):
            dist[order[pos]] += (sorted_vals[pos + 1] - sorted_vals[pos - 1]) / span
    return dist

class FireflyMO:
    """Simplified multi-objective firefly optimizer (C-MOHFA style).

    Decision variables per solution: integer buffer capacities C (n x m-1,
    budget-constrained) and transfer rates Ts (n x m-1, in [0,1], masked by
    the sharing topology).  Objectives, both minimized, come from
    evaluate_solution: (-W, -theta_mean).  An external archive keeps the
    first non-dominated front across generations.
    """

    def __init__(self, plant: Plant, prm: EvalParams, fap: FAParams, sharing_on: bool):
        self.plant = plant
        self.prm = prm
        self.fap = fap
        self.n = len(plant.lines)
        self.m = len(plant.lines[0].machines)
        self.sharing_on = sharing_on

        self.rng = np.random.RandomState(fap.seed)
        self.Cmin = prm.C_min_each
        self.Cmax_total = prm.C_max_total

        self.nvarC = self.n * (self.m - 1)
        self.nvarTs = self.n * (self.m - 1)

        # Ts allow-mask: 1 where this line's buffer has any sharing link at all
        allow = np.zeros((self.n, self.m - 1), dtype=float)
        for i in range(self.n):
            for j in range(self.m - 1):
                allow[i,j] = 1.0 if np.any(self.plant.share_mask[i, :, j] > 0) else 0.0
        self.Ts_allow = allow if self.sharing_on else np.zeros_like(allow)

    # --- initializers (three seeding strategies, mixed in run()) ---
    def init_random(self):
        # Uniform small capacities plus random masked transfer rates.
        C = self.rng.randint(self.Cmin, self.Cmin + 20, size=(self.n, self.m - 1))
        self._scale_C_total(C)
        Ts = self.rng.rand(self.n, self.m - 1) * self.Ts_allow
        return C, Ts

    def init_gauss(self):
        # Bell-shaped capacity profile along each line (random center/width).
        C = np.zeros((self.n, self.m - 1), dtype=int)
        for i in range(self.n):
            x = np.linspace(0, 1, self.m - 1)
            mu = self.rng.uniform(0.3, 0.7)
            sigma = self.rng.uniform(0.12, 0.25)
            w = np.exp(-0.5*((x-mu)**2)/(sigma**2))
            w = (w + 1e-3) / np.sum(w + 1e-3)
            alloc = (w * (self.Cmax_total / self.n)).astype(int)
            C[i] = np.maximum(alloc, self.Cmin)
        self._scale_C_total(C)
        Ts = 0.3 * np.ones((self.n, self.m - 1)) * self.Ts_allow
        return C, Ts

    def init_rate_based(self):
        # Capacities proportional to the upstream machines' service rates.
        C = np.zeros((self.n, self.m - 1), dtype=int)
        for i, ln in enumerate(self.plant.lines):
            up = np.array([ln.machines[j].omega for j in range(self.m - 1)], dtype=float)
            w = up / max(1e-9, np.sum(up))
            alloc = (w * (self.Cmax_total / self.n)).astype(int)
            C[i] = np.maximum(alloc, self.Cmin)
        self._scale_C_total(C)
        Ts = 0.5 * np.ones((self.n, self.m - 1)) * self.Ts_allow
        return C, Ts

    def _scale_C_total(self, C: np.ndarray):
        """In-place repair: enforce the per-cell minimum, then shave the
        largest cells until sum(C) <= C_max_total."""
        C[C < self.Cmin] = self.Cmin
        tot = int(np.sum(C))
        if tot <= self.Cmax_total: return
        over = tot - self.Cmax_total
        idxs = [(i,j) for i in range(self.n) for j in range(self.m-1)]
        idxs.sort(key=lambda ij: C[ij[0], ij[1]], reverse=True)
        for (i,j) in idxs:
            if over <= 0: break
            reducible = C[i,j] - self.Cmin
            if reducible <= 0: continue
            d = min(reducible, max(1, over//4))
            C[i,j] -= d
            over -= d
        # Final pass guarantees the budget even if the quartered cuts fell short.
        while np.sum(C) > self.Cmax_total:
            for (i,j) in idxs:
                if np.sum(C) <= self.Cmax_total: break
                if C[i,j] > self.Cmin:
                    C[i,j] -= 1

    # pack/unpack: flatten (C, Ts) into one float vector and back.
    def pack(self, C, Ts): return np.concatenate([C.flatten().astype(float), Ts.flatten().astype(float)], axis=0)
    def unpack(self, x):
        C = x[:self.nvarC].reshape(self.n, self.m-1)
        Ts = x[self.nvarC:].reshape(self.n, self.m-1)
        return C.astype(int), Ts

    def fitness(self, x) -> Tuple[np.ndarray, Dict]:
        """Repair the decoded solution (budget + mask) and evaluate it."""
        C, Ts = self.unpack(x)
        C_rep = C.copy(); self._scale_C_total(C_rep)
        Ts = np.clip(Ts, 0.0, 1.0) * self.Ts_allow
        (f1, f2), met = evaluate_solution(self.plant, C_rep, Ts, self.prm, sharing_on=self.sharing_on)
        return np.array([f1, f2], dtype=float), met

    def refine_Ts_SA(self, C, Ts_init):
        """Simulated-annealing micro-refinement of Ts at fixed C (40 steps).

        NOTE(review): acceptance compares each neighbor against the
        best-so-far objectives rather than the current state — confirm
        this is the intended SA variant.
        """
        Ts = Ts_init.copy()
        allow = self.Ts_allow
        T = self.fap.sa_T0
        best_Ts = Ts.copy()
        best_F, _ = self.fitness(self.pack(C, Ts))
        for _ in range(40):
            nbh = Ts + self.rng.randn(*Ts.shape) * 0.08
            nbh = np.clip(nbh, 0.0, 1.0) * allow
            F_new, _ = self.fitness(self.pack(C, nbh))
            if (F_new <= best_F).all() or self.rng.rand() < math.exp(-np.sum(F_new - best_F) / max(1e-6, T)):
                Ts = nbh
                if (F_new <= best_F).all():
                    best_F = F_new; best_Ts = Ts.copy()
            T *= self.fap.sa_alpha
        return best_Ts

    def run(self, verbose=True):
        """Main optimization loop; returns (archive objectives, decoded details)."""
        P, G = self.fap.pop, self.fap.gens
        # Mixed seeding: one third random, one third Gaussian, rest rate-based.
        inits = []
        for _ in range(P//3): inits.append(self.init_random())
        for _ in range(P//3): inits.append(self.init_gauss())
        while len(inits) < P: inits.append(self.init_rate_based())
        X = np.stack([self.pack(C, Ts) for C,Ts in inits], axis=0)

        F = np.zeros((P,2)); MET=[None]*P
        for i in range(P): F[i], MET[i] = self.fitness(X[i])

        # archive for first front
        archX=[]; archF=[]
        def update_archive(Xlist, Flist):
            # Merge candidates into the archive, keep only the first front.
            nonlocal archX, archF
            allX = archX + [x.copy() for x in Xlist]
            allF = archF + [f.copy() for f in Flist]
            Farr = np.stack(allF, axis=0)
            fronts = fast_non_dominated_sort(Farr)
            newX, newF = [], []
            for idx in fronts[0]:
                newX.append(allX[idx].copy()); newF.append(allF[idx].copy())
            archX, archF = newX, newF

        update_archive(list(X), list(F))

        for g in range(1, G+1):
            # Elites: best-ranked individuals, crowding-diverse within a front.
            fronts = fast_non_dominated_sort(F)
            elite_idx = []
            for fr in fronts:
                if len(elite_idx) >= self.fap.elite: break
                cd = crowding_distance(fr, F); order = np.argsort(-cd)
                for k in order:
                    if len(elite_idx) < self.fap.elite:
                        elite_idx.append(fr[k])

            elites = X[elite_idx].copy()
            attr0, gamma = self.fap.attr0, self.fap.gamma
            # Gaussian perturbation scale anneals linearly from kappa_hi to kappa_lo.
            kappa = self.fap.kappa_hi - (self.fap.kappa_hi - self.fap.kappa_lo) * (g / G)

            Y = []
            while len(Y) < P - self.fap.elite:
                # Random pair; the (objective-wise) worse one moves toward the
                # better one with firefly attraction plus Gaussian noise.
                a, b = self.rng.randint(0, P), self.rng.randint(0, P)
                xa, xb = X[a].copy(), X[b].copy()
                Fa, Fb = F[a], F[b]
                move_from, move_to = (xa, xb) if np.any(Fa > Fb) else (xb, xa)
                Ca, Ta = self.unpack(move_from)
                Cb, Tb = self.unpack(move_to)
                d = np.linalg.norm(move_from - move_to)
                attr = attr0 * math.exp(-gamma * (d**2))
                Cnew = Ca + attr*(Cb - Ca) + self.rng.randn(*Ca.shape)*kappa*10.0
                Tsnew = Ta + attr*(Tb - Ta) + self.rng.randn(*Ta.shape)*kappa
                Cnew = np.maximum(self.Cmin, np.round(Cnew)).astype(int)
                self._scale_C_total(Cnew)
                Tsnew = np.clip(Tsnew, 0.0, 1.0) * self.Ts_allow
                # Local SA refinement of Ts at the fixed (repaired) C.
                Tsnew = self.refine_Ts_SA(Cnew, Tsnew)
                Y.append(self.pack(Cnew, Tsnew))

            Xnew = np.vstack([elites] + Y)
            Fnew = np.zeros_like(F); METnew=[None]*P
            for i in range(P): Fnew[i], METnew[i] = self.fitness(Xnew[i])

            # survivor selection: rank + crowding
            Xall = np.vstack([X, Xnew]); Fall = np.vstack([F, Fnew])
            fronts_all = fast_non_dominated_sort(Fall)
            Z=[]
            for fr in fronts_all:
                if len(Z)+len(fr) <= P:
                    Z += fr
                else:
                    cd = crowding_distance(fr, Fall)
                    need = P - len(Z)
                    order = np.argsort(-cd)[:need]
                    Z += [fr[k] for k in order]
                    break
            X = Xall[Z]; F = Fall[Z]
            update_archive(list(X), list(F))

            if verbose and (g % 20 == 0 or g == 1):
                bestW = -np.min(F[:,0]); bestTh = -np.min(F[:,1])
                print(f"[Gen {g:4d} | {'ON' if self.sharing_on else 'OFF'}] front={len(archX)}  best W≈{bestW:.2f}  θ≈{bestTh:.4f}")

        # decode archive
        details=[]
        for x in archX:
            C, Ts = self.unpack(x)
            _, met = self.fitness(self.pack(C, Ts))
            details.append((C, Ts, met))
        return np.stack(archF, axis=0), details

# =========================
# 报表与可视化
# =========================

def plot_fronts(F_on: np.ndarray, F_off: np.ndarray):
    """Scatter both Pareto fronts (objectives negated back to W and θ)."""
    plt.figure(figsize=(6.6,4.8))
    for front, label in ((F_on, "Sharing ON"), (F_off, "Sharing OFF")):
        if front.size > 0:
            plt.scatter(-front[:, 0], -front[:, 1], s=18, label=label, alpha=0.8)
    plt.xlabel("Total earnings W (↑ better)")
    plt.ylabel("Mean throughput θ (↑ better)")
    plt.title("Pareto Fronts: Sharing ON vs OFF")
    plt.grid(True, alpha=0.3)
    plt.legend()
    plt.tight_layout()
    plt.show()

def pick_representative(F: np.ndarray, wW=1.0, wT=1.0) -> int:
    """Index of the front member minimizing the weighted sum wW*(-W) + wT*(-θ)."""
    scores = wW * F[:, 0] + wT * F[:, 1]
    return int(np.argmin(scores))

def print_matrix(name:str, M: np.ndarray, fmt="{:6.2f}"):
    """Pretty-print a 2-D array: one 'Line i | ...' row per first-axis entry."""
    print(f"\n{name}:")
    for i, row_vals in enumerate(M):
        cells = " ".join(fmt.format(v) for v in row_vals)
        print(f"Line {i+1:>2d} | {cells}")

def print_solution_tables(tag:str, C: np.ndarray, Ts: np.ndarray, met: Dict):
    """Print one representative solution: C matrix, Ts matrix, per-line stats."""
    print(f"\n==== Representative Solution [{tag}] ====")
    print_matrix("Buffer capacities C (per line × buffer)", C.astype(float), fmt="{:6.0f}")
    if np.max(Ts) > 0:
        # show the Ts matrix only when at least one rate is non-zero
        print_matrix("Transfer rates Ts (0~1)", Ts.copy(), fmt="{:6.2f}")
    else:
        print("\nTransfer rates Ts: (all zeros)")

    theta_i, W, eff_rate = met["theta_i"], met["W"], met["eff_rate"]
    print("\nPer-line throughput and effective rate:")
    for i, th in enumerate(theta_i):
        print(f"  Line {i+1}: θ_i={th:.4f}, eff_rate={eff_rate[i]:.2f} units/h")
    print(f"Total W = {W:.2f}")

def compare_summary(met_on: Dict, met_off: Dict, tag_on="ON", tag_off="OFF"):
    """Print a side-by-side ON vs OFF comparison of W, θ and per-line θ_i."""
    print("\n==== Sharing ON vs OFF Summary ====")
    dW = met_on['W'] - met_off['W']
    print(f"W:   ON={met_on['W']:.2f}   OFF={met_off['W']:.2f}   Δ={dW:.2f}")
    dTh = met_on['theta'] - met_off['theta']
    print(f"θ:   ON={met_on['theta']:.4f} OFF={met_off['theta']:.4f} Δ={dTh:.4f}")
    print("Per-line θ_i (ON | OFF):")
    for i, th_on in enumerate(met_on["theta_i"]):
        print(f"  Line {i+1}: {th_on:.4f} | {met_off['theta_i'][i]:.4f}")

# =========================
# 主流程
# =========================

def main():
    """End-to-end demo: build plant, optimize sharing ON/OFF, plot and report."""
    # 1) Build reproducible demo plant parameters (rule-based, seeded)
    plant, prm = build_demo_plant(n_lines=3, m_machines=9, seed=7)

    # 2) Optimize separately with sharing ON and OFF
    fap = FAParams(pop=80, gens=160, elite=6, seed=1)

    print("\n>>> Optimize with Sharing = ON")
    solver_on  = FireflyMO(plant, prm, fap, sharing_on=True)
    F_on, details_on = solver_on.run(verbose=True)

    print("\n>>> Optimize with Sharing = OFF")
    solver_off = FireflyMO(plant, prm, fap, sharing_on=False)
    F_off, details_off = solver_off.run(verbose=True)

    # 3) Plot the non-dominated fronts of both runs
    plot_fronts(F_on, F_off)

    # 4) Pick one representative solution (equal weights) and print tables
    if F_on.size>0:
        idx_on  = pick_representative(F_on, prm.w_negW, prm.w_negTheta)
        C_on, Ts_on, met_on = details_on[idx_on]
        print_solution_tables("Sharing ON", C_on, Ts_on, met_on)
    if F_off.size>0:
        idx_off = pick_representative(F_off, prm.w_negW, prm.w_negTheta)
        C_off, Ts_off, met_off = details_off[idx_off]
        print_solution_tables("Sharing OFF", C_off, Ts_off, met_off)

    # 5) ON vs OFF summary comparison
    if F_on.size>0 and F_off.size>0:
        compare_summary(met_on, met_off)

# Standard script entry guard: run the demo only when executed directly.
if __name__ == "__main__":
    main()
