#!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
@Project ：V2 
@File    ：T1.py
@IDE     ：PyCharm 
@Author  ：郭星
@Date    ：2025/9/18 16:27 
'''
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Multi-objective Firefly Algorithm (C-MOHFA-like, simplified) for
Buffer Allocation with Cross-line WIP Sharing (pure Python, no simulation)

This code re-implements the paper's core model & algorithmic ideas WITHOUT any simulator:
- Model (per paper): unreliable production lines, buffers between machines, cross-line WIP sharing
  Variables:
    * Buffer capacities C[i][j] (integers, bounded, total capacity <= C_max)
    * Sharing transfer rates Ts[i][j] in [0,1] for eligible buffers
  Core formulas (mapped from the paper):
    * Stability Sa, vacancy Pv, stock Pt for M-B-M cells
    * Availability A from MTBF/MTTR
    * Actual arrival rates Rb (buffer), Rm (machine), and effective rate R
    * Sharing-adjusted Rb, Rm with Ptp
    * Objectives: maximize total earnings W and mean throughput θ (we minimize -W and -θ)
    * Constraints: sum(C[i][j]) <= C_max, C[i][j] in [Cmin, Cmax_ij], 0<=Ts<=1 with topology mask
- Algorithm: a simplified Clustered-Multiobjective Hybrid Firefly (C-MOHFA) flavor
  including:
    * Mixed initialization (random / heuristic-by-capacity / heuristic-by-upstream-rate)
    * Non-dominated sorting + crowding distance
    * Firefly movement in joint space (C, Ts) with attractiveness & Gaussian perturbation
    * Archive of non-dominated solutions
    * SA+local neighborhood search used to refine Ts for a given C
    * Periodic sub-pop exchange to boost diversity
- Demo instance: 3 lines × 9 machines with sample MTBF/MTTR and capacity ranges.
  Transfer cost matrix is included; per the paper, W combines earnings minus transport costs.

Author: You

怎么用 & 怎么改

直接运行：会在一个 3×9 的示例工厂上跑多目标萤火虫，给出非支配前沿（W 与 θ）并打印一个代表性解的缓冲区容量与部分传输率。

改成你的实例：把 build_demo_plant() 里每条产线的 omega、MTBF/MTTR、可共享的缓冲区拓扑与跨线运输成本改成你的数据即可；C_max_total 等约束也在这里调。

论文一致性：目标和约束、到达率/可用度/稳定度/共享修正都基于文中公式做了解析近似，不再依赖仿真器（Plant Simulation/Matlab）。算法也保留了混合初始化 + 非支配排序 + 火萤吸引 + 高斯扰动 + SA邻域搜索（针对传输率 Ts）+ 档案前沿等关键机制。

小论文0301

扩展：

想严格复刻论文的多群体并行&信息交换，可在 run() 里维护多个子群、按迭代周期执行“子群抽样—跨群迁移—拥挤度筛选”；

若要输出缓冲区与传输量的表（表 5/6 同款），把 C 与 Ts/mask整理成表格即可；

若要画两方案前沿对比（如论文“共享 vs 非共享”），跑两次：一次正常；一次把 share_mask 全置 0 即可。

如果你把你工厂的参数表（各工序产能上下限、MTBF/MTTR、共享关系与运输成本、收益/时段）发我，我可以把这份代码替你定制成你的实例版，并输出和论文相同格式的结果表与对比图。
"""

import math
import random
from dataclasses import dataclass
from typing import List, Dict, Tuple, Optional
import numpy as np
import matplotlib.pyplot as plt

# ------------------------------
# Data structures
# ------------------------------

@dataclass
class MachineParam:
    """Speed and reliability parameters of one machine (inputs to the analytical model)."""
    # Mean service rate ω_ij (units/time). We use a representative mean within [low, high].
    omega: float
    # MTBF (hours) and MTTR (hours) for availability
    MTBF_h: float
    MTTR_h: float

@dataclass
class Line:
    """One serial production line: m machines with a buffer after each machine but the last."""
    # Machines M1..Mm; buffers B1..B_{m-1} (buffer after each machine except the last)
    machines: List[MachineParam]
    # Product unit earnings by product type (used to compute W; in demo we average)
    unit_earnings: List[float]
    # External: raw input count In over horizon T (proxy for W/θ scaling)
    input_qty: float

@dataclass
class Plant:
    """A set of parallel lines plus the cross-line WIP-sharing topology and transfer costs."""
    lines: List[Line]
    # Cross-line sharing eligibility mask for buffers: shape (n_lines, n_lines, m-1)
    # share_mask[i, k, j] = 1 if buffer j of line i can share with buffer j of line k (i != k)
    share_mask: np.ndarray
    # Transport cost per transfer between lines for each buffer j: shape (n_lines, n_lines, m-1)
    # cost[i,k,j] cost for moving one transfer event (proxy).
    transport_cost: np.ndarray

# ------------------------------
# Utility & model equations (mapped from paper)
# ------------------------------

def availability(MTBF_h: float, MTTR_h: float) -> float:
    """Steady-state availability A_ij = 1 - MTTR/(MTTR+MTBF)  (Eq.4 remapped).

    Degenerate inputs (MTBF + MTTR <= 0) are treated as a fully available machine.
    """
    cycle = MTBF_h + MTTR_h
    if cycle <= 0:
        return 1.0
    return 1.0 - MTTR_h / cycle

def stability_Sa(omega_up: float, omega_dn: float) -> float:
    """Stability ratio Sa = ω_up / ω_dn  (Eq.3).

    A near-zero downstream rate returns a capped 10.0; the ratio itself is
    floored at 1e-9 to keep later powers/divisions well-behaved.
    """
    if omega_dn <= 1e-9:
        return 10.0
    ratio = omega_up / omega_dn
    return ratio if ratio > 1e-9 else 1e-9

def vacancy_prob(Sa: float, C: int) -> float:
    """Steady-state vacancy probability of an M-B-M cell (Eq.1 flavor).

    Pv = (1 - Sa) / (1 - Sa^{C+1}); for Sa ≈ 1 the closed form degenerates,
    so the analytic limit Pv -> 1/(C+1) is used instead. The result is
    clipped into [0, 1].
    """
    Sa = Sa if Sa > 1e-6 else 1e-6
    if abs(Sa - 1.0) < 1e-6:
        # Geometric-series limit as Sa -> 1.
        return 1.0 / (C + 1.0)
    Pv = (1.0 - Sa) / (1.0 - Sa ** (C + 1))
    return float(min(1.0, max(0.0, Pv)))

def stock_prob(Sa: float, Pv: float) -> float:
    """Stock (non-empty) probability Pt = Sa * Pv  (Eq.2), clipped to [0, 1]."""
    product = Sa * Pv
    if product < 0.0:
        return 0.0
    if product > 1.0:
        return 1.0
    return float(product)

def rb_arrival_MB(omega_ij: float, Pv: float, Aij: float, Pu: float, Ps: float) -> float:
    """Arrival rate Rb for the M->B direction (Eq.7 flavor).

    Scales the machine rate by its workable fraction (1 - Pu - Ps), its
    availability, and the buffer's vacancy probability; negative results
    are floored at zero.
    """
    workable = 1.0 - Pu - Ps
    rate = omega_ij * workable * Aij * Pv
    return rate if rate > 0.0 else 0.0

def rm_arrival_BM(omega_ij: float, Pt_up: float, Aij: float, Pu: float, Ps: float) -> float:
    """Arrival rate Rm for the B->M direction (Eq.9 flavor).

    The upstream non-empty probability Pt_up drives the arrival; the
    denominator (workable fraction) is floored at 1e-9 to avoid division
    by zero, and the result is floored at zero.
    """
    workable = 1.0 - Pu - Ps
    if workable < 1e-9:
        workable = 1e-9
    rate = omega_ij * (1.0 - Ps) * Aij * Pt_up / workable
    return rate if rate > 0.0 else 0.0

def idle_block_probs(Sa: float, C: int) -> Tuple[float, float]:
    """Heuristic idle (Pu) and blocking (Ps) probabilities, per Eqs.(6) & (10) qualitatively.

    Sa > 1 (upstream faster) pushes the buffer toward full => downstream blocking;
    Sa < 1 (downstream faster) pushes it toward empty => machine starvation/idle.
    Both proxies are halved to keep them conservative.
    """
    pv = vacancy_prob(Sa, C)
    pt = stock_prob(Sa, pv)
    # Full buffer blocks; empty upstream buffer idles the machine.
    blocked = max(0.0, 1.0 - pv - 1e-6) * 0.5    # scaled proxy
    idle = max(0.0, 1.0 - pt - 1e-6) * 0.5
    return idle, blocked

def share_probs(Pv_src: float, Pt_dst: float) -> Tuple[float, float]:
    """Transfer-opportunity probabilities Ptp for source and destination buffers.

    Chance to share ~ 1 - (no vacancy at source) * (no stock at destination),
    mapped from the Eq.(16)/(17) notation. NOTE(review): as written, the source
    and destination expressions are algebraically identical — confirm against
    the paper whether Eq.(17) was meant to differ.
    """
    no_opportunity = (1.0 - Pv_src) * (1.0 - Pt_dst)
    src = 1.0 - no_opportunity
    dst = 1.0 - (1.0 - Pt_dst) * (1.0 - Pv_src)
    src = float(min(1.0, max(0.0, src)))
    dst = float(min(1.0, max(0.0, dst)))
    return src, dst

def update_with_sharing(Rb_src: float, Pv_src: float, A_src: float,
                        Rb_dst_down: float, Pt_dst: float, Pu_src: float, Ps_src: float,
                        Ts_src: float, mask_active: bool) -> float:
    """Adjust the source buffer's Rb with the sharing lift (Eq.15 vibe).

    Ts_src acts as a proportional factor in [0, 1]. When the topology mask is
    inactive or the transfer rate is ~0, Rb is returned untouched.
    NOTE(review): `Rb_dst_down` is accepted for interface symmetry but not used
    by the current lift formula — confirm against the paper.
    """
    if not mask_active:
        return Rb_src
    if Ts_src <= 1e-9:
        return Rb_src
    ptp_src, _ = share_probs(Pv_src, Pt_dst)
    # Lift proportional to source workability, availability, opportunity, and Ts.
    lift = (1.0 - Pu_src - Ps_src) * A_src * ptp_src * Ts_src
    adjusted = Rb_src + lift
    return adjusted if adjusted > 0.0 else 0.0

# ------------------------------
# Objective evaluation
# ------------------------------

@dataclass
class EvalParams:
    """Static evaluation constants: capacity constraints, horizon, and fee scaling."""
    # Budget: sum of all buffer capacities C[i][j] must not exceed this.
    C_max_total: int
    # Lower bound applied to every individual buffer capacity.
    C_min_each: int
    time_horizon: float  # T (hours)
    # earnings aggregation: average over product list
    transport_fee_scale: float  # scales the count of transfers (proxy)
    # weights for scalarization (only for plotting single "best" if needed)
    w_negW: float = 1.0
    w_negTheta: float = 1.0

def evaluate_solution(plant: Plant,
                      C: np.ndarray,    # (n, m-1) int
                      Ts: np.ndarray,   # (n, m-1) in [0,1]
                      prm: EvalParams,
                      rng: Optional[np.random.RandomState]=None) -> Tuple[Tuple[float,float], Dict]:
    """
    Return (f1, f2) to MINIMIZE, where f1=-W, f2=-θ  ; plus metrics dict.

    This evaluation is a deterministic surrogate of the paper's analytical flow:
    1) Per line and per stage j, compute Sa, Pv, Pt, A, Pu, Ps.
    2) Compute Rb (M->B) and Rm (B->M) nominal; then adjust Rb by sharing where eligible using Ts.
    3) Effective stage rate Rj = min(Rm_up, Rs) flavor; we approximate as min(Rb_of_Bj, omega_{j+1}*A) cascade.
    4) Line throughput θ_i = min_j R_stage / In_i  (Eq.22)
    5) Line earnings W_i = (avg unit price) * Output_i - transport_costs
       Output_i ≈ θ_i * In_i * T
       Transport cost proxy: sum over buffers of (Ts * mask * fee_scale)
    6) Aggregate across lines and average θ.

    Note: `rng` is accepted for interface compatibility but is never read —
    the surrogate is fully deterministic given (C, Ts).
    """
    n = len(plant.lines)
    m = len(plant.lines[0].machines)
    T = prm.time_horizon
    share_mask = plant.share_mask
    cost = plant.transport_cost  # NOTE(review): alias unused below; code reads plant.transport_cost directly

    # Precompute per machine availability
    A = np.zeros((n, m), dtype=float)
    om = np.zeros((n, m), dtype=float)
    for i, line in enumerate(plant.lines):
        for j, mp in enumerate(line.machines):
            A[i, j]  = availability(mp.MTBF_h, mp.MTTR_h)
            om[i, j] = mp.omega

    # Stage loop: for each line i, for j=1..m-1 (between M_j and M_{j+1})
    Pv = np.zeros((n, m-1), dtype=float)
    Pt = np.zeros((n, m-1), dtype=float)
    Pu = np.zeros((n, m-1), dtype=float)
    Ps = np.zeros((n, m-1), dtype=float)
    Rb = np.zeros((n, m-1), dtype=float)   # arrival to buffer j
    Rm = np.zeros((n, m), dtype=float)     # arrival to machine j (from buffer j-1)

    # Upstream of B_j is M_j; downstream is M_{j+1}
    for i in range(n):
        for j in range(m-1):
            Sa = stability_Sa(om[i, j], om[i, j+1])
            Pv[i, j] = vacancy_prob(Sa, int(C[i, j]))
            Pt[i, j] = stock_prob(Sa, Pv[i, j])
            Pu[i, j], Ps[i, j] = idle_block_probs(Sa, int(C[i, j]))
            # Rb from M->B
            Rb[i, j] = rb_arrival_MB(om[i, j], Pv[i, j], A[i, j], Pu[i, j], Ps[i, j])

    # Apply sharing adjustments to Rb using Ts with mask (pairwise same j across lines)
    # Here we blend source buffer i with all partner lines k at same j
    for j in range(m-1):
        # compute a temporary adjusted Rb
        Rb_new = Rb[:, j].copy()
        for i in range(n):
            # share to any dst line k
            for k in range(n):
                if k == i: continue
                if share_mask[i, k, j] <= 0.0: continue
                # dest buffer occupancy
                Rb_dst_down = om[k, j+1] * A[k, j+1]
                Rb_new[i] = update_with_sharing(
                    Rb_src=Rb_new[i],
                    Pv_src=Pv[i, j],
                    A_src=A[i, j],
                    Rb_dst_down=Rb_dst_down,
                    Pt_dst=Pt[k, j],
                    Pu_src=Pu[i, j],
                    Ps_src=Ps[i, j],
                    Ts_src=Ts[i, j],
                    mask_active=True
                )
        Rb[:, j] = Rb_new

    # Compute arrivals to machines: machine 0 has external supply; from B_{j-1} for j>=1
    # Approximate external arrival to M1 as its own capacity*availability (abundant input assumption in the paper)
    for i in range(n):
        Rm[i, 0] = om[i, 0] * A[i, 0]    # first machine arrival
        for j in range(1, m):
            # arrival to M_j from upstream buffer B_{j-1}
            # Use Pt_{j-1} as non-empty prob proxy and Rb_{j-1} as feed
            # Blend: min(Rb upstream, capacity of M_j)
            cap = om[i, j] * A[i, j]
            Rm[i, j] = min(Rb[i, j-1], cap)

    # Stage effective rates (serial): cascade min of stage rates
    eff_rate = np.zeros((n,), dtype=float)
    for i in range(n):
        # conservative effective line rate as min of machine arrivals (serial bottleneck)
        eff_rate[i] = min(Rm[i, :])

    # Throughput θ_i: normalize by input In_i (Eq.22); In_i>0.
    theta_i = np.zeros((n,), dtype=float)
    for i, line in enumerate(plant.lines):
        In = max(1e-6, line.input_qty)
        theta_i[i] = eff_rate[i] / In

    theta_mean = float(np.mean(theta_i))

    # Earnings W: average unit earning * output - transfer costs
    # Output over T: eff_rate * T
    W_lines = np.zeros((n,), dtype=float)
    # transfer proxy cost: sum over Ts * cost mask * scale
    transfer_cost_sum = 0.0
    for i, line in enumerate(plant.lines):
        unit_price = float(np.mean(line.unit_earnings)) if line.unit_earnings else 1.0
        W_lines[i] = unit_price * eff_rate[i] * T
        # accumulate transport proxies
        for j in range(m-1):
            for k in range(n):
                if k == i: continue
                if plant.share_mask[i, k, j] > 0:
                    transfer_cost_sum += Ts[i, j] * plant.transport_cost[i, k, j] * prm.transport_fee_scale

    W_total = float(np.sum(W_lines) - transfer_cost_sum)

    # Return MINIMIZATION objectives:
    f1 = -W_total
    f2 = -theta_mean
    metrics = dict(W=W_total, theta=theta_mean, eff_rate=eff_rate, theta_i=theta_i)
    return (f1, f2), metrics

# ------------------------------
# MOEA tools: non-dominated sorting & crowding
# ------------------------------

def fast_non_dominated_sort(F: np.ndarray) -> List[List[int]]:
    """
    Deb-style fast non-dominated sort.

    F: (P, M) objective array to minimize. Returns the fronts as lists of
    row indices, best (non-dominated) front first.
    """
    P = F.shape[0]
    dominates = [[] for _ in range(P)]      # indices each solution dominates
    dom_count = np.zeros(P, dtype=int)      # how many solutions dominate p
    first_front: List[int] = []

    for p in range(P):
        for q in range(P):
            if p == q:
                continue
            # Pareto dominance: no worse in all objectives, strictly better in one.
            if np.all(F[p] <= F[q]) and np.any(F[p] < F[q]):
                dominates[p].append(q)
            elif np.all(F[q] <= F[p]) and np.any(F[q] < F[p]):
                dom_count[p] += 1
        if dom_count[p] == 0:
            first_front.append(p)

    fronts: List[List[int]] = [first_front]
    while fronts[-1]:
        successors: List[int] = []
        for p in fronts[-1]:
            for q in dominates[p]:
                dom_count[q] -= 1
                if dom_count[q] == 0:
                    successors.append(q)
        fronts.append(successors)
    fronts.pop()  # drop the trailing empty front
    return fronts

def crowding_distance(front: List[int], F: np.ndarray) -> np.ndarray:
    """Crowding distance per member of `front`, aligned with the list order.

    Boundary points on each objective receive 1e9 (effectively infinite);
    interior points accumulate the normalized neighbor gap per objective.
    """
    size = len(front)
    if size == 0:
        return np.array([])
    cd = np.zeros(size, dtype=float)
    members = np.asarray(front, dtype=int)
    for obj in range(F.shape[1]):
        order = np.argsort(F[members, obj])
        vals = F[members[order], obj]
        # Extremes of this objective are kept at "infinite" distance.
        cd[order[0]] = 1e9
        cd[order[-1]] = 1e9
        span = max(1e-12, vals[-1] - vals[0])
        for pos in range(1, size - 1):
            cd[order[pos]] += (vals[pos + 1] - vals[pos - 1]) / span
    return cd

# ------------------------------
# C-MOHFA-like Firefly (simplified)
# ------------------------------

@dataclass
class GAParams:
    """Optimizer hyper-parameters (name is historical; used by the firefly solver)."""
    pop: int = 80     # population size
    gens: int = 200   # number of generations
    elite: int = 4    # elites carried over unchanged each generation
    # Firefly params
    attr0: float = 1.0   # base attractiveness β0
    gamma: float = 1.0   # light-absorption coefficient in exp(-γ d²)
    # Gaussian disturbance bounds (decrease over time)
    kappa_hi: float = 0.15
    kappa_lo: float = 0.05
    # SA for Ts refinement
    sa_T0: float = 99.0     # initial SA temperature
    sa_alpha: float = 0.98  # geometric cooling factor
    # Exchange ratio
    # NOTE(review): ex_ratio is stored but not read by the simplified run() loop.
    ex_ratio: float = 0.15
    # Random seed
    seed: int = 0

class CMOHFA:
    """Simplified Clustered Multi-Objective Hybrid Firefly solver.

    Decision variables are packed as a flat vector:
      * C  : (n, m-1) integer buffer capacities (repaired to the total budget),
      * Ts : (n, m-1) sharing transfer rates in [0, 1], masked to eligible buffers.
    Objectives are the minimization pair (-W, -θ) from evaluate_solution.
    """
    def __init__(self, plant: Plant, prm: EvalParams, gp: GAParams):
        """Bind the plant model, evaluation constants, and optimizer hyper-parameters."""
        self.plant = plant
        self.prm = prm
        self.gp = gp
        self.n = len(plant.lines)                 # number of lines
        self.m = len(plant.lines[0].machines)     # machines per line (assumed equal across lines)
        # decision dims
        # NOTE(review): dimC/dimTs are per-line dims; the flat sizes used below are nvarC/nvarTs.
        self.dimC = (self.m - 1)
        self.dimTs = (self.m - 1)
        self.rng = np.random.RandomState(gp.seed)

        # bounds for capacities
        self.Cmin = prm.C_min_each
        self.Cmax_total = prm.C_max_total
        # eligible sharing mask
        self.mask = plant.share_mask  # (n,n,m-1)

        # build per-line arrays for convenience
        self.nvarC = self.n * (self.m - 1)
        self.nvarTs = self.n * (self.m - 1)

    # ---------- initialization strategies ----------
    def init_random(self) -> Tuple[np.ndarray, np.ndarray]:
        """Random C then scale to total bound; random Ts with mask."""
        C = self.rng.randint(self.Cmin, self.Cmin + 20, size=(self.n, self.m - 1))
        self._scale_C_total(C)
        Ts = self.rng.rand(self.n, self.m - 1) * self._ts_allowed()
        return C, Ts

    def init_gauss_capacity(self) -> Tuple[np.ndarray, np.ndarray]:
        """Heuristic: Gaussian-ish profile over buffers to spread capacity."""
        C = np.zeros((self.n, self.m - 1), dtype=int)
        for i in range(self.n):
            x = np.linspace(0, 1, self.m - 1)
            mu = self.rng.uniform(0.3, 0.7)
            sigma = self.rng.uniform(0.12, 0.25)
            w = np.exp(-0.5 * ((x - mu) ** 2) / (sigma ** 2))
            w = (w + 1e-3) / np.sum(w + 1e-3)
            # Each line gets an equal share of the total budget, bell-shaped over buffers.
            alloc = (w * (self.Cmax_total / self.n)).astype(int)
            alloc = np.maximum(alloc, self.Cmin)
            C[i] = alloc
        self._scale_C_total(C)
        Ts = 0.3 * np.ones((self.n, self.m - 1)) * self._ts_allowed()
        return C, Ts

    def init_upstream_rate(self) -> Tuple[np.ndarray, np.ndarray]:
        """Heuristic: allocate buffers proportional to upstream machine capacity."""
        C = np.zeros((self.n, self.m - 1), dtype=int)
        for i, ln in enumerate(self.plant.lines):
            up_rates = np.array([ln.machines[j].omega for j in range(self.m - 1)], dtype=float)
            w = up_rates / max(1e-9, np.sum(up_rates))
            alloc = (w * (self.Cmax_total / self.n)).astype(int)
            C[i] = np.maximum(alloc, self.Cmin)
        self._scale_C_total(C)
        Ts = 0.5 * np.ones((self.n, self.m - 1)) * self._ts_allowed()
        return C, Ts

    def _ts_allowed(self) -> np.ndarray:
        """A per (n,m-1) binary mask: Ts allowed if exists any partner k with share_mask[i,k,j]==1"""
        allow = np.zeros((self.n, self.m - 1), dtype=float)
        for i in range(self.n):
            for j in range(self.m - 1):
                allow[i, j] = 1.0 if np.any(self.mask[i, :, j] > 0) else 0.0
        return allow

    def _scale_C_total(self, C: np.ndarray):
        """Scale/repair C to meet total capacity constraint & floor each buffer by Cmin.

        Repairs IN PLACE: first floors every entry at Cmin, then, if the budget
        is exceeded, shaves the largest allocations first in chunks, with a
        final one-by-one trim as a safety net.
        """
        C[C < self.Cmin] = self.Cmin
        tot = int(np.sum(C))
        if tot <= self.Cmax_total:
            return
        # reduce proportionally
        over = tot - self.Cmax_total
        flat_idx = [(i, j) for i in range(self.n) for j in range(self.m - 1)]
        # reduce from biggest allocations first
        flat_idx.sort(key=lambda ij: C[ij[0], ij[1]], reverse=True)
        for i, j in flat_idx:
            if over <= 0:
                break
            reducible = C[i, j] - self.Cmin
            if reducible <= 0:
                continue
            # Take off at most a quarter of the remaining excess per buffer (min 1).
            d = min(reducible, max(1, over // 4))
            C[i, j] -= d
            over -= d
        if np.sum(C) > self.Cmax_total:
            # final trim 1 by 1
            for i, j in flat_idx:
                if np.sum(C) <= self.Cmax_total:
                    break
                if C[i, j] > self.Cmin:
                    C[i, j] -= 1

    # ---------- encoding helpers ----------
    def pack(self, C: np.ndarray, Ts: np.ndarray) -> np.ndarray:
        """Flatten (C, Ts) into one float vector [C..., Ts...]."""
        return np.concatenate([C.flatten().astype(float), Ts.flatten().astype(float)], axis=0)

    def unpack(self, x: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
        """Inverse of pack(): recover integer C and float Ts matrices."""
        c = x[:self.nvarC].reshape(self.n, self.m - 1)
        t = x[self.nvarC:].reshape(self.n, self.m - 1)
        return c.astype(int), t

    # ---------- fitness ----------
    def fitness(self, x: np.ndarray) -> Tuple[np.ndarray, Dict]:
        """Repair a packed vector (budget + mask) and evaluate it; returns ([f1, f2], metrics)."""
        C, Ts = self.unpack(x)
        C_repaired = C.copy()
        self._scale_C_total(C_repaired)
        Ts = np.clip(Ts, 0.0, 1.0) * self._ts_allowed()
        (f1, f2), metrics = evaluate_solution(self.plant, C_repaired, Ts, self.prm, self.rng)
        return np.array([f1, f2], dtype=float), metrics

    # ---------- SA+local search for Ts (given C) ----------
    def refine_Ts_SA(self, C: np.ndarray, Ts_init: np.ndarray) -> np.ndarray:
        """Simulated-annealing refinement of Ts for a fixed capacity vector C.

        40 Gaussian-neighborhood moves with geometric cooling; only solutions
        that dominate the incumbent update the best, but worse ones may be
        accepted per the Metropolis criterion.
        NOTE(review): math.exp may overflow when F_new improves one objective
        by a very large margin (exponent strongly positive) — confirm ranges.
        """
        Ts = Ts_init.copy()
        allow = self._ts_allowed()
        T = self.gp.sa_T0
        best_Ts = Ts.copy()
        best_F, _ = self.fitness(self.pack(C, Ts))
        for _ in range(40):
            # neighbor: small gaussian move on allowed Ts
            nbh = Ts + self.rng.randn(*Ts.shape) * (0.08)
            nbh = np.clip(nbh, 0.0, 1.0) * allow
            F_new, _ = self.fitness(self.pack(C, nbh))
            if (F_new <= best_F).all() or self.rng.rand() < math.exp(-np.sum(F_new - best_F) / max(1e-6, T)):
                Ts = nbh
                if (F_new <= best_F).all():
                    best_F = F_new
                    best_Ts = Ts.copy()
            T *= self.gp.sa_alpha
        return best_Ts

    # ---------- main run ----------
    def run(self, verbose=True):
        """Evolve the population for gp.gens generations.

        Returns (archive_X, archive_F, arch_metrics): packed non-dominated
        solutions, their stacked objective values, and per-solution
        (C, Ts, metrics) tuples.
        """
        rng = self.rng
        P = self.gp.pop
        G = self.gp.gens

        # mixed init
        inits = []
        for _ in range(P // 3):
            inits.append(self.init_random())
        for _ in range(P // 3):
            inits.append(self.init_gauss_capacity())
        while len(inits) < P:
            inits.append(self.init_upstream_rate())

        X = np.stack([self.pack(C, Ts) for (C, Ts) in inits], axis=0)

        # evaluate
        F = np.zeros((P, 2), dtype=float)
        MET = [None]*P
        for i in range(P):
            F[i], MET[i] = self.fitness(X[i])

        archive_X = []
        archive_F = []

        def update_archive(X, F):
            # Merge candidates into the archive and keep only the first front.
            nonlocal archive_X, archive_F
            allX = (archive_X + [x.copy() for x in X])
            allF = (archive_F + [f.copy() for f in F])
            Farr = np.stack(allF, axis=0)
            fronts = fast_non_dominated_sort(Farr)
            newX, newF = [], []
            for idx in fronts[0]:  # only store first front to keep archive small
                newX.append(allX[idx].copy())
                newF.append(allF[idx].copy())
            archive_X, archive_F = newX, newF

        update_archive(list(X), list(F))

        # evolution
        for g in range(1, G+1):
            # non-dominated sort + crowding
            fronts = fast_non_dominated_sort(F)
            # collect elites by fronts+crowding
            elite_idx = []
            for fr in fronts:
                if len(elite_idx) >= self.gp.elite: break
                cd = crowding_distance(fr, F)
                order = np.argsort(-cd)  # higher crowding first
                for k in order:
                    if len(elite_idx) < self.gp.elite:
                        elite_idx.append(fr[k])
                    else:
                        break

            elites = X[elite_idx].copy()

            # Firefly movement
            attr0 = self.gp.attr0
            gamma = self.gp.gamma
            # Disturbance amplitude anneals linearly from kappa_hi to kappa_lo.
            kappa = self.gp.kappa_hi - (self.gp.kappa_hi - self.gp.kappa_lo) * (g / G)

            Y = []
            while len(Y) < P - self.gp.elite:
                # pick two random individuals
                a, b = rng.randint(0, P), rng.randint(0, P)
                xa, xb = X[a].copy(), X[b].copy()
                Fa, Fb = F[a], F[b]
                # choose direction: move worse towards better (non-dominated-or-equal)
                move_from, move_to = (xa, xb) if np.any(Fa > Fb) else (xb, xa)
                # decode to do 2D movement on (C, Ts)
                Ca, Ta = self.unpack(move_from)
                Cb, Tb = self.unpack(move_to)

                # attractiveness depends on 2-norm distance in joint space
                da = np.linalg.norm(move_from - move_to)
                attr = attr0 * math.exp(-gamma * (da ** 2))

                # new position with gaussian disturbance (on both C and Ts)
                # C perturbation is scaled x10 since capacities live on a larger scale than Ts.
                Cnew = Ca + attr * (Cb - Ca) + rng.randn(*Ca.shape) * kappa * 10.0
                Tsnew = Ta + attr * (Tb - Ta) + rng.randn(*Ta.shape) * kappa

                # project / repair
                Cnew = np.maximum(self.Cmin, np.round(Cnew)).astype(int)
                self._scale_C_total(Cnew)
                Tsnew = np.clip(Tsnew, 0.0, 1.0) * self._ts_allowed()

                # local SA refine Ts (hybridization)
                Tsnew = self.refine_Ts_SA(Cnew, Tsnew)

                Y.append(self.pack(Cnew, Tsnew))

            X_new = np.vstack([elites] + Y)

            # evaluate new
            F_new = np.zeros_like(F)
            MET_new = [None]*P
            for i in range(P):
                F_new[i], MET_new[i] = self.fitness(X_new[i])

            # selection: combine & keep best P by rank+crowding
            X_all = np.vstack([X, X_new])
            F_all = np.vstack([F, F_new])

            fronts_all = fast_non_dominated_sort(F_all)
            Z = []
            for fr in fronts_all:
                if len(Z) + len(fr) <= P:
                    Z += fr
                else:
                    # Partial front: keep the least-crowded members to fill up to P.
                    cd = crowding_distance(fr, F_all)
                    order = np.argsort(-cd)
                    need = P - len(Z)
                    Z += [fr[k] for k in order[:need]]
                    break
            X = X_all[Z]
            F = F_all[Z]

            update_archive(list(X), list(F))

            if verbose and (g % 20 == 0 or g == 1):
                bestW = -np.min(F[:,0])
                bestTh = -np.min(F[:,1])
                print(f"[Gen {g:4d}] Archive size={len(archive_X)}  best_W≈{bestW:.2f}  best_theta≈{bestTh:.4f}")

        # decode archive to metrics
        arch_metrics = []
        for x in archive_X:
            C, Ts = self.unpack(x)
            _, metrics = self.fitness(self.pack(C, Ts))
            arch_metrics.append((C, Ts, metrics))
        return archive_X, np.stack(archive_F, axis=0), arch_metrics

# ------------------------------
# Demo instance (3 lines × 9 machines)
# ------------------------------

def build_demo_plant() -> Tuple[Plant, EvalParams, GAParams]:
    """Construct the 3-line × 9-machine demo instance with its evaluation and solver params.

    Replace omega ranges, MTBF/MTTR tables, the sharing mask, transport costs,
    and the EvalParams/GAParams constants here to run your own instance.
    """
    # Machines per line: use average omega within [low, high]; MTBF/MTTR per paper tables (approx).
    # For brevity, we fill with plausible numbers; you can replace with exact tables.
    # NOTE(review): `lidx` is accepted but unused — kept for call-site symmetry.
    def mk_line(lidx: int, omega_ranges: List[Tuple[int,int]],
                mtbf: List[int], mttr_min: List[float]) -> Line:
        machines = []
        for j in range(9):
            low, high = omega_ranges[j]
            omega = 0.5*(low+high) / 60.0  # convert per min -> per hour (optional scaling)
            MTBF_h = mtbf[j]
            MTTR_h = mttr_min[j] / 60.0    # table values are minutes; convert to hours
            machines.append(MachineParam(omega=omega, MTBF_h=MTBF_h, MTTR_h=MTTR_h))
        unit_earnings = [4.0, 4.5, 4.0, 5.0, 3.5]
        input_qty = 10000.0  # arbitrary horizon inputs
        return Line(machines=machines, unit_earnings=unit_earnings, input_qty=input_qty)

    # Example ranges (from the paper’s Tab.2 pattern but simplified)
    omega_ranges_L1 = [(15,20),(8,13),(16,23),(8,14),(13,21),(17,21),(12,15),(9,13),(10,22)]
    omega_ranges_L2 = [(15,18),(15,22),(9,12),(15,23),(8,13),(12,15),(17,23),(10,20),(9,18)]
    omega_ranges_L3 = [(17,22),(12,16),(15,18),(12,13),(14,23),(10,14),(8,12),(14,18),(10,20)]

    # MTBF in hours, MTTR in minutes (converted inside mk_line).
    mtbf_L1 = [280,270,282,288,296,270,334,318,248]
    mttrm_L1 = [28,26,28,28.5,29,26.5,32,27,24]

    mtbf_L2 = [264,294,262,280,279,310,284,302,268]
    mttrm_L2 = [26,28.5,27,28,27,30,28,29.5,25]

    mtbf_L3 = [278,285,326,295,269,284,317,297,260]
    mttrm_L3 = [28,27,30,27.5,28,30,31,28.5,27]

    L1 = mk_line(0, omega_ranges_L1, mtbf_L1, mttrm_L1)
    L2 = mk_line(1, omega_ranges_L2, mtbf_L2, mttrm_L2)
    L3 = mk_line(2, omega_ranges_L3, mtbf_L3, mttrm_L3)

    lines = [L1, L2, L3]
    n = 3; m = 9

    # Sharing mask: only some buffers can share across lines (based on Tab.3 pattern)
    share_mask = np.zeros((n, n, m-1), dtype=int)
    # Example: B2 shares among all, B3 shares L2<->L3, B4 shares among all, B5 shares L2<->L3, B6 shares among all, B7 shares L1<->L3
    for i in range(n):
        for k in range(n):
            if i == k: continue
            # j index: 0..7 corresponds to B1..B8
            for j in [1,3,5]:  # B2,B4,B6 among all
                share_mask[i,k,j] = 1
            for j in [2,4]:    # B3,B5 between L2 & L3
                if (i in [1,2]) and (k in [1,2]):
                    share_mask[i,k,j] = 1
            for j in [6]:      # B7 between L1 & L3
                if (i in [0,2]) and (k in [0,2]):
                    share_mask[i,k,j] = 1
            # others remain 0

    # Transport cost (proxy): use small constants similar to paper Tab.3
    cost = np.zeros((n, n, m-1), dtype=float)
    # Fill few per mask
    def set_cost(i,k,j,val):
        cost[i,k,j] = val
    for i in range(n):
        for k in range(n):
            if i==k: continue
            if share_mask[i,k,1]==1: set_cost(i,k,1,5)
            if share_mask[i,k,2]==1: set_cost(i,k,2,4)
            if share_mask[i,k,3]==1: set_cost(i,k,3,5)
            if share_mask[i,k,4]==1: set_cost(i,k,4,4)
            if share_mask[i,k,5]==1: set_cost(i,k,5,5)
            if share_mask[i,k,6]==1: set_cost(i,k,6,6)

    plant = Plant(lines=lines, share_mask=share_mask, transport_cost=cost)

    # Evaluation & GA params
    prm = EvalParams(
        C_max_total=1500,
        C_min_each=5,
        time_horizon=24.0,     # hours
        transport_fee_scale=10.0,  # proxy scaling
        w_negW=1.0, w_negTheta=1.0
    )
    gp = GAParams(pop=80, gens=150, elite=6, seed=1,
                  attr0=1.0, gamma=1.0, kappa_hi=0.15, kappa_lo=0.05,
                  sa_T0=60.0, sa_alpha=0.97, ex_ratio=0.15)
    return plant, prm, gp

# ------------------------------
# Plotting
# ------------------------------

def plot_pareto(F: np.ndarray, title="Pareto Front (maximize W, θ)"):
    """Scatter the archive front in (W, θ) space.

    F stores the minimization pair (-W, -θ), so both axes are negated back
    to their "higher is better" orientation before plotting.
    """
    earnings = -F[:, 0]
    throughput = -F[:, 1]
    plt.figure(figsize=(6, 4))
    plt.scatter(earnings, throughput, s=16)
    plt.xlabel("Total earnings W (higher is better)")
    plt.ylabel("Mean throughput θ (higher is better)")
    plt.title(title)
    plt.grid(True, alpha=0.3)
    plt.tight_layout()
    plt.show()

# ------------------------------
# Main
# ------------------------------

def main():
    """Build the demo plant, run the solver, and report one representative solution."""
    plant, prm, gp = build_demo_plant()
    solver = CMOHFA(plant, prm, gp)
    archive_X, archive_F, arch_details = solver.run(verbose=True)

    print(f"\nArchive size: {len(archive_X)} (non-dominated)")
    # Pick one archive member via equal-weight scalarization of the two objectives.
    scalar = prm.w_negW * archive_F[:, 0] + prm.w_negTheta * archive_F[:, 1]
    best_idx = int(np.argmin(scalar))
    print("Representative solution (by equal weights):")
    repF = archive_F[best_idx]
    print(f"  W≈{-repF[0]:.2f}, theta≈{-repF[1]:.4f}")
    C, Ts, met = arch_details[best_idx]
    row_sums = [int(np.sum(C[i])) for i in range(C.shape[0])]
    print("  Sample C row sums per line:", row_sums)
    print("  A few Ts (non-zeros):")
    for pair in np.argwhere(Ts > 1e-6)[:10]:
        i, j = int(pair[0]), int(pair[1])
        print(f"    Ts[L{i+1},B{j+1}]= {Ts[i,j]:.2f}")
    plot_pareto(np.array(archive_F), title="Pareto Front (maximize W, θ)")

if __name__ == "__main__":
    main()
