#!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
@Project ：V2 
@File    ：HCQT1.py
@IDE     ：PyCharm 
@Author  ：郭星
@Date    ：2025/9/28 19:54
GPT搭建的
'''
# GPT搭建的相关的环境和代码函数

# 1) 基线与对比（必须有）
# 共享 vs 不共享（No-Share）：同样的缓冲总量 C_total，
# 比较收益 W、吞吐 θ、转运成本、设备平均利用率。
# 预期：共享在中高缓冲时段 W 优势明显；θ 更稳。
#
# 算法对比：C-MOHFA vs NSGA-II / MOEA-D / SPEA2（可直接替换你的非支配排序与变异模块）。
# 指标：帕累托超体积（HV）、IGD、Spread、收敛曲线（HV-vs-evaluations）、运行时。
#
# 启发式规则对比：常见静态分配（按工序“平均分”/“按瓶颈加权”）作为“工程基线”，展示你方法的收益提升幅度。
#
# 2) 消融实验（解释方法贡献）
#
# 初始化策略消融：只用随机 / 只用高斯启发 / 只用吞吐启发 / 三者混合。看 HV、收敛速度与稳定性。
#
# 传输率局部搜索（SA+邻域）开/关：证明“只调容量 vs ‘容量+传输率细化’”的增益。
#
# 多种群与跨群迁移开/关：验证集群化与信息交换的必要性。
#
# 档案机制开/关：关闭“容量→传输率映射档案”后，比较同等评估预算下的质量差异。
#
# 3) 敏感性分析（参数稳健性）
#
# 缓冲总量 C_total 扫描：低/中/高 3–5 档；
# 观察 W-θ 曲线、利用率、阻塞/饥饿。
#
# 转运成本（低/中/高）：检验共享策略的“临界区间”（何时转运不再划算）。
#
# 可靠性（MTBF↑、MTTR↓ 为“高可用” vs 反之为“低可用”）：看共享对故障吸收能力。
#
# 产品收益结构（均衡 vs 偏斜）：偏斜时共享是否更“追高收益线”。
#
# 预热时长、仿真窗口：验证统计量稳定性（方差、CI）。
#
# 4) 规模化与泛化
#
# 线数×工序数扩展：如 3×4 → 4×5 → 6×6，观察算法时间随规模的增长率、HV 退化幅度。
#
# 不同布局（不同瓶颈位置）：前段/中段/末段瓶颈，看最佳缓冲分布的迁移规律。
#
# 跨实例迁移：固定“好解”的容量分布，换另一组可靠性或收益表，只微调传输率，测试重用性。






import math
import random
import statistics
from dataclasses import dataclass, field, replace
from typing import Any, Dict, List, Tuple

import numpy as np
import simpy

# -----------------------------
# Problem definition & data
# -----------------------------

@dataclass
class MachineParams:
    """Per-machine parameters: processing-rate range and reliability."""
    cap_min: float     # lower bound of the processing rate (parts/min; inverted to a mean service time)
    cap_max: float     # upper bound of the processing rate (parts/min)
    mtbf_hours: float  # mean time between failures, in hours (converted to minutes in the sim)
    mttr_min: float    # mean time to repair, in minutes

@dataclass
class LineSpec:
    """One production line: its machines in process order (stage 0..m-1)."""
    machines: List[MachineParams]  # machine j feeds buffer j (except the last machine)

@dataclass
class ProductSpec:
    """A product type and the revenue earned per finished unit."""
    name: str
    unit_revenue: float

@dataclass
class ProblemSpec:
    """Full problem instance for the buffer-sharing flow-shop model."""
    lines: List[LineSpec]        # parallel lines; all are assumed to have the same machine count
    products: List[ProductSpec]  # product mix; first machines draw product types uniformly
    transfer_costs: List[List[List[float]]]  # cost[i][k][j]: move one WIP item, line i -> line k, at buffer stage j
    horizon_hours: float = 6.0   # measured simulation window (after warm-up)
    warmup_hours: float = 1.0    # warm-up period; all statistics are reset when it ends
    seed: int = 42               # RNG seed used by FlowShopEnv (reseeds the global RNGs)

# -----------------------------
# Encoding: capacities & transfer rates
# -----------------------------

@dataclass
class Encoding:
    """Decision variables: per-buffer capacities and WIP transfer probabilities."""
    capacities: np.ndarray  # shape (n_lines, n_buffers), int
    transfers: np.ndarray   # shape (n_lines, n_buffers), float in [0,1]

    def clone(self):
        """Return an independent copy (both arrays are duplicated)."""
        return Encoding(self.capacities.copy(), self.transfers.copy())

# -----------------------------
# SimPy model
# -----------------------------

class FlowShopEnv:
    """SimPy simulation of parallel serial lines with finite inter-machine
    buffers and probabilistic WIP sharing between lines at the same stage.

    run() reports (W, q, util): net profit (revenue minus transfer cost),
    mean per-line completion rate (parts/min) and mean machine utilization
    over the measurement horizon; warm-up statistics are discarded.
    """

    def __init__(self, spec: ProblemSpec, enc: Encoding, rep: int = 0):
        # NOTE: this reseeds the *global* RNGs, so every construction resets
        # the random stream.  `rep` (new, default 0 = original behavior)
        # offsets the seed so that independent replications can differ.
        random.seed(spec.seed + rep)
        np.random.seed(spec.seed + rep)
        self.spec = spec
        self.enc = enc
        self.env = simpy.Environment()
        self.n_lines = len(spec.lines)
        self.m = len(spec.lines[0].machines)  # machines per line (assumed equal across lines)
        self.n_buffers = self.m - 1           # one buffer between consecutive machines
        # buffers[i][j]: finite store between machine j and machine j+1 on line i.
        self.buffers = [[simpy.Store(self.env, capacity=int(enc.capacities[i, j]))
                         for j in range(self.n_buffers)]
                        for i in range(self.n_lines)]
        self.machine_busy = [[False]*self.m for _ in range(self.n_lines)]
        self.completed = [0]*self.n_lines     # finished parts per line
        self.revenue = 0.0
        self.transfer_cost_total = 0.0
        self.util_time = [[0.0]*self.m for _ in range(self.n_lines)]         # accumulated busy minutes
        self.last_util_change = [[0.0]*self.m for _ in range(self.n_lines)]  # start of current busy interval

    def machine_process(self, i: int, j: int):
        """Process loop for machine j on line i: pull (or create) a part,
        process it with exponential service time and random breakdowns, then
        push it downstream (or count it as finished)."""
        params = self.spec.lines[i].machines[j]
        # The rate is sampled once per machine per run (units: parts/min).
        rate = random.uniform(params.cap_min, params.cap_max)
        mean_proc_min = 1.0 / max(rate, 1e-6)
        mtbf_min = params.mtbf_hours * 60.0
        mttr_min = params.mttr_min

        def time_to_failure():
            return random.expovariate(1.0 / max(mtbf_min, 1e-6))

        def repair_time():
            return random.expovariate(1.0 / max(mttr_min, 1e-6))

        up_buf = None if j == 0 else self.buffers[i][j-1]
        dn_buf = None if j == self.m-1 else self.buffers[i][j]

        while True:
            if j > 0:
                item = yield up_buf.get()
            else:
                # First machine has an infinite raw-material supply; the
                # product type is drawn uniformly at random.
                item = {"product": random.choice(self.spec.products).name}

            start = self.env.now
            self.machine_busy[i][j] = True
            self.last_util_change[i][j] = start

            # Exponential processing time, interrupted by failures: if a
            # failure occurs before the remaining work finishes, wait out the
            # repair and resume the remaining work.
            remaining = np.random.exponential(mean_proc_min)
            while remaining > 1e-6:
                ttf = time_to_failure()
                if ttf >= remaining:
                    yield self.env.timeout(remaining)
                    remaining = 0.0
                else:
                    yield self.env.timeout(ttf)
                    self.machine_busy[i][j] = False
                    self.util_time[i][j] += self.env.now - self.last_util_change[i][j]
                    yield self.env.timeout(repair_time())
                    self.machine_busy[i][j] = True
                    self.last_util_change[i][j] = self.env.now
                    remaining -= ttf

            end = self.env.now
            if j == self.m - 1:
                # Last machine: the part leaves the system and earns revenue.
                self.completed[i] += 1
                self.revenue += self._unit_revenue(item["product"])
            else:
                # May block while the downstream buffer is full; blocking time
                # is intentionally NOT counted as busy time (`end` is captured
                # before the put).
                yield self.env.process(self._put_with_blocking(dn_buf, item))

            self.machine_busy[i][j] = False
            self.util_time[i][j] += end - self.last_util_change[i][j]

    def _put_with_blocking(self, store: simpy.Store, item):
        """Put `item` into `store`, polling every 0.1 min while it is full.

        Polling (instead of relying on an event-based Store.put alone) is
        deliberate: the sharing process removes items from full buffers by
        mutating store.items directly, which would never wake a queued put
        event.
        """
        if len(store.items) >= store.capacity:
            while len(store.items) >= store.capacity:
                yield self.env.timeout(0.1)
        yield store.put(item)

    def _unit_revenue(self, prod_name: str) -> float:
        """Revenue of one finished unit of `prod_name` (0.0 if unknown)."""
        for p in self.spec.products:
            if p.name == prod_name:
                return p.unit_revenue
        return 0.0

    def wip_sharing_process(self, j: int):
        """Every simulated minute, move at most one WIP item per (src, dst)
        line pair from a full stage-j buffer to an empty peer buffer, with
        probability enc.transfers[src, j]; each move incurs a transfer cost."""
        while True:
            yield self.env.timeout(1.0)
            bufs = [self.buffers[i][j] for i in range(self.n_lines)]
            for i in range(self.n_lines):
                for k in range(self.n_lines):
                    if i == k:
                        continue
                    src = bufs[i]
                    dst = bufs[k]
                    if len(src.items) >= src.capacity and len(dst.items) == 0:
                        ts = float(self.enc.transfers[i, j])
                        if ts <= 0:
                            continue
                        if random.random() < ts:
                            try:
                                item = src.items.pop(0)
                            except IndexError:
                                continue
                            if len(dst.items) >= dst.capacity:
                                # Destination filled up meanwhile: undo the pop.
                                src.items.insert(0, item)
                                continue
                            # Bug fix: use Store.put() instead of appending to
                            # dst.items directly.  A raw append never triggers
                            # the pending get event of a machine already
                            # blocked on dst.get(), so transferred WIP stayed
                            # invisible until the next regular put.  put()
                            # succeeds immediately here (dst has free space)
                            # and wakes waiting consumers.
                            dst.put(item)
                            self.transfer_cost_total += self.spec.transfer_costs[i][k][j]

    def run(self):
        """Run warm-up plus measurement horizon; return (W, q, util)."""
        for i in range(self.n_lines):
            for j in range(self.m):
                self.env.process(self.machine_process(i, j))
        for j in range(self.n_buffers):
            self.env.process(self.wip_sharing_process(j))

        # Warm-up: run, then discard all statistics gathered so far.
        self.env.run(until=self.spec.warmup_hours*60)
        self.completed = [0]*self.n_lines
        self.revenue = 0.0
        self.transfer_cost_total = 0.0
        self.util_time = [[0.0]*self.m for _ in range(self.n_lines)]
        self.last_util_change = [[self.env.now]*self.m for _ in range(self.n_lines)]
        self.env.run(until=(self.spec.warmup_hours + self.spec.horizon_hours)*60)

        line_rates = [c / (self.spec.horizon_hours*60) for c in self.completed]  # parts/min per line
        q = sum(line_rates)/len(line_rates) if line_rates else 0.0
        W = self.revenue - self.transfer_cost_total
        # NOTE: machines still mid-job at the horizon end have their open busy
        # interval uncounted — a small bias that shrinks with longer horizons.
        util = sum(sum(u) for u in self.util_time) / (self.n_lines*self.m*self.spec.horizon_hours*60)
        return W, q, util

# -----------------------------
# Multi-objective tools
# -----------------------------

def dominates(a: Tuple[float,float], b: Tuple[float,float]) -> bool:
    """Return True iff `a` Pareto-dominates `b` (both objectives maximized)."""
    no_worse = a[0] >= b[0] and a[1] >= b[1]
    strictly_better = a[0] > b[0] or a[1] > b[1]
    return no_worse and strictly_better

def fast_non_dominated_sort(objs: List[Tuple[float,float]]) -> List[List[int]]:
    """NSGA-II fast non-dominated sorting (maximization, via `dominates`).

    Returns a list of fronts; fronts[0] holds the indices of the
    non-dominated solutions, fronts[1] the next layer, and so on.
    """
    num = len(objs)
    dominated_by = [set() for _ in range(num)]   # S[p]: solutions p dominates
    domination_count = [0] * num                 # n[p]: how many dominate p
    fronts = [[]]
    for p in range(num):
        for q in range(num):
            if p == q:
                continue
            if dominates(objs[p], objs[q]):
                dominated_by[p].add(q)
            elif dominates(objs[q], objs[p]):
                domination_count[p] += 1
        if domination_count[p] == 0:
            fronts[0].append(p)
    # Peel off successive fronts by decrementing domination counts.
    level = 0
    while fronts[level]:
        next_level = []
        for p in fronts[level]:
            for q in dominated_by[p]:
                domination_count[q] -= 1
                if domination_count[q] == 0:
                    next_level.append(q)
        fronts.append(next_level)
        level += 1
    if not fronts[-1]:
        fronts.pop()
    return fronts

def crowding_distance(front: List[int], objs: List[Tuple[float,float]]) -> Dict[int, float]:
    """NSGA-II crowding distance for the members of one front.

    Boundary solutions per objective get infinite distance; interior ones
    accumulate the normalized span of their two neighbors.
    """
    if not front:
        return {}
    dist = dict.fromkeys(front, 0.0)
    for obj_idx in range(2):
        ordered = sorted(front, key=lambda s: objs[s][obj_idx])
        dist[ordered[0]] = float('inf')
        dist[ordered[-1]] = float('inf')
        lo = objs[ordered[0]][obj_idx]
        hi = objs[ordered[-1]][obj_idx]
        if hi == lo:
            # Degenerate objective: all values equal, nothing to normalize.
            continue
        for pos in range(1, len(ordered) - 1):
            below = objs[ordered[pos - 1]][obj_idx]
            above = objs[ordered[pos + 1]][obj_idx]
            dist[ordered[pos]] += (above - below) / (hi - lo + 1e-12)
    return dist

# -----------------------------
# Initialization strategies
# -----------------------------

def random_init(n_lines, n_buffers, C_total, cap_bounds=(1,50)):
    """Random initialization: split C_total over all buffer slots via a
    Dirichlet draw, repair the rounding error, and draw uniform transfer rates.

    Note: the final clip to cap_bounds can make the capacities sum deviate
    slightly from C_total; the budget is treated as soft downstream.
    (Removed a dead `caps = np.zeros(...)` assignment that was immediately
    overwritten.)
    """
    slots = n_lines * n_buffers
    parts = np.random.dirichlet(np.ones(slots))
    raw = (parts * C_total).round().astype(int)
    diff = C_total - raw.sum()
    # Repair rounding drift one unit at a time on random slots.
    for _ in range(abs(int(diff))):
        k = np.random.randint(0, slots)
        raw[k] += int(math.copysign(1, diff))
    caps = raw.reshape(n_lines, n_buffers)
    caps = np.clip(caps, cap_bounds[0], cap_bounds[1])
    trans = np.random.rand(n_lines, n_buffers)
    return Encoding(caps, trans)

def heuristic_gaussian_init(n_lines, n_buffers, C_total, region_params=None, dev_params=None):
    """Heuristic init: spread each line's share C_total/n_lines over its
    buffers with a Gaussian profile; transfer rates ~ N(0.5, 0.15) clipped.

    region_params[i] in [0,1] places the bump (0 = front, 1 = back);
    dev_params[i] scales its width.  Both default to a mid-line bump.
    """
    caps = np.zeros((n_lines, n_buffers), dtype=int)
    for i in range(n_lines):
        mu = (n_buffers-1) * (region_params[i] if region_params else 0.5)
        sigma = (dev_params[i]*n_buffers if dev_params else 0.2*n_buffers) + 1e-6
        xs = np.arange(n_buffers)
        weights = np.exp(-0.5*((xs-mu)/sigma)**2)
        weights = weights / (weights.sum() + 1e-12)
        alloc = (weights * (C_total/n_lines)).round().astype(int)
        diff = int(C_total/n_lines) - int(alloc.sum())
        for _ in range(abs(diff)):
            k = np.random.randint(0, n_buffers)
            alloc[k] += int(math.copysign(1, diff))
        # Bug fix: Gaussian tails round to 0 and the repair loop can even
        # drive a slot negative; simpy.Store requires capacity > 0, so
        # enforce a minimum of 1 (random_init clips to >= 1 the same way).
        alloc = np.maximum(alloc, 1)
        caps[i,:] = alloc
    trans = np.clip(np.random.normal(0.5, 0.15, size=(n_lines, n_buffers)), 0, 1)
    return Encoding(caps, trans)

def throughput_based_init(spec: ProblemSpec, C_total):
    """Throughput-guided init: allocate each line's share of C_total to its
    buffers proportionally to the mean rate of the machine feeding each
    buffer; transfer rates are uniform random.
    """
    n_lines = len(spec.lines)
    m = len(spec.lines[0].machines)
    n_buffers = m-1
    caps = np.zeros((n_lines, n_buffers), dtype=int)
    for i in range(n_lines):
        # Buffer j sits after machine j, so weight it by machine j's mean rate.
        avgs = [statistics.mean((spec.lines[i].machines[j].cap_min, spec.lines[i].machines[j].cap_max))
                for j in range(m-1)]
        w = np.array(avgs); w = w / (w.sum() + 1e-12)
        alloc = (w * (C_total/n_lines)).round().astype(int)
        diff = int(C_total/n_lines) - int(alloc.sum())
        for _ in range(abs(diff)):
            k = np.random.randint(0, n_buffers)
            alloc[k] += int(math.copysign(1, diff))
        # Bug fix: rounding or the repair loop can leave a slot at 0 (or
        # below); simpy.Store requires capacity > 0, so enforce >= 1
        # (consistent with random_init's clip).
        alloc = np.maximum(alloc, 1)
        caps[i,:] = alloc
    trans = np.random.rand(n_lines, n_buffers)
    return Encoding(caps, trans)

# -----------------------------
# Transfer-rate local search (SA + neighborhood)
# -----------------------------

def neighbor_transfers(trans: np.ndarray, step: float, gran: float):
    """Return a copy of `trans` with one random entry perturbed by a uniform
    delta in ±step scaled by (1 + gran), clipped to [0, 1]."""
    n_lines, n_buffers = trans.shape
    candidate = trans.copy()
    row = np.random.randint(0, n_lines)
    col = np.random.randint(0, n_buffers)
    delta = np.random.uniform(-step, step) * (1.0 + gran)
    candidate[row, col] = np.clip(candidate[row, col] + delta, 0.0, 1.0)
    return candidate

def sa_search_transfers(spec: ProblemSpec, enc: Encoding, base_Wq: Tuple[float,float],
                        T0=10.0, alpha=0.95, iters=20, step=0.05, gran=0.0):
    """Simulated-annealing refinement of the transfer rates (capacities fixed).

    Starts from `enc.transfers` with objectives `base_Wq`; accepts moves by a
    weighted scalarization of (W, q), but only promotes a candidate to "best"
    if it Pareto-dominates the incumbent.  Returns (best_transfers, best_Wq).
    """
    def scalar(v):
        # Scalarization used only for the SA accept/reject decision.
        return 0.5*v[0] + 1000.0*v[1]

    best_trans, best_val = enc.transfers.copy(), base_Wq
    curr_trans, curr_val = enc.transfers.copy(), base_Wq
    temp = T0
    for _ in range(iters):
        cand_trans = neighbor_transfers(curr_trans, step, gran)
        W, q, _ = evaluate(spec, Encoding(enc.capacities, cand_trans), reps=1)
        cand_val = (W, q)
        delta = scalar(cand_val) - scalar(curr_val)
        # Metropolis criterion: always take improvements, sometimes worse moves.
        if delta >= 0 or random.random() < math.exp(delta / max(temp, 1e-6)):
            curr_trans, curr_val = cand_trans, cand_val
            if dominates(cand_val, best_val):
                best_trans, best_val = cand_trans, cand_val
        temp *= alpha
    return best_trans, best_val

# -----------------------------
# Firefly cluster optimizer (C-MOHFA-like)
# -----------------------------

@dataclass
class Firefly:
    """One candidate solution with its cached objectives and utilization."""
    enc: Encoding
    obj: Tuple[float,float] = (float('-inf'), float('-inf'))  # (W, q); -inf means not yet evaluated
    util: float = 0.0  # mean machine utilization from the last evaluation

def evaluate(spec: ProblemSpec, enc: Encoding, reps=2) -> Tuple[float,float,float]:
    """Simulate `enc` for `reps` replications and return mean (W, q, util).

    Bug fix: FlowShopEnv reseeds the global RNGs with spec.seed on every
    construction, so previously all replications were byte-identical and
    averaging over `reps` was a no-op.  Offset the seed per replication so
    they actually differ (replication 0 keeps the original seed, so reps=1
    behaves exactly as before).
    """
    Ws, qs, utils = [], [], []
    for r in range(reps):
        env = FlowShopEnv(replace(spec, seed=spec.seed + r), enc)
        W, q, util = env.run()
        Ws.append(W); qs.append(q); utils.append(util)
    return (statistics.mean(Ws), statistics.mean(qs), statistics.mean(utils))

def c_mohfa(spec: ProblemSpec, C_total: int, pop=24, clusters=3, iters=10,
            init_mix=(0.34, 0.33, 0.33), seed=0):
    """Cluster-based multi-objective hybrid firefly algorithm (C-MOHFA-like).

    Evolves ~`pop` fireflies split into `clusters` sub-populations for
    `iters` generations, maximizing (W, q).  Each firefly encodes buffer
    capacities (budget C_total) and transfer rates.  `init_mix` gives the
    probabilities of the three initialization strategies
    (random / gaussian-heuristic / throughput-based).
    Returns up to 20 deduplicated solutions sorted by descending (W, q).
    """
    random.seed(seed); np.random.seed(seed)
    nL = len(spec.lines); nB = len(spec.lines[0].machines) - 1
    cluster_pops: List[List[Firefly]] = []
    per_cluster = max(1, pop // clusters)
    # --- Initialization: mixed strategies, evaluated once each ---
    for c in range(clusters):
        ff: List[Firefly] = []
        for k in range(per_cluster):
            r = random.random()
            if r < init_mix[0]:
                enc = random_init(nL, nB, C_total)
            elif r < init_mix[0] + init_mix[1]:
                enc = heuristic_gaussian_init(nL, nB, C_total,
                                              region_params=[0.52, 0.49, 0.47][:nL],
                                              dev_params=[0.21, 0.19, 0.23][:nL])
            else:
                enc = throughput_based_init(spec, C_total)
            W, q, util = evaluate(spec, enc, reps=1)
            ff.append(Firefly(enc, (W,q), util))
        cluster_pops.append(ff)

    # External archive of first-front solutions found over all generations.
    archive: List[Firefly] = []

    def add_to_archive(f: Firefly):
        # Deep-copies the encoding; duplicates are pruned at the very end.
        archive.append(Firefly(f.enc.clone(), f.obj, f.util))

    base_attr = 1.0  # firefly base attractiveness (beta0)
    gamma = 1.0      # light-absorption coefficient
    for it in range(iters):
        # Archive the current global non-dominated front.
        all_ff = [x for cl in cluster_pops for x in cl]
        objs = [f.obj for f in all_ff]
        fronts = fast_non_dominated_sort(objs)
        if fronts:
            for idx in fronts[0]:
                add_to_archive(all_ff[idx])

        for ci, cl in enumerate(cluster_pops):
            # NSGA-II-style rank + crowding distance within the cluster.
            objs = [f.obj for f in cl]
            fronts = fast_non_dominated_sort(objs)
            rank = {}
            for r, fr in enumerate(fronts):
                for idx in fr:
                    rank[idx] = r
            cd = {}
            for fr in fronts:
                cd.update(crowding_distance(fr, objs))

            new_cl: List[Firefly] = []
            for a_idx, a in enumerate(cl):
                # Pick a random partner; move toward it only if it is better
                # (lower rank, or same rank with larger crowding distance).
                candidates = [b_idx for b_idx in range(len(cl)) if b_idx != a_idx]
                if not candidates:
                    new_cl.append(a); continue
                b_idx = random.choice(candidates)
                b = cl[b_idx]
                better = (rank.get(b_idx, 1e9) < rank.get(a_idx, 1e9)) or (rank.get(b_idx, 1e9) == rank.get(a_idx, 1e9) and cd.get(b_idx,0)>cd.get(a_idx,0))
                if better:
                    # Attractiveness decays with squared distance in the
                    # joint (capacity, transfer) space.
                    de = np.linalg.norm(b.enc.capacities - a.enc.capacities)
                    dt = np.linalg.norm(b.enc.transfers - a.enc.transfers)
                    beta = base_attr * math.exp(-gamma*(de*de + dt*dt))
                    # Random perturbation shrinks over iterations (1/(1+it)).
                    new_caps = a.enc.capacities + np.random.normal(0, 1.0, size=a.enc.capacities.shape) * (1.0/(1+it))
                    new_caps = new_caps + beta * (b.enc.capacities - a.enc.capacities)
                    new_caps = np.maximum(new_caps, 1.0)
                    # Rescale so capacities respect the C_total budget
                    # (approximately, because of the final rounding).
                    scale = C_total / max(new_caps.sum(), 1e-6)
                    new_caps = (new_caps * scale).round().astype(int)
                    new_trans = a.enc.transfers + np.random.normal(0, 0.1, size=a.enc.transfers.shape) * (1.0/(1+it))
                    new_trans = new_trans + beta * (b.enc.transfers - a.enc.transfers)
                    new_trans = np.clip(new_trans, 0.0, 1.0)
                    new_enc = Encoding(new_caps, new_trans)
                else:
                    new_enc = a.enc.clone()

                # Evaluate, then refine the transfer rates with a short SA
                # run; adopt the SA objectives only if they dominate.
                W, q, util = evaluate(spec, new_enc, reps=1)
                improved_trans, (W2, q2) = sa_search_transfers(spec, new_enc, (W,q),
                                                               T0=5.0, alpha=0.9, iters=5,
                                                               step=0.05, gran=it/(iters+1))
                new_enc = Encoding(new_enc.capacities, improved_trans)
                if dominates((W2,q2), (W,q)):
                    W, q = W2, q2
                new_cl.append(Firefly(new_enc, (W,q), util))

            cluster_pops[ci] = new_cl

        # Cross-cluster migration: ~10% of each cluster is removed and
        # reinserted into a randomly chosen cluster (possibly the same one).
        migrants: List[Firefly] = []
        for ci, cl in enumerate(cluster_pops):
            k = max(1, int(0.1*len(cl)))
            if len(cl) == 0: continue
            k = min(k, len(cl))
            idxs = random.sample(range(len(cl)), k)
            migrants.extend([cl[idx] for idx in idxs])
            for idx in sorted(idxs, reverse=True):
                del cl[idx]
        for mig in migrants:
            if not cluster_pops:
                break
            ci = random.randrange(len(cluster_pops))
            cluster_pops[ci].append(mig)

    # Merge populations with the archive; deduplicate by (capacities,
    # transfers rounded to 3 decimals), keeping the dominant copy.
    all_ff = [x for cl in cluster_pops for x in cl] + archive
    uniq = {}
    for f in all_ff:
        key = (tuple(f.enc.capacities.flatten()), tuple(np.round(f.enc.transfers,3).flatten()))
        if key not in uniq or dominates(f.obj, uniq[key].obj):
            uniq[key] = f
    finals = list(uniq.values())
    finals.sort(key=lambda f: (-f.obj[0], -f.obj[1]))
    return finals[:20]

# -----------------------------
# Example usage (small demo)
# -----------------------------

def build_demo_spec() -> ProblemSpec:
    """Construct the small 3-line x 4-machine demo instance used by __main__."""
    def _fresh_machines():
        # Each line gets its own MachineParams objects (MTBF 4.5 h, MTTR 28 min).
        return [
            MachineParams(10, 14, 4.5, 28/60),
            MachineParams(7, 10, 4.5, 28/60),
            MachineParams(9, 13, 4.5, 28/60),
            MachineParams(8, 12, 4.5, 28/60),
        ]

    lines = [LineSpec(_fresh_machines()) for _ in range(3)]

    # transfer_costs[i][k][j]: 0 on the diagonal, otherwise 5.0 at even
    # buffer stages and 4.0 at odd ones.
    transfer_costs = [[[0.0 if i == k else (5.0 if j % 2 == 0 else 4.0)
                        for j in range(3)]
                       for k in range(3)]
                      for i in range(3)]

    products = [
        ProductSpec("A", 4.0),
        ProductSpec("B", 4.5),
        ProductSpec("C", 4.0),
        ProductSpec("D", 5.0),
        ProductSpec("E", 3.5),
    ]
    return ProblemSpec(lines=lines, products=products, transfer_costs=transfer_costs,
                       horizon_hours=1.0, warmup_hours=0.2, seed=123)

if __name__ == "__main__":
    # Smoke-test run: tiny population / few iterations on the demo instance.
    spec = build_demo_spec()
    C_total = 300  # total buffer-capacity budget shared by all lines
    finals = c_mohfa(spec, C_total, pop=12, clusters=3, iters=4, seed=1)
    print("Top solutions (W, q):")
    for i, f in enumerate(finals[:5], 1):
        print(f"{i:2d}. W={f.obj[0]:.1f}, q={f.obj[1]:.4f}, util~{f.util:.3f}")
