#!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
@Project ：V2 
@File    ：experiments_cmohfa.py
@IDE     ：PyCharm 
@Author  ：郭星
@Date    ：2025/9/28 20:04 
'''

# experiments_cmohfa.py
# See header comments for usage. Requires: numpy, matplotlib, simpy
import os, math, random, statistics, csv
from dataclasses import dataclass
from typing import List, Tuple, Dict, Any
import numpy as np
import matplotlib.pyplot as plt
import importlib.util, sys

# Load core module from local path (adjust if renamed).
BASE_PATH = os.path.join(os.path.dirname(__file__), "wip_buffer_c_mohfa.py")
# Import the core simulation/optimization module directly from its file path
# (no package install needed) and register it in sys.modules under the name
# "wip_core" so subsequent imports resolve to this same module object.
spec = importlib.util.spec_from_file_location("wip_core", BASE_PATH)
wip_core = importlib.util.module_from_spec(spec)
sys.modules["wip_core"] = wip_core
spec.loader.exec_module(wip_core)

# Local aliases for the core API used throughout the experiments below.
ProblemSpec = wip_core.ProblemSpec
MachineParams = wip_core.MachineParams
LineSpec = wip_core.LineSpec
ProductSpec = wip_core.ProductSpec
Encoding = wip_core.Encoding
evaluate = wip_core.evaluate
random_init = wip_core.random_init
heuristic_gaussian_init = wip_core.heuristic_gaussian_init
throughput_based_init = wip_core.throughput_based_init
fast_non_dominated_sort = wip_core.fast_non_dominated_sort
crowding_distance = wip_core.crowding_distance
dominates = wip_core.dominates

# All CSV tables and PNG plots produced by the experiments are written here.
OUTDIR = os.path.join(os.path.dirname(__file__), "experiments_out13")
os.makedirs(OUTDIR, exist_ok=True)

def build_spec_lines_m_stages(n_lines: int, n_machines: int,
                              mtbf_hours=4.5, mttr_min=28/60,
                              cap_ranges=((10,14),(7,10),(9,13),(8,12)),
                              product_revenue=(4.0, 4.5, 4.0, 5.0, 3.5),
                              transfer_cost_base=(5.0, 4.0),
                              seed=123):
    """Build a synthetic ProblemSpec with `n_lines` identical lines of `n_machines` stages.

    Args:
        n_lines: number of parallel production lines.
        n_machines: stages per line; must be >= 2 so each line has at least
            one intermediate buffer.
        mtbf_hours, mttr_min: reliability parameters applied to every machine.
        cap_ranges: per-stage (cap_min, cap_max) templates; tiled/truncated so
            there is exactly one template per stage.
        product_revenue: unit revenues zipped against product names "A".."E"
            (entries beyond the shorter sequence are ignored).
        transfer_cost_base: two base costs alternated by buffer index parity
            for every cross-line transfer.
        seed: RNG seed stored on the returned ProblemSpec.

    Returns:
        ProblemSpec with a 1.0h horizon and 0.2h warmup.

    Raises:
        ValueError: if n_machines < 2.
    """
    # Explicit validation instead of `assert`, which is stripped under `python -O`.
    if n_machines < 2:
        raise ValueError("n_machines must be >= 2")
    # Tile the capacity-range templates so there is one (min, max) per stage.
    base = list(cap_ranges)
    if len(base) < n_machines:
        reps = (n_machines + len(base) - 1) // len(base)  # ceil division
        base = (base * reps)[:n_machines]
    else:
        base = base[:n_machines]
    lines = [LineSpec([MachineParams(lo, hi, mtbf_hours, mttr_min) for (lo, hi) in base])
             for _ in range(n_lines)]
    n_buffers = n_machines - 1
    # transfer_costs[i][k][j]: cost of a transfer at buffer j from line i to
    # line k. Zero on the diagonal (no self-transfer); otherwise the two base
    # costs alternate with buffer parity.
    transfer_costs = [[[0.0 for _ in range(n_buffers)] for _ in range(n_lines)]
                      for _ in range(n_lines)]
    for i in range(n_lines):
        for k in range(n_lines):
            if i == k:
                continue
            for j in range(n_buffers):
                transfer_costs[i][k][j] = transfer_cost_base[j % 2]
    products = [ProductSpec(name, rv)
                for name, rv in zip(["A","B","C","D","E"], product_revenue)]
    return ProblemSpec(lines=lines, products=products, transfer_costs=transfer_costs,
                       horizon_hours=1.0, warmup_hours=0.2, seed=seed)

def scale_transfer_costs(spec: ProblemSpec, factor: float) -> ProblemSpec:
    """Return a copy of *spec* with every inter-line transfer cost multiplied by *factor*."""
    scaled = []
    for src_block in spec.transfer_costs:
        scaled.append([[factor * cost for cost in buf_row] for buf_row in src_block])
    return ProblemSpec(lines=spec.lines, products=spec.products, transfer_costs=scaled,
                       horizon_hours=spec.horizon_hours, warmup_hours=spec.warmup_hours,
                       seed=spec.seed)

def scale_reliability(spec: ProblemSpec, mtbf_scale: float=1.0, mttr_scale: float=1.0) -> ProblemSpec:
    """Return a copy of *spec* with every machine's MTBF and MTTR rescaled.

    Capacity bounds are preserved; only the reliability parameters change.
    """
    rescaled_lines = [
        LineSpec([
            MachineParams(mach.cap_min, mach.cap_max,
                          mach.mtbf_hours * mtbf_scale,
                          mach.mttr_min * mttr_scale)
            for mach in line.machines
        ])
        for line in spec.lines
    ]
    return ProblemSpec(lines=rescaled_lines, products=spec.products,
                       transfer_costs=spec.transfer_costs,
                       horizon_hours=spec.horizon_hours, warmup_hours=spec.warmup_hours,
                       seed=spec.seed)

def skew_revenue(spec: ProblemSpec, skew_factor: float=1.5) -> ProblemSpec:
    """Boost the highest-revenue product by *skew_factor*, then renormalize.

    The renormalization keeps the total unit revenue across products equal to
    the original total, so only the revenue *distribution* is skewed.
    """
    revenues = [p.unit_revenue for p in spec.products]
    top = max(revenues)
    top_idx = revenues.index(top)  # first occurrence wins on ties
    total = sum(revenues)
    skewed = list(revenues)
    skewed[top_idx] = top * skew_factor
    s = sum(skewed)
    # Guard against a degenerate all-zero revenue vector.
    skewed = [v * total / max(s, 1e-9) for v in skewed]
    new_products = [ProductSpec(p.name, rv) for p, rv in zip(spec.products, skewed)]
    return ProblemSpec(lines=spec.lines, products=new_products,
                       transfer_costs=spec.transfer_costs,
                       horizon_hours=spec.horizon_hours, warmup_hours=spec.warmup_hours,
                       seed=spec.seed)

@dataclass
class ExConfig:
    """Configuration for one C-MOHFA experiment run.

    Note: `dataclass` is already imported at the top of the file; the
    duplicate import that used to precede this class was redundant.
    """
    pop: int = 16       # total population size (split across clusters)
    clusters: int = 3   # number of firefly sub-populations
    iters: int = 6      # outer optimization iterations
    # Roulette weights for the (random, gaussian, throughput) initializers.
    init_mix: Tuple[float, float, float] = (0.34, 0.33, 0.33)
    use_init_random: bool = True      # enable random initializer
    use_init_gauss: bool = True       # enable Gaussian heuristic initializer
    use_init_throughput: bool = True  # enable throughput-based initializer
    use_SA: bool = True        # enable simulated-annealing refinement of transfers
    use_archive: bool = True   # keep an external non-dominated archive
    seed: int = 1              # RNG seed for random / numpy
    reps_eval: int = 1         # simulation replications per evaluation

def c_mohfa_ex(spec: ProblemSpec, C_total: int, cfg: ExConfig):
    """Configurable C-MOHFA optimizer: allocate C_total buffer capacity for *spec*.

    Runs a clustered multi-objective firefly search over encodings (integer
    buffer capacities + transfer fractions in [0, 1]), maximizing the
    bi-objective tuple (W, q) returned by `evaluate` (W = revenue; q is the
    second objective, labeled "theta"/throughput by the callers' CSV output).
    Feature toggles (initializer mix, SA refinement, external archive,
    cluster count) come from *cfg*.

    Returns:
        Up to 20 de-duplicated solutions sorted by descending W, then
        descending q; each is a dict with keys "enc", "obj" == (W, q), "util".
    """
    random.seed(cfg.seed); np.random.seed(cfg.seed)
    nL = len(spec.lines); nB = len(spec.lines[0].machines) - 1
    # Per-line parameters for the Gaussian heuristic initializer, tiled to nL entries.
    region_params = ([0.52, 0.49, 0.47] * ((nL + 2) // 3))[:nL]
    dev_params = ([0.21, 0.19, 0.23] * ((nL + 2) // 3))[:nL]

    # --- Initialization: cfg.clusters sub-populations, each member seeded by
    # one of three initializers chosen via weighted roulette. ---
    per_cluster = max(1, cfg.pop//cfg.clusters)
    cluster_pops: List[List[Any]] = []
    for c in range(cfg.clusters):
        ff = []
        for _ in range(per_cluster):
            # Collect the enabled initializers with their roulette weights;
            # fall back to pure random init if all are disabled.
            init_choices = []
            if cfg.use_init_random: init_choices.append(("rand", cfg.init_mix[0]))
            if cfg.use_init_gauss: init_choices.append(("gauss", cfg.init_mix[1]))
            if cfg.use_init_throughput: init_choices.append(("thru", cfg.init_mix[2]))
            if not init_choices: init_choices = [("rand", 1.0)]
            totw = sum(w for _,w in init_choices)
            r = random.random() * totw; s = 0.0; tag = "rand"
            for t,w in init_choices:
                s += w
                if r <= s: tag = t; break
            if tag == "rand":
                enc = random_init(nL, nB, C_total)
            elif tag == "gauss":
                enc = heuristic_gaussian_init(nL, nB, C_total, region_params=region_params, dev_params=dev_params)
            else:
                enc = throughput_based_init(spec, C_total)
            W, q, util = evaluate(spec, enc, reps=cfg.reps_eval)
            ff.append({"enc": enc, "obj": (W,q), "util": util})
        cluster_pops.append(ff)

    # External archive of first-front solutions; encodings are copied so later
    # in-place moves cannot mutate archived entries.
    archive: List[Dict[str,Any]] = []
    def add_arc(f):
        if not cfg.use_archive: return
        archive.append({"enc": Encoding(f["enc"].capacities.copy(), f["enc"].transfers.copy()),
                        "obj": f["obj"], "util": f["util"]})

    # Firefly attractiveness parameters: beta = base_attr * exp(-gamma * dist^2).
    base_attr = 1.0; gamma = 1.0
    for it in range(cfg.iters):
        # Archive the global first non-dominated front before this iteration's moves.
        all_ff = [x for cl in cluster_pops for x in cl]
        objs = [f["obj"] for f in all_ff]
        fronts = fast_non_dominated_sort(objs)
        if fronts:
            for idx in fronts[0]: add_arc(all_ff[idx])

        for ci, cl in enumerate(cluster_pops):
            # Rank members of this cluster by front index and crowding distance.
            objs = [f["obj"] for f in cl]
            fronts = fast_non_dominated_sort(objs)
            rank = {}
            for r, fr in enumerate(fronts):
                for idx in fr: rank[idx] = r
            cd = {}
            for fr in fronts: cd.update(wip_core.crowding_distance(fr, objs))

            new_cl = []
            for a_idx, a in enumerate(cl):
                # Pick one random peer; move toward it only if it is "better"
                # (lower front rank, or same rank with larger crowding distance).
                candidates = [b_idx for b_idx in range(len(cl)) if b_idx != a_idx]
                if not candidates: new_cl.append(a); continue
                b_idx = random.choice(candidates)
                b = cl[b_idx]
                better = (rank.get(b_idx, 1e9) < rank.get(a_idx, 1e9)) or (rank.get(b_idx, 1e9) == rank.get(a_idx, 1e9) and cd.get(b_idx,0)>cd.get(a_idx,0))
                if better:
                    # Attractiveness decays with squared distance over both
                    # capacity and transfer components of the encoding.
                    de = np.linalg.norm(b["enc"].capacities - a["enc"].capacities)
                    dt = np.linalg.norm(b["enc"].transfers - a["enc"].transfers)
                    beta = base_attr * math.exp(-gamma*(de*de + dt*dt))
                    # Gaussian jitter shrinks as iterations progress (1/(1+it)).
                    new_caps = a["enc"].capacities + np.random.normal(0, 1.0, size=a["enc"].capacities.shape)*(1.0/(1+it))
                    new_caps = new_caps + beta*(b["enc"].capacities - a["enc"].capacities)
                    new_caps = np.maximum(new_caps, 1.0)
                    # Rescale so total capacity stays (approximately, after
                    # rounding) equal to the C_total budget.
                    scale = C_total / max(new_caps.sum(), 1e-6)
                    new_caps = (new_caps * scale).round().astype(int)
                    new_trans = a["enc"].transfers + np.random.normal(0, 0.1, size=a["enc"].transfers.shape)*(1.0/(1+it))
                    new_trans = new_trans + beta*(b["enc"].transfers - a["enc"].transfers)
                    new_trans = np.clip(new_trans, 0.0, 1.0)
                    new_enc = Encoding(new_caps, new_trans)
                else:
                    new_enc = a["enc"].clone()

                W, q, util = evaluate(spec, new_enc, reps=cfg.reps_eval)
                if cfg.use_SA:
                    # Simulated-annealing refinement of the transfer fractions
                    # (presumably a local search in wip_core — see
                    # sa_search_transfers); the result is accepted only if its
                    # objectives dominate the pre-SA objectives.
                    improved_trans, (W2, q2) = wip_core.sa_search_transfers(spec, new_enc, (W,q),
                                                                            T0=5.0, alpha=0.9, iters=5,
                                                                            step=0.05, gran=it/(cfg.iters+1))
                    if wip_core.dominates((W2,q2), (W,q)):
                        new_enc = Encoding(new_enc.capacities, improved_trans)
                        W, q = W2, q2
                new_cl.append({"enc": new_enc, "obj": (W,q), "util": util})
            cluster_pops[ci] = new_cl

        # Migration: move ~10% of each cluster (at least one member) to a
        # uniformly random cluster, including possibly back to its own.
        migrants = []
        for ci, cl in enumerate(cluster_pops):
            if not cl: continue
            k = max(1, int(0.1*len(cl))); k = min(k, len(cl))
            idxs = random.sample(range(len(cl)), k)
            migrants.extend([cl[idx] for idx in idxs])
            for idx in sorted(idxs, reverse=True): del cl[idx]
        for m in migrants:
            cluster_pops[random.randrange(len(cluster_pops))].append(m)

    # Merge final populations with the archive, de-duplicate by encoding
    # (transfers rounded to 3 decimals), keeping the dominating duplicate.
    all_ff = [x for cl in cluster_pops for x in cl] + ([{"enc": a["enc"], "obj": a["obj"], "util": a["util"]} for a in archive] if cfg.use_archive else [])
    uniq = {}
    for f in all_ff:
        key = (tuple(f["enc"].capacities.flatten()), tuple(np.round(f["enc"].transfers,3).flatten()))
        if key not in uniq or wip_core.dominates(f["obj"], uniq[key]["obj"]):
            uniq[key] = f
    finals = list(uniq.values())
    finals.sort(key=lambda f: (-f["obj"][0], -f["obj"][1]))
    return finals[:20]

def save_csv(path, rows: List[Dict[str,Any]], header_order=None):
    """Write a list of row dicts to *path* as UTF-8 CSV.

    If *header_order* is omitted, the first row's key order defines the
    columns. An empty *rows* list writes nothing (no file is created).
    """
    if not rows:
        return
    fieldnames = header_order if header_order is not None else list(rows[0].keys())
    with open(path, "w", newline="", encoding="utf-8") as fh:
        writer = csv.DictWriter(fh, fieldnames=fieldnames)
        writer.writeheader()
        writer.writerows(rows)

def plot_scatter_W_q(path, points: List[Tuple[float,float]], title="W vs theta"):
    """Scatter-plot (W, theta) points — theta on x, revenue W on y — and save to *path*."""
    theta_vals = [pt[1] for pt in points]
    w_vals = [pt[0] for pt in points]
    plt.figure()
    plt.scatter(theta_vals, w_vals)
    plt.xlabel("Average throughput (theta)")
    plt.ylabel("Revenue W")
    plt.title(title)
    plt.tight_layout()
    plt.savefig(path)
    plt.close()

def exp1_baselines(C_total_list=(200, 300, 400), seed=1):
    """Experiment 1: buffer-sharing C-MOHFA vs. a random no-sharing baseline.

    For each total capacity: run the sharing optimizer, then evaluate 24
    random allocations with all transfers zeroed. The top 5 of each side go
    into a combined CSV, and one scatter plot is saved per capacity level.
    """
    rows = []
    for Ctot in C_total_list:
        spec = wip_core.build_demo_spec()
        shared_finals = c_mohfa_ex(spec, Ctot, ExConfig(seed=seed, pop=12, clusters=3, iters=4))

        # Baseline: random capacity splits, transfers forced to zero.
        n_buffers = len(spec.lines[0].machines) - 1
        baseline_points = []
        for _ in range(24):
            enc = random_init(len(spec.lines), n_buffers, Ctot)
            no_share = Encoding(enc.capacities, np.zeros_like(enc.transfers))
            W, q, _ = evaluate(spec, no_share, reps=1)
            baseline_points.append((W, q))
        top_baseline = sorted(baseline_points, key=lambda x: (-x[0], -x[1]))[:5]

        for i, f in enumerate(shared_finals[:5]):
            rows.append({"C_total": Ctot, "mode": "Share-CMOHFA", "rank": i+1,
                         "W": f["obj"][0], "theta": f["obj"][1]})
        for i, p in enumerate(top_baseline):
            rows.append({"C_total": Ctot, "mode": "NoShare-Rand", "rank": i+1,
                         "W": p[0], "theta": p[1]})
        plot_scatter_W_q(os.path.join(OUTDIR, f"exp1_C{Ctot}.png"),
                         [(r["W"], r["theta"]) for r in rows if r["C_total"]==Ctot],
                         title=f"Exp1: Share vs No-Share @ C_total={Ctot}")
    save_csv(os.path.join(OUTDIR, "exp1_baselines.csv"), rows,
             header_order=["C_total","mode","rank","W","theta"])

def exp2_ablations(C_total=300, seed=1):
    """Experiment 2: ablation study — disable one algorithm component per case."""
    cases = [
        ("full", ExConfig(seed=seed, pop=12, clusters=3, iters=4, use_SA=True, use_archive=True,
                          use_init_random=True, use_init_gauss=True, use_init_throughput=True)),
        ("no_SA", ExConfig(seed=seed, pop=12, clusters=3, iters=4, use_SA=False, use_archive=True)),
        ("single_cluster", ExConfig(seed=seed, pop=12, clusters=1, iters=4)),
        ("only_random_init", ExConfig(seed=seed, pop=12, clusters=3, iters=4,
                                      use_init_random=True, use_init_gauss=False, use_init_throughput=False)),
        ("only_gauss_init", ExConfig(seed=seed, pop=12, clusters=3, iters=4,
                                     use_init_random=False, use_init_gauss=True, use_init_throughput=False)),
        ("only_thru_init", ExConfig(seed=seed, pop=12, clusters=3, iters=4,
                                    use_init_random=False, use_init_gauss=False, use_init_throughput=True)),
        ("no_archive", ExConfig(seed=seed, pop=12, clusters=3, iters=4, use_archive=False)),
    ]
    spec = wip_core.build_demo_spec()
    rows = []
    for name, cfg in cases:
        finals = c_mohfa_ex(spec, C_total, cfg)
        # Record the top-5 solutions and a full Pareto scatter for this case.
        rows.extend({"case": name, "rank": i+1, "W": f["obj"][0], "theta": f["obj"][1]}
                    for i, f in enumerate(finals[:5]))
        plot_scatter_W_q(os.path.join(OUTDIR, f"exp2_{name}.png"),
                         [(f["obj"][0], f["obj"][1]) for f in finals],
                         title=f"Exp2: {name}")
    save_csv(os.path.join(OUTDIR, "exp2_ablations.csv"), rows,
             header_order=["case","rank","W","theta"])

def exp3_sensitivity(seed=1):
    """Experiment 3: one-at-a-time sensitivity sweeps.

    Sweeps total capacity, transfer-cost multiplier, machine reliability
    (MTBF/MTTR scaling), and revenue skew; records the top solution of each
    run and plots W / theta against C_total.
    """
    base = wip_core.build_demo_spec()
    rows = []

    def best_obj(spec_variant, C_total):
        # Run the optimizer on one variant and return its top-ranked (W, theta).
        finals = c_mohfa_ex(spec_variant, C_total, ExConfig(seed=seed, pop=12, clusters=3, iters=4))
        return finals[0]["obj"]

    for C in (150, 250, 350, 450):
        W, theta = best_obj(base, C)
        rows.append({"type": "C_total", "level": C, "W": W, "theta": theta})
    for fac in (0.5, 1.0, 2.0, 3.0):
        W, theta = best_obj(scale_transfer_costs(base, fac), 300)
        rows.append({"type": "TransferCostMul", "level": fac, "W": W, "theta": theta})
    for mtbf_s, mttr_s in ((1.2, 0.8), (1.0, 1.0), (0.8, 1.2)):
        W, theta = best_obj(scale_reliability(base, mtbf_scale=mtbf_s, mttr_scale=mttr_s), 300)
        rows.append({"type": "Reliability", "level": f"MTBFx{mtbf_s}_MTTRx{mttr_s}", "W": W, "theta": theta})
    for k in (1.0, 1.3, 1.6):
        W, theta = best_obj(skew_revenue(base, skew_factor=k), 300)
        rows.append({"type": "RevenueSkew", "level": k, "W": W, "theta": theta})
    save_csv(os.path.join(OUTDIR, "exp3_sensitivity.csv"), rows,
             header_order=["type","level","W","theta"])

    ct = [r for r in rows if r["type"]=="C_total"]

    def line_plot(field, ylabel, fname, title):
        # Single-series line plot over the C_total sweep.
        plt.figure()
        plt.plot([r["level"] for r in ct], [r[field] for r in ct], marker='o')
        plt.xlabel("C_total")
        plt.ylabel(ylabel)
        plt.title(title)
        plt.tight_layout()
        plt.savefig(os.path.join(OUTDIR, fname))
        plt.close()

    line_plot("W", "W", "exp3_W_vs_C.png", "Exp3: W vs C_total")
    line_plot("theta", "theta", "exp3_theta_vs_C.png", "Exp3: theta vs C_total")

def exp4_scalability(seed=1):
    """Experiment 4: scalability — run on increasing (lines x stages) problem sizes."""
    rows = []
    for nL, nM, C in [(3, 4, 200), (4, 5, 300), (6, 6, 400)]:
        sp = build_spec_lines_m_stages(nL, nM, seed=seed)
        finals = c_mohfa_ex(sp, C, ExConfig(seed=seed, pop=16, clusters=3, iters=5))
        top_W, top_theta = finals[0]["obj"]
        rows.append({"lines": nL, "stages": nM, "C_total": C, "W": top_W, "theta": top_theta})
        plot_scatter_W_q(os.path.join(OUTDIR, f"exp4_{nL}x{nM}.png"),
                         [(f["obj"][0], f["obj"][1]) for f in finals],
                         title=f"Exp4: Pareto (lines={nL}, stages={nM})")
    save_csv(os.path.join(OUTDIR, "exp4_scalability.csv"), rows,
             header_order=["lines","stages","C_total","W","theta"])

def main():
    """Seed the global RNGs for reproducibility, then run all four experiments."""
    random.seed(1)
    np.random.seed(1)
    exp1_baselines(C_total_list=(200,300,400), seed=1)
    exp2_ablations(C_total=300, seed=1)
    exp3_sensitivity(seed=1)
    exp4_scalability(seed=1)
    print("All experiments configured. CSVs and PNGs saved to:", OUTDIR)

# Run the full experiment suite only when executed as a script (not on import).
if __name__ == "__main__":
    main()
